Dataset schema. Each row describes one class extracted from a source repository; for string columns, Min and Max refer to string length.

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 843k |
| repository_name | string (length) | 7 | 55 |
| file_path | string (length) | 9 | 332 |
| class_name | string (length) | 3 | 290 |
| human_written_code | string (length) | 12 | 4.36M |
| class_skeleton | string (length) | 19 | 2.2M |
| total_program_units | int64 | 1 | 9.57k |
| total_doc_str | int64 | 0 | 4.2k |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 300 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 176 |
| CountClassBase | float64 | 0 | 48 |
| CountClassCoupled | float64 | 0 | 589 |
| CountClassCoupledModified | float64 | 0 | 581 |
| CountClassDerived | float64 | 0 | 5.37k |
| CountDeclInstanceMethod | float64 | 0 | 4.2k |
| CountDeclInstanceVariable | float64 | 0 | 299 |
| CountDeclMethod | float64 | 0 | 4.2k |
| CountDeclMethodAll | float64 | 0 | 4.2k |
| CountLine | float64 | 1 | 115k |
| CountLineBlank | float64 | 0 | 9.01k |
| CountLineCode | float64 | 0 | 94.4k |
| CountLineCodeDecl | float64 | 0 | 46.1k |
| CountLineCodeExe | float64 | 0 | 91.3k |
| CountLineComment | float64 | 0 | 27k |
| CountStmt | float64 | 1 | 93.2k |
| CountStmtDecl | float64 | 0 | 46.1k |
| CountStmtExe | float64 | 0 | 90.2k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 6k |
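To make the schema concrete, here is a minimal sketch of loading a row with this layout through the Hugging Face `datasets` library. The repository id `org/class-metrics-dataset` and the split name `train` are placeholders, not the dataset's actual identifiers.

```python
# Minimal sketch, assuming this table is published as a Hugging Face dataset.
# "org/class-metrics-dataset" and split="train" are placeholder assumptions.
from datasets import load_dataset

ds = load_dataset("org/class-metrics-dataset", split="train")

row = ds[0]
print(row["repository_name"], row["file_path"], row["class_name"])
print(row["human_written_code"][:200])   # the extracted class source
print(row["class_skeleton"][:200])       # the same class with method bodies stripped
print(row["CountLineCode"], row["SumCyclomatic"])  # per-class code metrics
```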
Row 147,648
repository_name: MAVENSDC/cdflib
file_path: benchmarks/benchmarks.py
class_name: benchmarks.benchmarks.TimeSuite

human_written_code:
class TimeSuite:
    """
    Benchmarks that time cdflib epoch encoding and conversion to datetime
    for CDF_EPOCH and CDF_TIME_TT2000 values.
    """
    def setup(self):
        self.epochs = np.ones(1000) * 62567898765432.0
        self.epochs_tt2000 = (np.ones(1000) * 186999622360321123).astype(int)
    def time_epoch_encode(self):
        cdfepoch.encode(self.epochs)
    def time_epoch_to_datetime(self):
        cdfepoch.to_datetime(self.epochs)
    def time_epoch_to_datetime_tt2000(self):
        cdfepoch.to_datetime(self.epochs_tt2000)
class_skeleton:
class TimeSuite:
    '''
    Benchmarks that time cdflib epoch encoding and conversion to datetime
    for CDF_EPOCH and CDF_TIME_TT2000 values.
    '''
    def setup(self):
        pass
    def time_epoch_encode(self):
        pass
    def time_epoch_to_datetime(self):
        pass
    def time_epoch_to_datetime_tt2000(self):
        pass
Metrics:
total_program_units=5, total_doc_str=1, AvgCountLine=2, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.4,
CountClassBase=0, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=2, CountDeclMethod=4, CountDeclMethodAll=4,
CountLine=18, CountLineBlank=4, CountLineCode=10, CountLineCodeDecl=7, CountLineCodeExe=5, CountLineComment=4, CountStmt=10, CountStmtDecl=7, CountStmtExe=5,
MaxCyclomatic=1, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=4
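The metric values appear to map one-to-one onto the metric columns in the schema above; for this row, CommentToCodeRatio is consistent with CountLineComment / CountLineCode = 4 / 10 = 0.4.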
Row 147,649
repository_name: MAVENSDC/cdflib
file_path: cdflib/cdfread.py
class_name: cdflib.cdfread.CDF

human_written_code:
class CDF:
"""
Read a CDF file into the CDF object. This object contains methods to load
the cdf file information, variable names, and values.
Example
-------
>>> import cdflib
>>> cdf_file = cdflib.CDF('/path/to/cdf_file.cdf')
>>> cdf_file.cdf_info()
>>> x = cdf_file.varget("NameOfVariable", startrec=0, endrec=150)
"""
def __init__(self, path: Union[str, Path], validate: bool = False, string_encoding: str = "ascii", s3_read_method: int = 1):
"""
Parameters
----------
path : Path, str
Path to CDF file. This can be a link to a file in an S3 bucket as well.
validate : bool, optional
If True, validate the MD5 checksum of the CDF file.
string_encoding : str, optional
The encoding used to read strings. Defaults to 'ascii', which is what
the CDF internal format description prescribes as the encoding for
character strings. Other encodings may have been used to create files
however, and this keyword argument gives users the flexibility to read
those files.
        s3_read_method: int, optional
            If the user is specifying a file that lives within an AWS S3 bucket, this variable
            defines how the file is read in. The choices are:
            - 1 reads the entire file into memory (the default)
            - 2 downloads the file to a temporary directory
            - 3 reads the file in chunks directly from S3 over https
        Notes
        -----
        An open file handle to the CDF file is kept for as long as the CDF object is alive.
        It is automatically cleaned up when the CDF instance is deleted.
"""
if isinstance(path, Path):
fname = path.absolute().as_posix()
else:
fname = path
self.file: Union[str, Path]
if fname.startswith("s3://"):
# later put in s3 'does it exist' checker
self.ftype = "s3"
self.file = fname # path for files, fname for urls and S3
elif fname.startswith("http://") or fname.startswith("https://"):
# later put in url 404 'does it exist' checker
self.ftype = "url"
self.file = fname # path for files, fname for urls and S3
else:
self.ftype = "file"
path = Path(path).resolve().expanduser()
if not path.is_file():
path = path.with_suffix(".cdf")
if not path.is_file():
raise FileNotFoundError(f"{path} not found")
            self.file = path  # path for files, fname for urls and S3
self.string_encoding = string_encoding
self._f = self._file_or_url_or_s3_handler(str(self.file), self.ftype, s3_read_method)
magic_number = self._f.read(4).hex()
compressed_bool = self._f.read(4).hex()
if magic_number not in ("cdf30001", "cdf26002", "0000ffff"):
raise OSError(f"{path} is not a CDF file or a non-supported CDF!")
self.cdfversion = 3 if magic_number == "cdf30001" else 2
self._compressed = not (compressed_bool == "0000ffff")
self.compressed_file = None
self.temp_file: Optional[Path] = None
if self._compressed:
if self.ftype == "url" or self.ftype == "s3":
if s3_read_method == 3:
# extra step, read entire file
self._f.seek(0)
self._f = s3_fetchall(self._f.fhandle) # type: ignore
self._unstream_file(self._f)
path = self.file
self._uncompress_file()
if self.temp_file is None:
raise OSError("Decompression was unsuccessful. Only GZIP compression is currently supported.")
self.compressed_file = self.file
self.file = self.temp_file
self._f.close()
self._f = self.file.open("rb")
self.ftype = "file"
if self.cdfversion == 3:
cdr_info, foffs = self._read_cdr(8)
gdr_info = self._read_gdr(foffs)
else:
cdr_info, foffs = self._read_cdr2(8)
gdr_info = self._read_gdr2(foffs)
if cdr_info.md5 and validate:
if not self._md5_validation():
raise OSError("This file fails the md5 checksum.")
if not cdr_info.format_:
raise OSError("This package does not support multi-format CDF")
if cdr_info.encoding in (3, 14, 15):
raise OSError("This package does not support CDFs with this " + self._encoding_token(cdr_info.encoding) + " encoding")
# SET GLOBAL VARIABLES
self._post25 = cdr_info.post25
self._version = cdr_info.version
self._encoding = cdr_info.encoding
self._majority = self._major_token(cdr_info.majority)
self._copyright = cdr_info.copyright_
self._md5 = cdr_info.md5
self._first_zvariable = gdr_info.first_zvariable
self._first_rvariable = gdr_info.first_rvariable
self._first_adr = gdr_info.first_adr
self._num_zvariable = gdr_info.num_zvariables
self._num_rvariable = gdr_info.num_rvariables
self._rvariables_num_dims = gdr_info.rvariables_num_dims
self._rvariables_dim_sizes = gdr_info.rvariables_dim_sizes
self._num_att = gdr_info.num_attributes
self._num_rdim = gdr_info.rvariables_num_dims
self._rdim_sizes = gdr_info.rvariables_dim_sizes
if self.cdfversion == 3:
self._leap_second_updated = gdr_info.leapsecond_updated
if self.compressed_file is not None:
self.compressed_file = None
def __del__(self) -> None:
# This implicitly will delete a temporary uncompressed file if we
# created it earlier.
if hasattr(self, "_f") and hasattr(self._f, "close"):
self._f.close()
if hasattr(self, "temp_file") and self.temp_file is not None:
os.remove(self.temp_file)
def __getitem__(self, variable: str) -> Union[str, np.ndarray]:
return self.varget(variable)
def __enter__(self) -> "CDF":
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
return
def cdf_info(self) -> CDFInfo:
"""
Returns basic CDF information.
Returns
-------
CDFInfo
"""
varnames = self._get_varnames()
return CDFInfo(
self.file,
self._version,
self._encoding,
self._majority,
varnames[0],
varnames[1],
self._get_attnames(),
self._copyright,
self._md5,
self._num_rdim,
self._rdim_sizes,
self._compressed,
)
def varinq(self, variable: str) -> VDRInfo:
"""
Get basic variable information.
Returns
-------
VDRInfo
"""
vdr_info = self.vdr_info(variable)
return VDRInfo(
vdr_info.name,
vdr_info.variable_number,
self._variable_token(vdr_info.section_type),
vdr_info.data_type,
self._datatype_token(vdr_info.data_type),
vdr_info.num_elements,
vdr_info.num_dims,
vdr_info.dim_sizes,
self._sparse_token(vdr_info.sparse),
vdr_info.max_rec,
vdr_info.record_vary,
vdr_info.dim_vary,
vdr_info.compression_level,
vdr_info.pad,
vdr_info.blocking_factor,
)
def attinq(self, attribute: Union[str, int]) -> ADRInfo:
"""
Get attribute information.
Parameters
----------
attribute : str, int
Attribute to get information for.
Returns
-------
ADRInfo
"""
position = self._first_adr
if isinstance(attribute, str):
for _ in range(0, self._num_att):
name, next_adr = self._read_adr_fast(position)
if name.strip().lower() == attribute.strip().lower():
return self._read_adr(position)
position = next_adr
raise KeyError(f"No attribute {attribute}")
elif isinstance(attribute, int):
            if attribute < 0 or attribute > self._num_att:
raise KeyError(f"No attribute {attribute}")
for _ in range(0, attribute):
name, next_adr = self._read_adr_fast(position)
position = next_adr
return self._read_adr(position)
else:
raise ValueError("attribute keyword must be a string or integer")
def attget(self, attribute: Union[str, int], entry: Optional[Union[str, int]] = None) -> AttData:
"""
Returns the value of the attribute at the entry number provided.
A variable name can be used instead of its corresponding
entry number.
Parameters
----------
attribute : str, int
Attribute name or number to get.
entry : int, optional
Returns
-------
AttData
"""
# Starting position
position = self._first_adr
# Get Correct ADR
adr_info = None
if isinstance(attribute, str):
for _ in range(0, self._num_att):
name, next_adr = self._read_adr_fast(position)
if name.strip().lower() == attribute.strip().lower():
adr_info = self._read_adr(position)
if isinstance(entry, str) and adr_info.scope == 1:
# If the user has specified a string entry, they are obviously looking for a variable attribute.
# Filter out any global attributes that may have the same name.
adr_info = None
position = next_adr
continue
break
else:
position = next_adr
if adr_info is None:
raise KeyError(f"No attribute {attribute} for entry {entry}")
elif isinstance(attribute, int):
if (attribute < 0) or (attribute > self._num_att):
raise KeyError(f"No attribute {attribute}")
if not isinstance(entry, int):
raise TypeError(f"{entry} has to be a number.")
for _ in range(0, attribute):
name, next_adr = self._read_adr_fast(position)
position = next_adr
adr_info = self._read_adr(position)
else:
raise ValueError("Please set attribute keyword equal to " "the name or number of an attribute")
# Find the correct entry from the "entry" variable
if adr_info.scope == 1:
if not isinstance(entry, int):
raise ValueError('"entry" must be an integer')
num_entry_string = "num_gr_entry"
first_entry_string = "first_gr_entry"
max_entry_string = "max_gr_entry"
entry_num = entry
else:
var_num = -1
zvar = False
if isinstance(entry, str):
# a zVariable?
positionx = self._first_zvariable
for x in range(0, self._num_zvariable):
name, vdr_next = self._read_vdr_fast(positionx)
if name.strip().lower() == entry.strip().lower():
var_num = x
zvar = True
break
positionx = vdr_next
if var_num == -1:
# a rVariable?
positionx = self._first_rvariable
for x in range(0, self._num_rvariable):
name, vdr_next = self._read_vdr_fast(positionx)
if name.strip().lower() == entry.strip().lower():
var_num = x
break
positionx = vdr_next
if var_num == -1:
raise ValueError(f"No variable by this name: {entry}")
entry_num = var_num
else:
if self._num_zvariable > 0 and self._num_rvariable > 0:
raise ValueError("This CDF has both r and z variables. " "Use variable name instead")
if self._num_zvariable > 0:
zvar = True
entry_num = entry
if zvar:
num_entry_string = "num_z_entry"
first_entry_string = "first_z_entry"
max_entry_string = "max_z_entry"
else:
num_entry_string = "num_gr_entry"
first_entry_string = "first_gr_entry"
max_entry_string = "max_gr_entry"
if entry_num > getattr(adr_info, max_entry_string):
raise ValueError("The entry does not exist")
return self._get_attdata(adr_info, entry_num, getattr(adr_info, num_entry_string), getattr(adr_info, first_entry_string))
def varget(
self,
variable: Optional[str] = None,
epoch: Optional[str] = None,
starttime: Optional[epoch.epoch_types] = None,
endtime: Optional[epoch.epoch_types] = None,
startrec: int = 0,
endrec: Optional[int] = None,
) -> Union[str, np.ndarray]:
"""
Returns the variable data.
Parameters
----------
variable: str
Variable name to fetch.
startrec: int
Index of the first record to get.
endrec : int
Index of the last record to get. All records from *startrec* to
*endrec* inclusive are fetched.
Notes
-----
        The variable can be specified either by name or by variable number.
        By default the function returns a 'numpy.ndarray' or 'list' object,
        depending on the data type, containing the variable data and its
        specification.
        By default, the full variable data is returned. To acquire only a
        portion of the data for a record-varying variable, either the time
        range or the (0-based) record range can be specified.
        'epoch' can be used to specify which time variable this variable
        depends on and is to be searched for the time range.
        For ISTP-compliant CDFs, the time variable comes from this variable's
        'DEPEND_0' attribute, and the function searches for it automatically,
        so there is no need to specify 'epoch'.
        If either the start or end time is not specified, the minimum or
        maximum possible value for the specific epoch data type is assumed.
        If either the start or end record is not specified, the range starts
        at 0 and/or ends at the last written record.
        The start (and end) time should be presented as a list:
        [year month day hour minute second millisec] for CDF_EPOCH
        [year month day hour minute second millisec microsec nanosec picosec] for CDF_EPOCH16
        [year month day hour minute second millisec microsec nanosec] for CDF_TIME_TT2000
        If fewer time components are given, only the last item may carry a
        fractional part covering the remaining sub-time components.
        Note: CDF's CDF_EPOCH16 data type uses two 8-byte doubles for each data value.
        In Python, each such value is presented as a complex or numpy.complex128.
"""
if isinstance(variable, int) and self._num_zvariable > 0 and self._num_rvariable > 0:
raise ValueError("This CDF has both r and z variables. " "Use variable name instead")
if (starttime is not None or endtime is not None) and (startrec != 0 or endrec is not None):
raise ValueError("Can't specify both time and record range")
vdr_info = self.vdr_info(variable)
if vdr_info.max_rec < 0:
raise ValueError(f"No records found for variable {variable}")
return self._read_vardata(
vdr_info,
epoch=epoch,
starttime=starttime,
endtime=endtime,
startrec=startrec,
endrec=endrec,
)
def vdr_info(self, variable: Union[str, int]) -> VDR:
if isinstance(variable, int) and self._num_zvariable > 0 and self._num_rvariable > 0:
raise ValueError("This CDF has both r and z variables. " "Use variable name instead")
if isinstance(variable, str):
# Check z variables for the name, then r variables
position = self._first_zvariable
num_variables = self._num_zvariable
vdr_info = None
for zVar in [1, 0]:
for _ in range(0, num_variables):
name, vdr_next = self._read_vdr_fast(position)
if name.strip().lower() == variable.strip().lower():
vdr_info = self._read_vdr(position)
break
position = vdr_next
position = self._first_rvariable
num_variables = self._num_rvariable
if vdr_info is None:
raise ValueError(f"Variable name '{variable}' not found.")
elif isinstance(variable, int):
if self._num_zvariable > 0:
position = self._first_zvariable
num_variable = self._num_zvariable
# zVar = True
elif self._num_rvariable > 0:
position = self._first_rvariable
num_variable = self._num_rvariable
# zVar = False
if variable < 0 or variable >= num_variable:
raise ValueError(f"No variable by this number: {variable}")
for _ in range(0, variable):
name, next_vdr = self._read_vdr_fast(position)
position = next_vdr
vdr_info = self._read_vdr(position)
else:
raise ValueError("Please set variable keyword equal to " "the name or number of an variable")
return vdr_info
def globalattsget(self) -> Dict[str, List[Union[str, np.ndarray]]]:
"""
Gets all global attributes.
        This function returns all of the global attribute entries,
        as a dictionary mapping each global attribute name to the list
        of its entry values, from a CDF.
"""
byte_loc = self._first_adr
return_dict: Dict[str, List[Union[str, np.ndarray]]] = {}
for _ in range(self._num_att):
adr_info = self._read_adr(byte_loc)
if adr_info.scope != 1:
byte_loc = adr_info.next_adr_loc
continue
if adr_info.num_gr_entry == 0:
byte_loc = adr_info.next_adr_loc
continue
entries = []
aedr_byte_loc = adr_info.first_gr_entry
for _ in range(adr_info.num_gr_entry):
aedr_info = self._read_aedr(aedr_byte_loc)
entryData = aedr_info.entry
# This exists to get rid of extraneous numpy arrays
if isinstance(entryData, np.ndarray):
if len(entryData) == 1:
entryData = entryData[0]
entries.append(entryData)
aedr_byte_loc = aedr_info.next_aedr
return_dict[adr_info.name] = entries
byte_loc = adr_info.next_adr_loc
return return_dict
def varattsget(self, variable: Union[str, int]) -> Dict[str, Union[None, str, np.ndarray]]:
"""
Gets all variable attributes.
Unlike attget, which returns a single attribute entry value,
this function returns all of the variable attribute entries,
in a dictionary (in the form of 'attribute': value pair) for
a variable.
"""
if isinstance(variable, int) and self._num_zvariable > 0 and self._num_rvariable > 0:
raise ValueError("This CDF has both r and z variables. Use variable name")
if isinstance(variable, str):
position = self._first_zvariable
num_variables = self._num_zvariable
for zVar in [True, False]:
for _ in range(0, num_variables):
name, vdr_next = self._read_vdr_fast(position)
if name.strip().lower() == variable.strip().lower():
vdr_info = self._read_vdr(position)
return self._read_varatts(vdr_info.variable_number, zVar)
position = vdr_next
position = self._first_rvariable
num_variables = self._num_rvariable
raise ValueError(f"No variable by this name: {variable}")
elif isinstance(variable, int):
if self._num_zvariable > 0:
num_variable = self._num_zvariable
zVar = True
else:
num_variable = self._num_rvariable
zVar = False
if variable < 0 or variable >= num_variable:
raise ValueError(f"No variable by this number: {variable}")
return self._read_varatts(variable, zVar)
    def _uncompress_rle(self, data: bytes) -> bytearray:
        """
        Decompresses CDF run-length-encoded (RLE of zeros) data: a zero byte
        followed by a count byte expands to (count + 1) zero bytes, while any
        other byte is copied through unchanged.
        """
        result = bytearray()
        index = 0
        while index < len(data):
            value = data[index]
            if value == 0:
                # A zero byte is followed by a count byte: emit (count + 1) zeros
                index += 1
                count = data[index] + 1
                result += b"\0" * count
            else:
                # Any non-zero byte is a literal value
                result.append(value)
            index += 1
        return result
def _uncompress_file(self) -> None:
"""
Writes the current file into a file in the temporary directory.
If that doesn't work, create a new file in the CDFs directory.
"""
if self.cdfversion == 3:
data_start, data_size, cType, _ = self._read_ccr(8)
else:
data_start, data_size, cType, _ = self._read_ccr2(8)
if cType == 5:
self._f.seek(data_start)
decompressed_data = gzip_inflate(self._f.read(data_size))
elif cType == 1:
self._f.seek(data_start)
decompressed_data = self._uncompress_rle(self._f.read(data_size))
else:
return
self.temp_file = Path(tempfile.NamedTemporaryFile(suffix=".cdf").name)
with self.temp_file.open("wb") as g:
g.write(bytearray.fromhex("cdf30001"))
g.write(bytearray.fromhex("0000ffff"))
g.write(decompressed_data)
def _read_ccr(self, byte_loc: int) -> Tuple[int, int, int, int]:
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), "big")
self._f.seek(byte_loc + 12)
cproffset = int.from_bytes(self._f.read(8), "big")
data_start = byte_loc + 32
data_size = block_size - 32
cType, cParams = self._read_cpr(cproffset)
return data_start, data_size, cType, cParams
def _read_ccr2(self, byte_loc: int) -> Tuple[int, int, int, int]:
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), "big")
self._f.seek(byte_loc + 8)
cproffset = int.from_bytes(self._f.read(4), "big")
data_start = byte_loc + 20
data_size = block_size - 20
cType, cParams = self._read_cpr2(cproffset)
return data_start, data_size, cType, cParams
def _read_cpr(self, byte_loc: int) -> Tuple[int, int]:
if self.cdfversion == 3:
return self._read_cpr3(byte_loc)
else:
return self._read_cpr2(byte_loc)
def _read_cpr3(self, byte_loc: int) -> Tuple[int, int]:
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), "big")
cpr = self._f.read(block_size - 8)
cType = int.from_bytes(cpr[4:8], "big")
cParams = int.from_bytes(cpr[16:20], "big")
return cType, cParams
def _read_cpr2(self, byte_loc: int) -> Tuple[int, int]:
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), "big")
cpr = self._f.read(block_size - 4)
cType = int.from_bytes(cpr[4:8], "big")
cParams = int.from_bytes(cpr[16:20], "big")
return cType, cParams
def _md5_validation(self) -> bool:
"""
Verifies the MD5 checksum.
Only used in the __init__() function
"""
if self.compressed_file is not None:
fh = self.compressed_file.open("rb")
else:
fh = self._f
md5 = hashlib.md5()
block_size = 16384
fh.seek(-16, 2)
remaining = fh.tell() # File size minus checksum size
fh.seek(0)
while remaining > block_size:
data = fh.read(block_size)
remaining = remaining - block_size
md5.update(data)
if remaining > 0:
data = fh.read(remaining)
md5.update(data)
existing_md5 = fh.read(16).hex()
if self.compressed_file is not None:
fh.close()
return md5.hexdigest() == existing_md5
@staticmethod
def _encoding_token(encoding: int) -> str:
encodings = {
1: "NETWORK",
2: "SUN",
3: "VAX",
4: "DECSTATION",
5: "SGi",
6: "IBMPC",
7: "IBMRS",
9: "PPC",
11: "HP",
12: "NeXT",
13: "ALPHAOSF1",
14: "ALPHAVMSd",
15: "ALPHAVMSg",
16: "ALPHAVMSi",
}
return encodings[encoding]
@staticmethod
def _major_token(major: int) -> str:
majors = {1: "Row_major", 2: "Column_major"}
return majors[major]
@staticmethod
def _scope_token(scope: int) -> str:
scopes = {1: "Global", 2: "Variable"}
return scopes[scope]
@staticmethod
def _variable_token(variable: int) -> str:
variables = {3: "rVariable", 8: "zVariable"}
return variables[variable]
@staticmethod
def _datatype_token(datatype: int) -> str:
datatypes = {
1: "CDF_INT1",
2: "CDF_INT2",
4: "CDF_INT4",
8: "CDF_INT8",
11: "CDF_UINT1",
12: "CDF_UINT2",
14: "CDF_UINT4",
21: "CDF_REAL4",
22: "CDF_REAL8",
31: "CDF_EPOCH",
32: "CDF_EPOCH16",
33: "CDF_TIME_TT2000",
41: "CDF_BYTE",
44: "CDF_FLOAT",
45: "CDF_DOUBLE",
51: "CDF_CHAR",
52: "CDF_UCHAR",
}
return datatypes[datatype]
@staticmethod
def _sparse_token(sparse: int) -> str:
sparses = {0: "No_sparse", 1: "Pad_sparse", 2: "Prev_sparse"}
return sparses[sparse]
def _get_varnames(self) -> Tuple[List[str], List[str]]:
zvars = []
rvars = []
if self._num_zvariable > 0:
position = self._first_zvariable
num_variable = self._num_zvariable
for _ in range(0, num_variable):
name, next_vdr = self._read_vdr_fast(position)
zvars.append(name)
position = next_vdr
if self._num_rvariable > 0:
position = self._first_rvariable
num_variable = self._num_rvariable
for _ in range(0, num_variable):
name, next_vdr = self._read_vdr_fast(position)
rvars.append(name)
position = next_vdr
return rvars, zvars
def _get_attnames(self) -> List[Dict[str, str]]:
attrs = []
position = self._first_adr
for _ in range(0, self._num_att):
attr = {}
adr_info = self._read_adr(position)
attr[adr_info.name] = self._scope_token(adr_info.scope)
attrs.append(attr)
position = adr_info.next_adr_loc
return attrs
def _read_cdr(self, byte_loc: int) -> Tuple[CDRInfo, int]:
"""
Read a CDF descriptor record (CDR).
"""
self._f.seek(0)
self._f.seek(byte_loc)
block_size = int.from_bytes(self._f.read(8), "big")
cdr = self._f.read(block_size - 8)
foffs = self._f.tell()
# _ = int.from_bytes(cdr[0:4],'big') #Section Type
# gdroff = int.from_bytes(cdr[4:12], 'big') # GDR Location
version = int.from_bytes(cdr[12:16], "big")
if version not in (2, 3):
raise ValueError(f"CDF version {version} not handled")
release = int.from_bytes(cdr[16:20], "big")
encoding = int.from_bytes(cdr[20:24], "big")
# FLAG
#
# 0 The majority of variable values within a variable record.
# Variable records are described in Chapter 4. Set indicates
# row-majority. Clear indicates column-majority.
# 1 The file format of the CDF. Set indicates single-file.
# Clear indicates multi-file.
# 2 The checksum of the CDF. Set indicates a checksum method is used.
# 3 The MD5 checksum method indicator.
# Set indicates MD5 method is used for the checksum. Bit 2 must be set.
# 4 Reserved for another checksum method.
# Bit 2 must be set and bit 3 must be clear.
flag = int.from_bytes(cdr[24:28], "big")
flag_bits = f"{flag:032b}"
row_majority = flag_bits[31] == "1"
single_format = flag_bits[30] == "1"
md5 = flag_bits[29] == "1" and flag_bits[28] == "1"
increment = int.from_bytes(cdr[36:40], "big")
cdfcopyright = cdr[48:].decode(self.string_encoding)
cdfcopyright = cdfcopyright.replace("\x00", "")
cdr_info = CDRInfo(
encoding=encoding,
copyright_=cdfcopyright,
version=str(version) + "." + str(release) + "." + str(increment),
majority=1 if row_majority else 2,
format_=single_format,
md5=md5,
post25=True,
)
return cdr_info, foffs
def _read_cdr2(self, byte_loc: int) -> Tuple[CDRInfo, int]:
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), "big")
cdr = self._f.read(block_size - 4)
foffs = self._f.tell()
# gdroff = int.from_bytes(cdr[4:8], 'big') # GDR Location
version = int.from_bytes(cdr[8:12], "big")
release = int.from_bytes(cdr[12:16], "big")
encoding = int.from_bytes(cdr[16:20], "big")
flag = int.from_bytes(cdr[20:24], "big")
flag_bits = f"{flag:032b}"
row_majority = flag_bits[31] == "1"
single_format = flag_bits[30] == "1"
md5 = flag_bits[29] == "1" and flag_bits[28] == "1"
increment = int.from_bytes(cdr[32:36], "big")
cdfcopyright = cdr[44:].decode(self.string_encoding)
cdfcopyright = cdfcopyright.replace("\x00", "")
cdr_info = CDRInfo(
encoding=encoding,
copyright_=cdfcopyright,
version=str(version) + "." + str(release) + "." + str(increment),
majority=1 if row_majority else 2,
format_=single_format,
md5=md5,
post25=version == 2 and release >= 5,
)
return cdr_info, foffs
def _read_gdr(self, byte_loc: int) -> GDRInfo:
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), "big") # Block Size
gdr = self._f.read(block_size - 8)
first_rvariable = int.from_bytes(gdr[4:12], "big", signed=True)
first_zvariable = int.from_bytes(gdr[12:20], "big", signed=True)
first_adr = int.from_bytes(gdr[20:28], "big", signed=True)
eof = int.from_bytes(gdr[28:36], "big", signed=True)
num_rvariable = int.from_bytes(gdr[36:40], "big", signed=True)
num_att = int.from_bytes(gdr[40:44], "big", signed=True)
num_rdim = int.from_bytes(gdr[48:52], "big", signed=True)
num_zvariable = int.from_bytes(gdr[52:56], "big", signed=True)
leapSecondlastUpdated = int.from_bytes(gdr[68:72], "big", signed=True)
# rDimSizes, depends on Number of dimensions for r variables
# A bunch of 4 byte integers in a row. Length is (size of GDR) - 84
        # In this case, there is nothing
rdim_sizes = []
for x in range(0, num_rdim):
ioff = 76 + x * 4
rdim_sizes.append(int.from_bytes(gdr[ioff : ioff + 4], "big", signed=True))
return GDRInfo(
first_zvariable=first_zvariable,
first_rvariable=first_rvariable,
first_adr=first_adr,
num_zvariables=num_zvariable,
num_rvariables=num_rvariable,
num_attributes=num_att,
rvariables_num_dims=num_rdim,
rvariables_dim_sizes=rdim_sizes,
eof=eof,
leapsecond_updated=leapSecondlastUpdated,
)
def _read_gdr2(self, byte_loc: int) -> GDRInfo:
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), "big") # Block Size
gdr = self._f.read(block_size - 4)
first_rvariable = int.from_bytes(gdr[4:8], "big", signed=True)
first_zvariable = int.from_bytes(gdr[8:12], "big", signed=True)
first_adr = int.from_bytes(gdr[12:16], "big", signed=True)
eof = int.from_bytes(gdr[16:20], "big", signed=True)
num_rvariable = int.from_bytes(gdr[20:24], "big", signed=True)
num_att = int.from_bytes(gdr[24:28], "big", signed=True)
num_rdim = int.from_bytes(gdr[32:36], "big", signed=True)
num_zvariable = int.from_bytes(gdr[36:40], "big", signed=True)
rdim_sizes = []
for x in range(0, num_rdim):
ioff = 56 + x * 4
rdim_sizes.append(int.from_bytes(gdr[ioff : ioff + 4], "big", signed=True))
return GDRInfo(
first_zvariable=first_zvariable,
first_rvariable=first_rvariable,
first_adr=first_adr,
num_zvariables=num_zvariable,
num_rvariables=num_rvariable,
num_attributes=num_att,
rvariables_num_dims=num_rdim,
rvariables_dim_sizes=rdim_sizes,
eof=eof,
)
def _read_varatts(self, var_num: int, zVar: bool) -> Dict[str, Union[None, str, np.ndarray]]:
byte_loc = self._first_adr
return_dict: Dict[str, Union[None, str, np.ndarray]] = {}
for z in range(0, self._num_att):
adr_info = self._read_adr(byte_loc)
if adr_info.scope == 1:
byte_loc = adr_info.next_adr_loc
continue
if zVar:
byte_loc = adr_info.first_z_entry
num_entry = adr_info.num_z_entry
else:
byte_loc = adr_info.first_gr_entry
num_entry = adr_info.num_gr_entry
for _ in range(0, num_entry):
entryNum, byte_next = self._read_aedr_fast(byte_loc)
if entryNum != var_num:
byte_loc = byte_next
continue
aedr_info = self._read_aedr(byte_loc)
entryData = aedr_info.entry
# This exists to get rid of extraneous numpy arrays
if isinstance(entryData, np.ndarray):
if len(entryData) == 1:
entryData = entryData[0]
return_dict[adr_info.name] = entryData
break
byte_loc = adr_info.next_adr_loc
return return_dict
def _read_adr(self, position: int) -> ADRInfo:
"""
Read an attribute descriptor record (ADR).
"""
if self.cdfversion == 3:
return self._read_adr3(position)
else:
return self._read_adr2(position)
def _read_adr3(self, byte_loc: int) -> ADRInfo:
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), "big") # Block Size
adr = self._f.read(block_size - 8)
next_adr_loc = int.from_bytes(adr[4:12], "big", signed=True)
position_next_gr_entry = int.from_bytes(adr[12:20], "big", signed=True)
scope = int.from_bytes(adr[20:24], "big", signed=True)
num = int.from_bytes(adr[24:28], "big", signed=True)
num_gr_entry = int.from_bytes(adr[28:32], "big", signed=True)
MaxEntry = int.from_bytes(adr[32:36], "big", signed=True)
position_next_z_entry = int.from_bytes(adr[40:48], "big", signed=True)
num_z_entry = int.from_bytes(adr[48:52], "big", signed=True)
MaxZEntry = int.from_bytes(adr[52:56], "big", signed=True)
name = str(adr[60:315].decode(self.string_encoding))
name = name.replace("\x00", "")
return ADRInfo(
scope,
next_adr_loc,
num,
num_gr_entry,
MaxEntry,
num_z_entry,
MaxZEntry,
position_next_z_entry,
position_next_gr_entry,
name,
)
def _read_adr2(self, byte_loc: int) -> ADRInfo:
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), "big") # Block Size
adr = self._f.read(block_size - 4)
next_adr_loc = int.from_bytes(adr[4:8], "big", signed=True)
position_next_gr_entry = int.from_bytes(adr[8:12], "big", signed=True)
scope = int.from_bytes(adr[12:16], "big", signed=True)
num = int.from_bytes(adr[16:20], "big", signed=True)
num_gr_entry = int.from_bytes(adr[20:24], "big", signed=True)
MaxEntry = int.from_bytes(adr[24:28], "big", signed=True)
position_next_z_entry = int.from_bytes(adr[32:36], "big", signed=True)
num_z_entry = int.from_bytes(adr[36:40], "big", signed=True)
MaxZEntry = int.from_bytes(adr[40:44], "big", signed=True)
name = str(adr[48:112].decode(self.string_encoding))
name = name.replace("\x00", "")
return ADRInfo(
scope,
next_adr_loc,
num,
num_gr_entry,
MaxEntry,
num_z_entry,
MaxZEntry,
position_next_z_entry,
position_next_gr_entry,
name,
)
def _read_adr_fast(self, position: int) -> Tuple[str, int]:
"""
Read an attribute descriptor record (ADR).
"""
if self.cdfversion == 3:
return self._read_adr_fast3(position)
else:
return self._read_adr_fast2(position)
def _read_adr_fast3(self, byte_loc: int) -> Tuple[str, int]:
# Position of next ADR
self._f.seek(byte_loc + 12, 0)
next_adr_loc = int.from_bytes(self._f.read(8), "big", signed=True)
# Name
self._f.seek(byte_loc + 68, 0)
name = str(self._f.read(256).decode(self.string_encoding))
name = name.replace("\x00", "")
return name, next_adr_loc
def _read_adr_fast2(self, byte_loc: int) -> Tuple[str, int]:
# Position of next ADR
self._f.seek(byte_loc + 8, 0)
next_adr_loc = int.from_bytes(self._f.read(4), "big", signed=True)
# Name
self._f.seek(byte_loc + 52, 0)
name = str(self._f.read(64).decode(self.string_encoding))
name = name.replace("\x00", "")
return name, next_adr_loc
def _read_aedr_fast(self, byte_loc: int) -> Tuple[int, int]:
if self.cdfversion == 3:
return self._read_aedr_fast3(byte_loc)
else:
return self._read_aedr_fast2(byte_loc)
def _read_aedr_fast3(self, byte_loc: int) -> Tuple[int, int]:
self._f.seek(byte_loc + 12, 0)
next_aedr = int.from_bytes(self._f.read(8), "big", signed=True)
# Variable number or global entry number
self._f.seek(byte_loc + 28, 0)
entry_num = int.from_bytes(self._f.read(4), "big", signed=True)
return entry_num, next_aedr
def _read_aedr_fast2(self, byte_loc: int) -> Tuple[int, int]:
self._f.seek(byte_loc + 8, 0)
next_aedr = int.from_bytes(self._f.read(4), "big", signed=True)
# Variable number or global entry number
self._f.seek(byte_loc + 20, 0)
entry_num = int.from_bytes(self._f.read(4), "big", signed=True)
return entry_num, next_aedr
def _read_aedr(self, byte_loc: int) -> AEDR:
if self.cdfversion == 3:
return self._read_aedr3(byte_loc)
else:
return self._read_aedr2(byte_loc)
def _read_aedr3(self, byte_loc: int) -> AEDR:
"""
Reads an Attribute Entry Descriptor Record at a specific byte location.
"""
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), "big")
aedr = self._f.read(block_size - 8)
next_aedr = int.from_bytes(aedr[4:12], "big", signed=True)
data_type = int.from_bytes(aedr[16:20], "big", signed=True)
# Variable number or global entry number
entry_num = int.from_bytes(aedr[20:24], "big", signed=True)
# Number of elements
        # Length of string if string, otherwise it's the number of numbers
num_elements = int.from_bytes(aedr[24:28], "big", signed=True)
# Supposed to be reserved space
num_strings = int.from_bytes(aedr[28:32], "big", signed=True)
if num_strings < 1:
num_strings = 1
# Literally nothing
# _ = int.from_bytes(aedr[32:36],'big', signed=True) #Nothing
# _ = int.from_bytes(aedr[36:40],'big', signed=True) #Nothing
# _ = int.from_bytes(aedr[40:44],'big', signed=True) #Nothing
# _ = int.from_bytes(aedr[44:48],'big', signed=True) #Nothing
byte_stream = aedr[48:]
entry = self._read_data(byte_stream, data_type, 1, num_elements)
return AEDR(entry, data_type, num_elements, next_aedr, entry_num, num_strings)
def _read_aedr2(self, byte_loc: int) -> AEDR:
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), "big")
aedr = self._f.read(block_size - 4)
next_aedr = int.from_bytes(aedr[4:8], "big", signed=True)
data_type = int.from_bytes(aedr[12:16], "big", signed=True)
# Variable number or global entry number
entry_num = int.from_bytes(aedr[16:20], "big", signed=True)
# Number of elements
        # Length of string if string, otherwise it's the number of numbers
num_elements = int.from_bytes(aedr[20:24], "big", signed=True)
byte_stream = aedr[44:]
entry = self._read_data(byte_stream, data_type, 1, num_elements)
return AEDR(entry, data_type, num_elements, next_aedr, entry_num)
def _read_vdr(self, byte_loc: int) -> VDR:
"""
Read a variable descriptor record (VDR).
"""
if self.cdfversion == 3:
return self._read_vdr3(byte_loc)
else:
return self._read_vdr2(byte_loc)
def _read_vdr3(self, byte_loc: int) -> VDR:
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), "big")
vdr = self._f.read(block_size - 8)
# Type of internal record
section_type = int.from_bytes(vdr[0:4], "big")
next_vdr = int.from_bytes(vdr[4:12], "big", signed=True)
data_type = int.from_bytes(vdr[12:16], "big", signed=True)
max_rec = int.from_bytes(vdr[16:20], "big", signed=True)
head_vxr = int.from_bytes(vdr[20:28], "big", signed=True)
last_vxr = int.from_bytes(vdr[28:36], "big", signed=True)
flags = int.from_bytes(vdr[36:40], "big", signed=True)
flag_bits = f"{flags:032b}"
record_variance_bool = flag_bits[31] == "1"
pad_bool = flag_bits[30] == "1"
compression_bool = flag_bits[29] == "1"
sparse = int.from_bytes(vdr[40:44], "big", signed=True)
num_elements = int.from_bytes(vdr[56:60], "big", signed=True)
var_num = int.from_bytes(vdr[60:64], "big", signed=True)
CPRorSPRoffset = int.from_bytes(vdr[64:72], "big", signed=True)
blocking_factor = int.from_bytes(vdr[72:76], "big", signed=True)
name = str(vdr[76:332].decode(self.string_encoding))
name = name.replace("\x00", "")
zdim_sizes = []
dim_sizes = []
dim_varys = []
if section_type == 8:
# zvariable
num_dims = int.from_bytes(vdr[332:336], "big", signed=True)
for x in range(0, num_dims):
ioff = 336 + 4 * x
zdim_sizes.append(int.from_bytes(vdr[ioff : ioff + 4], "big", signed=True))
coff = 336 + 4 * num_dims
for x in range(0, num_dims):
dim_varys.append(int.from_bytes(vdr[coff + 4 * x : coff + 4 * x + 4], "big", signed=True))
adj = 0
# Check for "False" dimensions, and delete them
for x in range(0, num_dims):
y = num_dims - x - 1
if dim_varys[y] == 0:
del zdim_sizes[y]
del dim_varys[y]
adj = adj + 1
num_dims = num_dims - adj
coff = 336 + 8 * num_dims
else:
# rvariable
for x in range(0, self._rvariables_num_dims):
ioff = 332 + 4 * x
dim_varys.append(int.from_bytes(vdr[ioff : ioff + 4], "big", signed=True))
for x in range(0, self._rvariables_num_dims):
if dim_varys[x] != 0:
dim_sizes.append(self._rvariables_dim_sizes[x])
num_dims = len(dim_sizes)
coff = 332 + 4 * self._rvariables_num_dims
# Only set if pad value is in the flags
if pad_bool:
byte_stream = vdr[coff:]
pad = self._read_data(byte_stream, data_type, 1, num_elements)
else:
pad = None
if section_type == 8:
dim_sizes = zdim_sizes
if compression_bool:
ctype, cparm = self._read_cpr(CPRorSPRoffset)
compression_level = cparm
else:
compression_level = 0
return VDR(
data_type=data_type,
section_type=section_type,
next_vdr_location=next_vdr,
variable_number=var_num,
head_vxr=head_vxr,
last_vxr=last_vxr,
max_rec=max_rec,
name=name,
num_dims=num_dims,
dim_sizes=dim_sizes,
compression_bool=compression_bool,
compression_level=compression_level,
blocking_factor=blocking_factor,
dim_vary=dim_varys,
record_vary=record_variance_bool,
num_elements=num_elements,
sparse=sparse,
pad=pad,
)
def _read_vdr2(self, byte_loc: int) -> VDR:
if self._post25 is True:
toadd = 0
else:
toadd = 128
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), "big")
vdr = self._f.read(block_size - 4)
# Type of internal record
section_type = int.from_bytes(vdr[0:4], "big")
next_vdr = int.from_bytes(vdr[4:8], "big", signed=True)
data_type = int.from_bytes(vdr[8:12], "big", signed=True)
max_rec = int.from_bytes(vdr[12:16], "big", signed=True)
head_vxr = int.from_bytes(vdr[16:20], "big", signed=True)
last_vxr = int.from_bytes(vdr[20:24], "big", signed=True)
flags = int.from_bytes(vdr[24:28], "big", signed=True)
flag_bits = f"{flags:032b}"
record_variance_bool = flag_bits[31] == "1"
pad_bool = flag_bits[30] == "1"
compression_bool = flag_bits[29] == "1"
sparse = int.from_bytes(vdr[28:32], "big", signed=True)
num_elements = int.from_bytes(vdr[44 + toadd : 48 + toadd], "big", signed=True)
var_num = int.from_bytes(vdr[48 + toadd : 52 + toadd], "big", signed=True)
CPRorSPRoffset = int.from_bytes(vdr[52 + toadd : 56 + toadd], "big", signed=True)
blocking_factor = int.from_bytes(vdr[56 + toadd : 60 + toadd], "big", signed=True)
name = str(vdr[60 + toadd : 124 + toadd].decode(self.string_encoding))
name = name.replace("\x00", "")
zdim_sizes = []
dim_sizes = []
dim_varys = []
if section_type == 8:
# zvariable
num_dims = int.from_bytes(vdr[124 + toadd : 128 + toadd], "big", signed=True)
for x in range(0, num_dims):
xoff = 128 + toadd + 4 * x
zdim_sizes.append(int.from_bytes(vdr[xoff : xoff + 4], "big", signed=True))
coff = 128 + toadd + 4 * num_dims
for x in range(0, num_dims):
icoff = coff + 4 * x
if int.from_bytes(vdr[icoff : icoff + 4], "big", signed=True) == 0:
dim_varys.append(False)
else:
dim_varys.append(True)
adj = 0
# Check for "False" dimensions, and delete them
for x in range(0, num_dims):
y = num_dims - x - 1
if dim_varys[y] == 0 or dim_varys[y] == False:
del zdim_sizes[y]
del dim_varys[y]
adj = adj + 1
num_dims = num_dims - adj
coff = 128 + toadd + 8 * num_dims
else:
# rvariable
for x in range(0, self._rvariables_num_dims):
ix = 124 + toadd + 4 * x
if int.from_bytes(vdr[ix : ix + 4], "big", signed=True) == 0:
dim_varys.append(False)
else:
dim_varys.append(True)
for x in range(0, len(dim_varys)):
dim_sizes.append(self._rvariables_dim_sizes[x])
num_dims = len(dim_sizes)
coff = 124 + toadd + 4 * self._rvariables_num_dims
# Only set if pad value is in the flags
pad: Union[None, str, np.ndarray] = None
if pad_bool:
byte_stream = vdr[coff:]
try:
pad = self._read_data(byte_stream, data_type, 1, num_elements)
except Exception:
if data_type == 51 or data_type == 52:
pad = " " * num_elements
if section_type == 8:
dim_sizes = zdim_sizes
if compression_bool:
ctype, cparm = self._read_cpr(CPRorSPRoffset)
compression_level = cparm
else:
compression_level = 0
return VDR(
data_type=data_type,
section_type=section_type,
next_vdr_location=next_vdr,
variable_number=var_num,
head_vxr=head_vxr,
last_vxr=last_vxr,
max_rec=max_rec,
name=name,
num_dims=num_dims,
dim_sizes=dim_sizes,
compression_bool=compression_bool,
compression_level=compression_level,
blocking_factor=blocking_factor,
dim_vary=dim_varys,
record_vary=record_variance_bool,
num_elements=num_elements,
sparse=sparse,
pad=pad,
)
def _read_vdr_fast(self, byte_loc: int) -> Tuple[str, int]:
if self.cdfversion == 3:
return self._read_vdr_fast3(byte_loc)
else:
return self._read_vdr_fast2(byte_loc)
def _read_vdr_fast3(self, byte_loc: int) -> Tuple[str, int]:
self._f.seek(byte_loc + 12, 0)
next_vdr = int.from_bytes(self._f.read(8), "big", signed=True)
self._f.seek(byte_loc + 84, 0)
name = str(self._f.read(256).decode(self.string_encoding))
name = name.replace("\x00", "")
return name, next_vdr
def _read_vdr_fast2(self, byte_loc: int) -> Tuple[str, int]:
if self._post25:
toadd = 0
else:
toadd = 128
self._f.seek(byte_loc + 8, 0)
next_vdr = int.from_bytes(self._f.read(4), "big", signed=True)
self._f.seek(byte_loc + toadd + 64, 0)
name = str(self._f.read(64).decode(self.string_encoding))
name = name.replace("\x00", "")
return name, next_vdr
    def _read_vxrs(
        self, byte_loc: int, vvr_offsets: Optional[List[int]] = None, vvr_start: Optional[List[int]] = None, vvr_end: Optional[List[int]] = None
    ) -> Tuple[List[int], List[int], List[int]]:
        # Avoid shared mutable default arguments; start from fresh lists when none are passed in.
        vvr_offsets = [] if vvr_offsets is None else vvr_offsets
        vvr_start = [] if vvr_start is None else vvr_start
        vvr_end = [] if vvr_end is None else vvr_end
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), "big", signed=True) # Block Size
vxrs = self._f.read(block_size - 8)
next_vxr_pos = int.from_bytes(vxrs[4:12], "big", signed=True)
num_ent = int.from_bytes(vxrs[12:16], "big", signed=True)
num_ent_used = int.from_bytes(vxrs[16:20], "big", signed=True)
# coff = 20
for ix in range(0, num_ent_used):
soffset = 20 + 4 * ix
num_start = int.from_bytes(vxrs[soffset : soffset + 4], "big", signed=True)
eoffset = 20 + 4 * num_ent + 4 * ix
num_end = int.from_bytes(vxrs[eoffset : eoffset + 4], "big", signed=True)
ooffset = 20 + 2 * 4 * num_ent + 8 * ix
rec_offset = int.from_bytes(vxrs[ooffset : ooffset + 8], "big", signed=True)
type_offset = 8 + rec_offset
self._f.seek(type_offset, 0)
next_type = int.from_bytes(self._f.read(4), "big", signed=True)
if next_type == 6:
vvr_offsets, vvr_start, vvr_end = self._read_vxrs(
rec_offset, vvr_offsets=vvr_offsets, vvr_start=vvr_start, vvr_end=vvr_end
)
else:
vvr_offsets.extend([rec_offset])
vvr_start.extend([num_start])
vvr_end.extend([num_end])
if next_vxr_pos != 0:
vvr_offsets, vvr_start, vvr_end = self._read_vxrs(
next_vxr_pos, vvr_offsets=vvr_offsets, vvr_start=vvr_start, vvr_end=vvr_end
)
return vvr_offsets, vvr_start, vvr_end
    def _read_vxrs2(
        self, byte_loc: int, vvr_offsets: Optional[List[int]] = None, vvr_start: Optional[List[int]] = None, vvr_end: Optional[List[int]] = None
    ) -> Tuple[List[int], List[int], List[int]]:
        # Avoid shared mutable default arguments; start from fresh lists when none are passed in.
        vvr_offsets = [] if vvr_offsets is None else vvr_offsets
        vvr_start = [] if vvr_start is None else vvr_start
        vvr_end = [] if vvr_end is None else vvr_end
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), "big", signed=True)
vxrs = self._f.read(block_size - 4)
next_vxr_pos = int.from_bytes(vxrs[4:8], "big", signed=True)
num_ent = int.from_bytes(vxrs[8:12], "big", signed=True)
num_ent_used = int.from_bytes(vxrs[12:16], "big", signed=True)
# coff = 16
for ix in range(0, num_ent_used):
soffset = 16 + 4 * ix
num_start = int.from_bytes(vxrs[soffset : soffset + 4], "big", signed=True)
eoffset = 16 + 4 * num_ent + 4 * ix
num_end = int.from_bytes(vxrs[eoffset : eoffset + 4], "big", signed=True)
ooffset = 16 + 2 * 4 * num_ent + 4 * ix
rec_offset = int.from_bytes(vxrs[ooffset : ooffset + 4], "big", signed=True)
type_offset = 4 + rec_offset
self._f.seek(type_offset, 0)
next_type = int.from_bytes(self._f.read(4), "big", signed=True)
if next_type == 6:
vvr_offsets, vvr_start, vvr_end = self._read_vxrs2(
rec_offset, vvr_offsets=vvr_offsets, vvr_start=vvr_start, vvr_end=vvr_end
)
else:
vvr_offsets.extend([rec_offset])
vvr_start.extend([num_start])
vvr_end.extend([num_end])
if next_vxr_pos != 0:
vvr_offsets, vvr_start, vvr_end = self._read_vxrs2(
next_vxr_pos, vvr_offsets=vvr_offsets, vvr_start=vvr_start, vvr_end=vvr_end
)
return vvr_offsets, vvr_start, vvr_end
def _read_vvrs(
self, vdr: VDR, vvr_offs: List[int], vvr_start: List[int], vvr_end: List[int], startrec: int, endrec: int
) -> Union[str, np.ndarray]:
"""
Reads in all VVRS that are pointed to in the VVR_OFFS array.
Creates a large byte array of all values called "byte_stream".
Decodes the byte_stream, then returns them.
"""
numBytes = self._type_size(vdr.data_type, vdr.num_elements)
numValues = self._num_values(vdr)
totalRecs = endrec - startrec + 1
firstBlock = -1
lastBlock = -1
totalBytes = numBytes * numValues * totalRecs
byte_stream = bytearray(totalBytes)
pos = 0
if vdr.sparse == 0:
for vvr_num in range(0, len(vvr_offs)):
if vvr_end[vvr_num] >= startrec and firstBlock == -1:
firstBlock = vvr_num
if vvr_end[vvr_num] >= endrec:
lastBlock = vvr_num
break
for vvr_num in range(firstBlock, (lastBlock + 1)):
if self.cdfversion == 3:
var_block_data = self._read_vvr_block(vvr_offs[vvr_num])
else:
var_block_data = self._read_vvr_block2(vvr_offs[vvr_num])
asize = len(var_block_data)
byte_stream[pos : pos + asize] = var_block_data
pos = pos + asize
startPos = (startrec - vvr_start[firstBlock]) * numBytes * numValues
stopOff = (vvr_end[lastBlock] - endrec) * numBytes * numValues
byte_stream = byte_stream[startPos : len(byte_stream) - stopOff]
else:
# with sparse records
if vdr.pad is not None:
# use default pad value
filled_data = self._convert_np_data(vdr.pad, vdr.data_type, vdr.num_elements)
else:
filled_data = self._convert_np_data(
self._default_pad(vdr.data_type, vdr.num_elements),
vdr.data_type,
vdr.num_elements,
)
cur_block = -1
rec_size = numBytes * numValues
for rec_num in range(startrec, (endrec + 1)):
block, prev_block = self._find_block(vvr_start, vvr_end, cur_block, rec_num)
if block > -1:
record_off = rec_num - vvr_start[block]
if cur_block != block:
if self.cdfversion == 3:
var_block_data = self._read_vvr_block(vvr_offs[block])
else:
var_block_data = self._read_vvr_block2(vvr_offs[block])
cur_block = block
xoff = record_off * rec_size
byte_stream[pos : pos + rec_size] = var_block_data[xoff : xoff + rec_size]
else:
if vdr.sparse == 1:
# use defined pad or default pad
byte_stream[pos : pos + rec_size] = filled_data * numValues
else:
# use previous physical record
if prev_block != -1:
if self.cdfversion == 3:
var_prev_block_data = self._read_vvr_block(vvr_offs[prev_block])
else:
var_prev_block_data = self._read_vvr_block2(vvr_offs[prev_block])
lastRecOff = (vvr_end[prev_block] - vvr_start[prev_block]) * rec_size
byte_stream[pos : pos + rec_size] = var_prev_block_data[lastRecOff:]
else:
byte_stream[pos : pos + rec_size] = filled_data * numValues
pos = pos + rec_size
if block > -1:
cur_block = block
dimensions = []
var_vary = vdr.dim_vary
var_sizes = vdr.dim_sizes
for x in range(0, vdr.num_dims):
if var_vary[x] == 0:
continue
dimensions.append(var_sizes[x])
return self._read_data(byte_stream, vdr.data_type, totalRecs, vdr.num_elements, dimensions)
def _convert_option(self) -> str:
"""
Determines how to convert CDF byte ordering to the system
byte ordering.
"""
if sys.byteorder == "little" and self._endian() == "big-endian":
# big->little
order = ">"
elif sys.byteorder == "big" and self._endian() == "little-endian":
# little->big
order = "<"
else:
# no conversion
order = "="
return order
def _endian(self) -> str:
"""
        Determines the endianness of the CDF file
Only used in __init__
"""
if (
self._encoding == 1
or self._encoding == 2
or self._encoding == 5
or self._encoding == 7
or self._encoding == 9
or self._encoding == 11
or self._encoding == 12
):
return "big-endian"
else:
return "little-endian"
@staticmethod
def _type_size(data_type: Union[int, str], num_elms: int) -> int:
# DATA TYPES
#
# 1 - 1 byte signed int
# 2 - 2 byte signed int
# 4 - 4 byte signed int
# 8 - 8 byte signed int
# 11 - 1 byte unsigned int
# 12 - 2 byte unsigned int
# 14 - 4 byte unsigned int
# 41 - same as 1
# 21 - 4 byte float
# 22 - 8 byte float (double)
# 44 - same as 21
# 45 - same as 22
# 31 - double representing milliseconds
# 32 - 2 doubles representing milliseconds
# 33 - 8 byte signed integer representing nanoseconds from J2000
# 51 - signed character
# 52 - unsigned character
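        # For the character types (51/52, CDF_CHAR/CDF_UCHAR) the size is the number
        # of elements, e.g. _type_size(51, 10) -> 10; all other types have a fixed
        # size in bytes, e.g. _type_size(22, 1) -> 8.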
if isinstance(data_type, int):
if (data_type == 1) or (data_type == 11) or (data_type == 41):
return 1
elif (data_type == 2) or (data_type == 12):
return 2
elif (data_type == 4) or (data_type == 14):
return 4
elif (data_type == 8) or (data_type == 33):
return 8
elif (data_type == 21) or (data_type == 44):
return 4
elif (data_type == 22) or (data_type == 31) or (data_type == 45):
return 8
elif data_type == 32:
return 16
elif (data_type == 51) or (data_type == 52):
return num_elms
else:
raise TypeError("Unknown data type....")
elif isinstance(data_type, str):
data_typeU = data_type.upper()
if (data_typeU == "CDF_INT1") or (data_typeU == "CDF_UINT1") or (data_typeU == "CDF_BYTE"):
return 1
elif (data_typeU == "CDF_INT2") or (data_typeU == "CDF_UINT2"):
return 2
elif (data_typeU == "CDF_INT4") or (data_typeU == "CDF_UINT4"):
return 4
elif (data_typeU == "CDF_INT8") or (data_typeU == "CDF_TIME_TT2000"):
return 8
elif (data_typeU == "CDF_REAL4") or (data_typeU == "CDF_FLOAT"):
return 4
elif (data_typeU == "CDF_REAL8") or (data_typeU == "CDF_DOUBLE") or (data_typeU == "CDF_EPOCH"):
return 8
elif data_typeU == "CDF_EPOCH16":
return 16
elif (data_typeU == "CDF_CHAR") or (data_typeU == "CDF_UCHAR"):
return num_elms
else:
raise TypeError("Unknown data type....")
else:
raise TypeError("Unknown data type....")
def _read_data(
self, byte_stream: bytes, data_type: int, num_recs: int, num_elems: int, dimensions: Optional[List[int]] = None
) -> Union[str, np.ndarray]:
"""
This is the primary routine that converts streams of bytes into usable data.
To do so, we need the bytes, the type of data, the number of records,
the number of elements in a record, and dimension information.
"""
squeeze_needed = False
# If the dimension is [n], it needs to be [n,1]
# for the numpy dtype. This requires us to squeeze
# the matrix later, to get rid of this extra dimension.
dt_string = self._convert_option()
if dimensions is not None:
if self._majority == "Column_major":
dimensions = list(reversed(dimensions))
if len(dimensions) == 1:
dimensions.append(1)
squeeze_needed = True
dt_string += "("
count = 0
for dim in dimensions:
count += 1
dt_string += str(dim)
if count < len(dimensions):
dt_string += ","
dt_string += ")"
ret: Union[str, np.ndarray]
if data_type == 52 or data_type == 51:
# string
if dimensions is None:
byte_data = bytearray(byte_stream[0 : num_recs * num_elems])
# In each record, check for the first '\x00' (null character).
# If found, make all the characters after it null as well.
for x in range(0, num_recs):
y = x * num_elems
z = byte_data[y : y + num_elems].find(b"\x00")
if z > -1 and z < (num_elems - 1):
byte_data[y + z + 1 : y + num_elems] = b"\x00" * (num_elems - z - 1)
ret = byte_data[0 : num_recs * num_elems].decode(self.string_encoding, errors="ignore").replace("\x00", "")
else:
# Count total number of strings
count = 1
for x in range(0, len(dimensions)):
count = count * dimensions[x]
strings = []
if len(dimensions) == 0:
for i in range(0, num_recs * count * num_elems, num_elems):
string1 = byte_stream[i : i + num_elems].decode(self.string_encoding, errors="ignore").replace("\x00", "")
strings.append(string1)
else:
for x in range(0, num_recs):
onerec = []
for i in range(x * count * num_elems, (x + 1) * count * num_elems, num_elems):
string1 = (
byte_stream[i : i + num_elems].decode(self.string_encoding, errors="ignore").replace("\x00", "")
)
onerec.append(string1)
strings.extend(onerec)
ret = np.array(strings).reshape((num_recs,) + tuple(dimensions))
if squeeze_needed:
ret = np.squeeze(ret, axis=(ret.ndim - 1))
if dimensions is not None:
dimensions.pop()
if self._majority == "Column_major":
axes = [0] + list(range(len(dimensions), 0, -1))
ret = np.transpose(ret, axes=axes)
return ret
else:
if (data_type == 1) or (data_type == 41):
dt_string += "i1"
elif data_type == 2:
dt_string += "i2"
elif data_type == 4:
dt_string += "i4"
elif (data_type == 8) or (data_type == 33):
dt_string += "i8"
elif data_type == 11:
dt_string += "u1"
elif data_type == 12:
dt_string += "u2"
elif data_type == 14:
dt_string += "u4"
elif (data_type == 21) or (data_type == 44):
dt_string += "f"
elif (data_type == 22) or (data_type == 45) or (data_type == 31):
dt_string += "d"
elif data_type == 32:
dt_string += "c16"
dt = np.dtype(dt_string)
ret = np.frombuffer(byte_stream, dtype=dt, count=num_recs * num_elems)
try:
ret.setflags(write=True)
except ValueError:
# If we can't set the writable flag, just continue
pass
if squeeze_needed:
ret = np.squeeze(ret, axis=(ret.ndim - 1))
if dimensions is not None:
dimensions.pop()
# Put the data into system byte order
if self._convert_option() != "=":
ret = ret.view(ret.dtype.newbyteorder()).byteswap()
if self._majority == "Column_major":
if dimensions is not None:
axes = [0] + list(range(len(dimensions), 0, -1))
else:
axes = None
ret = np.transpose(ret, axes=axes)
return ret
def _num_values(self, vdr: VDR) -> int:
"""
Returns the number of values in a record, using a given VDR
dictionary. Multiplies the dimension sizes of each dimension,
if it is varying.
"""
values = 1
for x in range(0, vdr.num_dims):
if vdr.dim_vary[x] != 0:
values = values * vdr.dim_sizes[x]
return values
def _get_attdata(self, adr_info: ADRInfo, entry_num: int, num_entry: int, first_entry: int) -> AttData:
position = first_entry
for _ in range(0, num_entry):
got_entry_num, next_aedr = self._read_aedr_fast(position)
if entry_num == got_entry_num:
aedr_info = self._read_aedr(position)
item_size = self._type_size(aedr_info.data_type, aedr_info.num_elements)
data_type = self._datatype_token(aedr_info.data_type)
num_items = aedr_info.num_elements
data: Union[str, npt.NDArray] = aedr_info.entry
if isinstance(data, str):
if aedr_info.num_strings is not None:
num_strings = aedr_info.num_strings
num_items = num_strings
if num_strings > 1 and isinstance(aedr_info.entry, str):
data = np.array(aedr_info.entry.split("\\N "))
return AttData(item_size, data_type, num_items, data)
else:
return AttData(item_size, data_type, num_items, _squeeze_or_scalar(data))
else:
position = next_aedr
raise KeyError("The entry does not exist")
def _read_vardata(
self,
vdr_info: VDR,
epoch: Optional[str] = None,
starttime: Optional[epoch.epoch_types] = None,
endtime: Optional[epoch.epoch_types] = None,
startrec: int = 0,
endrec: Optional[int] = None,
) -> Optional[Union[str, np.ndarray]]:
# Error checking
if startrec:
if startrec < 0:
raise ValueError("Invalid start recond")
if not (vdr_info.record_vary):
startrec = 0
if not (endrec is None):
if (endrec < 0) or (endrec > vdr_info.max_rec) or (endrec < startrec):
raise ValueError("Invalid end recond")
if not (vdr_info.record_vary):
endrec = 0
else:
endrec = vdr_info.max_rec
if self.cdfversion == 3:
vvr_offsets, vvr_start, vvr_end = self._read_vxrs(vdr_info.head_vxr, vvr_offsets=[], vvr_start=[], vvr_end=[])
else:
vvr_offsets, vvr_start, vvr_end = self._read_vxrs2(vdr_info.head_vxr, vvr_offsets=[], vvr_start=[], vvr_end=[])
if vdr_info.record_vary:
# Record varying
if starttime is not None or endtime is not None:
recs = self._findtimerecords(vdr_info.name, starttime, endtime, epoch=epoch)
if recs is None:
return None
elif len(recs) == 0:
return None
else:
startrec = recs[0]
endrec = recs[-1]
else:
startrec = 0
endrec = 0
data = self._read_vvrs(vdr_info, vvr_offsets, vvr_start, vvr_end, startrec, endrec)
if vdr_info.record_vary:
return data
else:
return data[0]
def _findtimerecords(
self, var_name: str, starttime: epoch.epoch_types, endtime: epoch.epoch_types, epoch: Optional[str] = None
) -> np.ndarray:
if epoch is not None:
vdr_info = self.varinq(epoch)
if vdr_info is None:
raise ValueError("Epoch not found")
if vdr_info.Data_Type == 31 or vdr_info.Data_Type == 32 or vdr_info.Data_Type == 33:
epochtimes = self.varget(epoch)
else:
vdr_info = self.varinq(var_name)
if vdr_info.Data_Type == 31 or vdr_info.Data_Type == 32 or vdr_info.Data_Type == 33:
epochtimes = self.varget(var_name)
else:
# acquire depend_0 variable
dependVar = self.attget("DEPEND_0", var_name)
if dependVar is None:
raise ValueError(
"No corresponding epoch from 'DEPEND_0' attribute "
"for variable: {}. ".format(var_name) + "Use 'epoch' argument to specify its time-based "
"variable"
)
if not isinstance(dependVar.Data, str):
raise ValueError("The 'DEPEND_0' attribute must reference a variable name (string)")
vdr_info = self.varinq(dependVar.Data)
if vdr_info.Data_Type != 31 and vdr_info.Data_Type != 32 and vdr_info.Data_Type != 33:
raise ValueError(
"Corresponding variable from 'DEPEND_0' attribute "
"for variable: {}".format(var_name) + " is not a CDF epoch type"
)
epochtimes = self.varget(dependVar.Data)
return self._findrangerecords(vdr_info.Data_Type, epochtimes, starttime, endtime)
def _findrangerecords(
self, data_type: int, epochtimes: epoch.epochs_type, starttime: epoch.epoch_types, endtime: epoch.epoch_types
) -> np.ndarray:
if data_type == 31 or data_type == 32 or data_type == 33:
# CDF_EPOCH or CDF_EPOCH16 or CDF_TIME_TT2000
recs = epoch.CDFepoch.findepochrange(epochtimes, starttime, endtime)
else:
raise ValueError("Not a CDF epoch type")
return recs
def _convert_type(self, data_type: int) -> str:
"""
CDF data types to python struct data types
"""
if (data_type == 1) or (data_type == 41):
dt_string = "b"
elif data_type == 2:
dt_string = "h"
elif data_type == 4:
dt_string = "i"
elif (data_type == 8) or (data_type == 33):
dt_string = "q"
elif data_type == 11:
dt_string = "B"
elif data_type == 12:
dt_string = "H"
elif data_type == 14:
dt_string = "I"
elif (data_type == 21) or (data_type == 44):
dt_string = "f"
elif (data_type == 22) or (data_type == 45) or (data_type == 31):
dt_string = "d"
elif data_type == 32:
dt_string = "d"
elif (data_type == 51) or (data_type == 52):
dt_string = "s"
return dt_string
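# Illustrative mapping (a sketch, not a doctest; 'cdf' stands for an open CDF instance and the
# type names follow the usual CDF data-type codes):
# >>> cdf._convert_type(21)                      # CDF_REAL4 -> single-precision float
# 'f'
# >>> struct.calcsize(cdf._convert_type(8))      # CDF_INT8 (and CDF_TIME_TT2000) -> 'q'
# 8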
def _default_pad(self, data_type: int, num_elms: int) -> Union[str, np.ndarray]:
"""
The default pad values by CDF data type
"""
order = self._convert_option()
if data_type == 51 or data_type == 52:
return str(" " * num_elms)
if (data_type == 1) or (data_type == 41):
pad_value = struct.pack(order + "b", -127)
dt_string = "i1"
elif data_type == 2:
pad_value = struct.pack(order + "h", -32767)
dt_string = "i2"
elif data_type == 4:
pad_value = struct.pack(order + "i", -2147483647)
dt_string = "i4"
elif (data_type == 8) or (data_type == 33):
pad_value = struct.pack(order + "q", -9223372036854775807)
dt_string = "i8"
elif data_type == 11:
pad_value = struct.pack(order + "B", 254)
dt_string = "u1"
elif data_type == 12:
pad_value = struct.pack(order + "H", 65534)
dt_string = "u2"
elif data_type == 14:
pad_value = struct.pack(order + "I", 4294967294)
dt_string = "u4"
elif (data_type == 21) or (data_type == 44):
pad_value = struct.pack(order + "f", -1.0e30)
dt_string = "f"
elif (data_type == 22) or (data_type == 45) or (data_type == 31):
pad_value = struct.pack(order + "d", -1.0e30)
dt_string = "d"
else:
# (data_type == 32):
pad_value = struct.pack(order + "2d", *[-1.0e30, -1.0e30])
dt_string = "c16"
dt = np.dtype(dt_string)
ret = np.frombuffer(pad_value, dtype=dt, count=1)
try:
ret.setflags(write=True)
except Exception:
# TODO: Figure out why we need to set the array to writeable
pass
return ret
def _convert_np_data(self, data: Union[str, np.ndarray], data_type: int, num_elems: int) -> bytes:
"""
Converts a single np data into byte stream.
"""
if isinstance(data, str):
if data == "":
return ("\x00" * num_elems).encode()
else:
return data.ljust(num_elems, "\x00").encode(self.string_encoding)
elif isinstance(data, np.ndarray):
data_stream = data.real.tobytes()
data_stream += data.imag.tobytes()
return data_stream
else:
return data.tobytes()
def _read_vvr_block(self, offset: int) -> bytes:
"""
Returns a VVR or decompressed CVVR block
"""
self._f.seek(offset, 0)
block_size = int.from_bytes(self._f.read(8), "big")
block = self._f.read(block_size - 8)
section_type = int.from_bytes(block[0:4], "big")
if section_type == 13:
# a CVVR
compressed_size = int.from_bytes(block[8:16], "big")
return gzip_inflate(block[16 : 16 + compressed_size])
elif section_type == 7:
# a VVR
return block[4:]
else:
raise RuntimeError("Unexpected section type")
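# Descriptive note: _read_vvr_block above is used for version 3 files, whose block sizes and
# compressed lengths are 8-byte fields; _read_vvr_block2 below is the version 2.x counterpart
# with 4-byte fields. Both return the raw VVR payload, gzip-inflating it first when the section
# type marks a compressed CVVR (13) rather than a plain VVR (7).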
def _read_vvr_block2(self, offset: int) -> bytes:
"""
Returns a VVR or decompressed CVVR block
"""
self._f.seek(offset, 0)
block_size = int.from_bytes(self._f.read(4), "big")
block = self._f.read(block_size - 4)
section_type = int.from_bytes(block[0:4], "big")
if section_type == 13:
# a CVVR
compressed_size = int.from_bytes(block[8:12], "big")
return gzip_inflate(block[12 : 12 + compressed_size])
elif section_type == 7:
# a VVR
return block[4:]
else:
raise RuntimeError("Unexpected section type")
@staticmethod
def _find_block(starts: List[int], ends: List[int], cur_block: int, rec_num: int) -> Tuple[int, int]:
"""
Finds the block that rec_num is in if it is found. Otherwise it returns -1.
It also returns the block that has the physical data either at or
preceding the rec_num.
It could be -1 if the preceding block does not exist.
"""
total = len(starts)
if cur_block == -1:
cur_block = 0
for x in range(cur_block, total):
if starts[x] <= rec_num and ends[x] >= rec_num:
return x, x
if starts[x] > rec_num:
break
return -1, x - 1
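# Illustrative trace of the search above (hand-worked, with made-up block boundaries):
# with starts=[0, 10], ends=[4, 14] and cur_block=-1,
#   rec_num=12 lies inside the second block      -> returns (1, 1)
#   rec_num=7 falls in the gap between blocks    -> returns (-1, 0): not found, block 0 precedes it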
def _file_or_url_or_s3_handler(
self, filename: str, filetype: str, s3_read_method: int
) -> Union["S3object", io.BufferedReader, io.BytesIO]:
bdata: Union["S3object", io.BufferedReader, io.BytesIO]
if filetype == "url":
req = urllib.request.Request(filename)
response = urllib.request.urlopen(req)
bdata = io.BytesIO(response.read())
elif filetype == "s3":
try:
import boto3
from botocore import UNSIGNED
from botocore.client import Config
from botocore.handlers import disable_signing
except ImportError:
raise ImportError("boto3/botocore package not installed")
s3parts = filename.split("/") # 0-1=s3://, 2=bucket, 3+=key
mybucket = s3parts[2]
mykey = "/".join(s3parts[3:])
if s3_read_method == 3:
# read in-place
s3c = boto3.resource("s3")
try:
obj = s3c.Object(bucket_name=mybucket, key=mykey)
except Exception:
s3c.meta.client.meta.events.register("choose-signer.s3.*", disable_signing)
obj = s3c.Object(bucket_name=mybucket, key=mykey)
bdata = S3object(obj) # type: ignore
else:
# for store in memory or as temp copy
s3c = boto3.client("s3")
try:
obj = s3c.get_object(Bucket=mybucket, Key=mykey)
except Exception:
s3c = boto3.client("s3", config=Config(signature_version=UNSIGNED))
obj = s3c.get_object(Bucket=mybucket, Key=mykey)
bdata = s3_fetchall(obj)
return bdata
else:
bdata = open(filename, "rb")
return bdata
def _unstream_file(self, f) -> None: # type: ignore
"""
Typically for S3 or URL, writes the current file stream
into a file in the temporary directory.
If that doesn't work, create a new file in the CDFs directory.
"""
raw_data = f.read(-1)
self.temp_file = Path(tempfile.NamedTemporaryFile(suffix=".cdf").name)
with self.temp_file.open("wb") as g:
g.write(raw_data)
self.original_stream = self.file
self.file = self.temp_file
self.file = Path(self.file).expanduser()
self.ftype = "file"
|
class CDF:
'''
Read a CDF file into the CDF object. This object contains methods to load
the cdf file information, variable names, and values.
Example
-------
>>> import cdflib
>>> cdf_file = cdflib.CDF('/path/to/cdf_file.cdf')
>>> cdf_file.cdf_info()
>>> x = cdf_file.varget("NameOfVariable", startrec=0, endrec=150)
'''
def __init__(self, path: Union[str, Path], validate: bool = False, string_encoding: str = "ascii", s3_read_method: int = 1):
'''
Parameters
----------
path : Path, str
Path to CDF file. This can be a link to a file in an S3 bucket as well.
validate : bool, optional
If True, validate the MD5 checksum of the CDF file.
string_encoding : str, optional
The encoding used to read strings. Defaults to 'ascii', which is what
the CDF internal format description prescribes as the encoding for
character strings. Other encodings may have been used to create files
however, and this keyword argument gives users the flexibility to read
those files.
s3_read_method: int, optional
If the user is specifying a file that lives within an AWS S3 bucket, this variable
defines how the file is read in. The choices are:
- 1 will read the file into memory to load in memory)
- 2 will download the file to a tmp directory
- 3 reads the file in chunks directly from S3 over https
Notes
-----
An open file handle to the CDF file remains whilst a CDF object is live.
It is automatically cleaned up when the CDF instance is deleted.
'''
pass
def __del__(self) -> None:
pass
def __getitem__(self, variable: str) -> Union[str, np.ndarray]:
pass
def __enter__(self) -> "CDF":
pass
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
pass
def cdf_info(self) -> CDFInfo:
'''
Returns basic CDF information.
Returns
-------
CDFInfo
'''
pass
def varinq(self, variable: str) -> VDRInfo:
'''
Get basic variable information.
Returns
-------
VDRInfo
'''
pass
def attinq(self, attribute: Union[str, int]) -> ADRInfo:
'''
Get attribute information.
Parameters
----------
attribute : str, int
Attribute to get information for.
Returns
-------
ADRInfo
'''
pass
def attget(self, attribute: Union[str, int], entry: Optional[Union[str, int]] = None) -> AttData:
'''
Returns the value of the attribute at the entry number provided.
A variable name can be used instead of its corresponding
entry number.
Parameters
----------
attribute : str, int
Attribute name or number to get.
entry : int, optional
Returns
-------
AttData
'''
pass
def varget(
self,
variable: Optional[str] = None,
epoch: Optional[str] = None,
starttime: Optional[epoch.epoch_types] = None,
endtime: Optional[epoch.epoch_types] = None,
startrec: int = 0,
endrec: Optional[int] = None,
) -> Union[str, np.ndarray]:
'''
Returns the variable data.
Parameters
----------
variable: str
Variable name to fetch.
startrec: int
Index of the first record to get.
endrec : int
Index of the last record to get. All records from *startrec* to
*endrec* inclusive are fetched.
Notes
-----
Variable can be entered either
a name or a variable number. By default, it returns a
'numpy.ndarray' or 'list' class object, depending on the
data type, with the variable data and its specification.
By default, the full variable data is returned. To acquire
only a portion of the data for a record-varying variable,
either the time or record (0-based) range can be specified.
'epoch' can be used to specify which time variable this
variable depends on and is to be searched for the time range.
For the ISTP-compliant CDFs, the time variable will come from
the attribute 'DEPEND_0' from this variable. The function will
automatically search for it thus no need to specify 'epoch'.
If either the start or end time is not specified,
the possible minimum or maximum value for the specific epoch
data type is assumed. If either the start or end record is not
specified, the range starts at 0 or/and ends at the last of the
written data.
The start (and end) time should be presented in a list as:
[year month day hour minute second millisec] for CDF_EPOCH
[year month day hour minute second millisec microsec nanosec picosec] for CDF_EPOCH16
[year month day hour minute second millisec microsec nanosec] for CDF_TIME_TT2000
If fewer time components are presented, only the last component given may carry a
fractional part covering the remaining sub-time components.
Note: CDF's CDF_EPOCH16 data type uses 2 8-byte doubles for each data value.
In Python, each value is presented as a complex or numpy.complex128.
'''
pass
def vdr_info(self, variable: Union[str, int]) -> VDR:
pass
def globalattsget(self) -> Dict[str, List[Union[str, np.ndarray]]]:
'''
Gets all global attributes.
This function returns all of the global attribute entries,
in a dictionary (in the form of ``'attribute': {entry: value}``
pairs) from a CDF.
'''
pass
def varattsget(self, variable: Union[str, int]) -> Dict[str, Union[None, str, np.ndarray]]:
'''
Gets all variable attributes.
Unlike attget, which returns a single attribute entry value,
this function returns all of the variable attribute entries,
in a dictionary (in the form of 'attribute': value pair) for
a variable.
'''
pass
def _uncompress_rle(self, data: bytes) -> bytearray:
pass
def _uncompress_file(self) -> None:
'''
Writes the current file into a file in the temporary directory.
If that doesn't work, create a new file in the CDFs directory.
'''
pass
def _read_ccr(self, byte_loc: int) -> Tuple[int, int, int, int]:
pass
def _read_ccr2(self, byte_loc: int) -> Tuple[int, int, int, int]:
pass
def _read_cpr(self, byte_loc: int) -> Tuple[int, int]:
pass
def _read_cpr3(self, byte_loc: int) -> Tuple[int, int]:
pass
def _read_cpr2(self, byte_loc: int) -> Tuple[int, int]:
pass
def _md5_validation(self) -> bool:
'''
Verifies the MD5 checksum.
Only used in the __init__() function
'''
pass
@staticmethod
def _encoding_token(encoding: int) -> str:
pass
@staticmethod
def _major_token(major: int) -> str:
pass
@staticmethod
def _scope_token(scope: int) -> str:
pass
@staticmethod
def _variable_token(variable: int) -> str:
pass
@staticmethod
def _datatype_token(datatype: int) -> str:
pass
@staticmethod
def _sparse_token(sparse: int) -> str:
pass
def _get_varnames(self) -> Tuple[List[str], List[str]]:
pass
def _get_attnames(self) -> List[Dict[str, str]]:
pass
def _read_cdr(self, byte_loc: int) -> Tuple[CDRInfo, int]:
'''
Read a CDF descriptor record (CDR).
'''
pass
def _read_cdr2(self, byte_loc: int) -> Tuple[CDRInfo, int]:
pass
def _read_gdr(self, byte_loc: int) -> GDRInfo:
pass
def _read_gdr2(self, byte_loc: int) -> GDRInfo:
pass
def _read_varatts(self, var_num: int, zVar: bool) -> Dict[str, Union[None, str, np.ndarray]]:
pass
def _read_adr(self, position: int) -> ADRInfo:
'''
Read an attribute descriptor record (ADR).
'''
pass
def _read_adr3(self, byte_loc: int) -> ADRInfo:
pass
def _read_adr2(self, byte_loc: int) -> ADRInfo:
pass
def _read_adr_fast(self, position: int) -> Tuple[str, int]:
'''
Read an attribute descriptor record (ADR).
'''
pass
def _read_adr_fast3(self, byte_loc: int) -> Tuple[str, int]:
pass
def _read_adr_fast2(self, byte_loc: int) -> Tuple[str, int]:
pass
def _read_aedr_fast(self, byte_loc: int) -> Tuple[int, int]:
pass
def _read_aedr_fast3(self, byte_loc: int) -> Tuple[int, int]:
pass
def _read_aedr_fast2(self, byte_loc: int) -> Tuple[int, int]:
pass
def _read_aedr(self, byte_loc: int) -> AEDR:
pass
def _read_aedr3(self, byte_loc: int) -> AEDR:
'''
Reads an Attribute Entry Descriptor Record at a specific byte location.
'''
pass
def _read_aedr2(self, byte_loc: int) -> AEDR:
pass
def _read_vdr(self, byte_loc: int) -> VDR:
'''
Read a variable descriptor record (VDR).
'''
pass
def _read_vdr3(self, byte_loc: int) -> VDR:
pass
def _read_vdr2(self, byte_loc: int) -> VDR:
pass
def _read_vdr_fast(self, byte_loc: int) -> Tuple[str, int]:
pass
def _read_vdr_fast3(self, byte_loc: int) -> Tuple[str, int]:
pass
def _read_vdr_fast2(self, byte_loc: int) -> Tuple[str, int]:
pass
def _read_vxrs(
self, byte_loc: int, vvr_offsets: List[int] = [], vvr_start: List[int] = [], vvr_end: List[int] = []
) -> Tuple[List[int], List[int], List[int]]:
pass
def _read_vxrs2(
self, byte_loc: int, vvr_offsets: List[int] = [], vvr_start: List[int] = [], vvr_end: List[int] = []
) -> Tuple[List[int], List[int], List[int]]:
pass
def _read_vvrs(
self, vdr: VDR, vvr_offs: List[int], vvr_start: List[int], vvr_end: List[int], startrec: int, endrec: int
) -> Union[str, np.ndarray]:
'''
Reads in all VVRS that are pointed to in the VVR_OFFS array.
Creates a large byte array of all values called "byte_stream".
Decodes the byte_stream, then returns them.
'''
pass
def _convert_option(self) -> str:
'''
Determines how to convert CDF byte ordering to the system
byte ordering.
'''
pass
def _endian(self) -> str:
'''
Determines endianess of the CDF file
Only used in __init__
'''
pass
@staticmethod
def _type_size(data_type: Union[int, str], num_elms: int) -> int:
pass
def _read_data(
self, byte_stream: bytes, data_type: int, num_recs: int, num_elems: int, dimensions: Optional[List[int]] = None
) -> Union[str, np.ndarray]:
'''
This is the primary routine that converts streams of bytes into usable data.
To do so, we need the bytes, the type of data, the number of records,
the number of elements in a record, and dimension information.
'''
pass
def _num_values(self, vdr: VDR) -> int:
'''
Returns the number of values in a record, using a given VDR
dictionary. Multiplies the dimension sizes of each dimension,
if it is varying.
'''
pass
def _get_attdata(self, adr_info: ADRInfo, entry_num: int, num_entry: int, first_entry: int) -> AttData:
pass
def _read_vardata(
self,
vdr_info: VDR,
epoch: Optional[str] = None,
starttime: Optional[epoch.epoch_types] = None,
endtime: Optional[epoch.epoch_types] = None,
startrec: int = 0,
endrec: Optional[int] = None,
) -> Optional[Union[str, np.ndarray]]:
pass
def _findtimerecords(
self, var_name: str, starttime: epoch.epoch_types, endtime: epoch.epoch_types, epoch: Optional[str] = None
) -> np.ndarray:
pass
def _findrangerecords(
self, data_type: int, epochtimes: epoch.epochs_type, starttime: epoch.epoch_types, endtime: epoch.epoch_types
) -> np.ndarray:
pass
def _convert_type(self, data_type: int) -> str:
'''
CDF data types to python struct data types
'''
pass
def _default_pad(self, data_type: int, num_elms: int) -> Union[str, np.ndarray]:
'''
The default pad values by CDF data type
'''
pass
def _convert_np_data(self, data: Union[str, np.ndarray], data_type: int, num_elems: int) -> bytes:
'''
Converts a single np data into byte stream.
'''
pass
def _read_vvr_block(self, offset: int) -> bytes:
'''
Returns a VVR or decompressed CVVR block
'''
pass
def _read_vvr_block2(self, offset: int) -> bytes:
'''
Returns a VVR or decompressed CVVR block
'''
pass
@staticmethod
def _find_block(starts: List[int], ends: List[int], cur_block: int, rec_num: int) -> Tuple[int, int]:
'''
Finds the block that rec_num is in if it is found. Otherwise it returns -1.
It also returns the block that has the physical data either at or
preceding the rec_num.
It could be -1 if the preceding block does not exist.
'''
pass
def _file_or_url_or_s3_handler(
self, filename: str, filetype: str, s3_read_method: int
) -> Union["S3object", io.BufferedReader, io.BytesIO]:
pass
def _unstream_file(self, f) -> None:
'''
Typically for S3 or URL, writes the current file stream
into a file in the temporary directory.
If that doesn't work, create a new file in the CDFs directory.
'''
pass
| 81 | 28 | 27 | 2 | 21 | 4 | 5 | 0.21 | 0 | 30 | 10 | 0 | 64 | 26 | 72 | 72 | 2,064 | 216 | 1,542 | 535 | 1,427 | 319 | 1,186 | 495 | 1,109 | 34 | 0 | 6 | 352 |
147,650 |
MAVENSDC/cdflib
|
cdflib/dataclasses.py
|
cdflib.dataclasses.AttData
|
class AttData:
"""
Attribute data.
"""
#: Number of bytes for each entry value.
Item_Size: int
#: CDF data type.
Data_Type: str
#: Number of values extracted.
Num_Items: int
#: Data as a scalar value, a numpy array or a string.
Data: Union[Number, str, np.ndarray]
|
class AttData:
'''
Attribute data.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 1 | 5 | 1 | 4 | 7 | 5 | 1 | 4 | 0 | 0 | 0 | 0 |
147,651 |
MAVENSDC/cdflib
|
cdflib/epochs.py
|
cdflib.epochs.CDFepoch
|
class CDFepoch:
"""
Convert between CDF-based epochs, np.datetime64, and Unix time.
There are three (3) epoch data types in CDF:
1. CDF_EPOCH is milliseconds since Year 0 represented as a
single double (float in Python),
2. CDF_EPOCH16 is picoseconds since Year 0 represented as
2-doubles (complex in Python), and
3. CDF_TIME_TT2000 (TT2000 as short) is nanoseconds since J2000 with
leap seconds, represented by an 8-byte integer (int in Python).
In Numpy, they are np.float64, np.complex128 and np.int64, respectively.
All these epoch values can come from the CDF.varget function.
Example
-------
>>> import cdflib
# Convert to an epoch
>>> epoch = cdflib.cdfepoch.compute_epoch([2017,1,1,1,1,1,111])
# Convert from an epoch
>>> time = cdflib.cdfepoch.to_datetime(epoch) # Or pass epochs via CDF.varget.
"""
version = 3
release = 7
increment = 0
month_Token = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
JulianDateJ2000_12h = 2451545
J2000Since0AD12h = 730485
J2000Since0AD12hSec = 63113904000.0
J2000Since0AD12hMilsec = 63113904000000.0
J2000LeapSeconds = 32.0
dT = 32.184
dTinNanoSecs = 32184000000
MJDbase = 2400000.5
SECinNanoSecs = 1000000000
SECinNanoSecsD = 1000000000.0
DAYinNanoSecs = int(86400000000000)
HOURinNanoSecs = int(3600000000000)
MINUTEinNanoSecs = int(60000000000)
T12hinNanoSecs = int(43200000000000)
# Julian days for 1707-09-22 and 2292-04-11, the valid TT2000 range
JDY17070922 = 2344793
JDY22920411 = 2558297
DEFAULT_TT2000_PADVALUE = int(-9223372036854775807)
FILLED_TT2000_VALUE = int(-9223372036854775808)
NERA1 = 14
LTS = []
with open(LEAPSEC_FILE) as lsfile:
lsreader = csv.reader(lsfile, delimiter=" ")
for csv_row in lsreader:
if csv_row[0] == ";":
continue
csv_row = list(filter(("").__ne__, csv_row))
row: List[Union[int, float]] = []
for r in csv_row[:3]:
row.append(int(r))
for r in csv_row[3:6]:
row.append(float(r))
LTS.append(row)
NDAT = len(LTS)
NST: Optional[List[int]] = None
currentDay = -1
currentJDay = -1
currentLeapSeconds: float = -1
@staticmethod
def encode(epochs: epoch_types, iso_8601: bool = True) -> encoded_type:
"""
Converts one or more epochs into UTC strings. The input epoch
format is deduced from the argument type.
Parameters
----------
epochs: int, float, list, complex
One or more CDF epochs in one of three formats:
1. CDF_EPOCH: The input should be either a float or list of floats
(in numpy, a np.float64 or a np.ndarray of np.float64)
2. CDF_EPOCH16: The input should be either a complex or list of
complex (in numpy, a np.complex128 or a np.ndarray of np.complex128)
3. TT2000: The input should be either an int or list of ints
(in numpy, a np.int64 or a np.ndarray of np.int64)
iso_8601: bool
The return time format. If iso_8601 is True, the format is,
for example, 2008-02-02T06:08:10.012014016, otherwise
the format is 02-Feb-2008 06:08:10.012.014.016.
"""
epochs = np.array(epochs)
if epochs.dtype == np.int64:
return CDFepoch.encode_tt2000(epochs, iso_8601)
elif epochs.dtype == np.float64:
return CDFepoch.encode_epoch(epochs, iso_8601)
elif epochs.dtype == np.complex128:
return CDFepoch.encode_epoch16(epochs, iso_8601)
else:
raise TypeError(f"Not sure how to handle type {epochs.dtype}")
@staticmethod
def breakdown(epochs: epoch_types) -> np.ndarray:
"""
Returns
-------
np.ndarray
1D if scalar input, 2D otherwise.
"""
epochs = np.array(epochs)
if epochs.dtype.type == np.int64:
return CDFepoch.breakdown_tt2000(epochs)
elif epochs.dtype.type == np.float64:
return CDFepoch.breakdown_epoch(epochs)
elif epochs.dtype.type == np.complex128:
return CDFepoch.breakdown_epoch16(epochs)
else:
raise TypeError(f"Not sure how to handle type {epochs.dtype}")
@staticmethod
def _compose_date(
nat_positions: npt.NDArray,
years: npt.NDArray,
months: npt.NDArray,
days: npt.NDArray,
hours: Optional[npt.NDArray] = None,
minutes: Optional[npt.NDArray] = None,
seconds: Optional[npt.NDArray] = None,
milliseconds: Optional[npt.NDArray] = None,
microseconds: Optional[npt.NDArray] = None,
nanoseconds: Optional[npt.NDArray] = None,
) -> npt.NDArray[np.datetime64]:
"""
Take date components and return a numpy datetime array.
"""
years = np.asarray(years) - 1970
months = np.asarray(months) - 1
days = np.asarray(days) - 1
types = ("<M8[Y]", "<m8[M]", "<m8[D]", "<m8[h]", "<m8[m]", "<m8[s]", "<m8[ms]", "<m8[us]", "<m8[ns]")
vals = (v for v in (years, months, days, hours, minutes, seconds, milliseconds, microseconds, nanoseconds) if v is not None)
arrays: List[npt.NDArray[np.datetime64]] = [np.array(v, dtype=t) for t, v in zip(types, vals)]
total_datetime = np.array(sum(arrays))
total_datetime = np.where(nat_positions, np.datetime64("NaT"), total_datetime)
return total_datetime
@classmethod
def to_datetime(cls, cdf_time: epoch_types) -> npt.NDArray[np.datetime64]:
"""
Converts CDF epoch argument to numpy.datetime64.
Parameters
----------
cdf_time: NumPy scalar/arrays to convert. np.int64 will be converted to cdf_tt2000, np.complex128 will be converted to cdf_epoch16, and floats will be converted to cdf_epoch.

Notes
-----
Because of datetime64 limitations, CDF_EPOCH16 precision is only kept to the nearest nanosecond.
"""
times = cls.breakdown(cdf_time)
times = np.atleast_2d(times)
fillval_locations = np.all((times[:, 0:7] == [9999, 12, 31, 23, 59, 59, 999]), axis=1)
padval_locations = np.all((times[:, 0:7] == [0, 1, 1, 0, 0, 0, 0]), axis=1)
nan_locations = np.logical_or(fillval_locations, padval_locations)
return cls._compose_date(nan_locations, *times.T[:9]).astype("datetime64[ns]")
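# Illustrative usage (a sketch; the input value is arbitrary). The dtype of the input selects
# the conversion path: int64 is treated as CDF_TIME_TT2000, float64 as CDF_EPOCH and
# complex128 as CDF_EPOCH16.
# >>> CDFepoch.to_datetime(np.array([0], dtype=np.int64))   # TT2000 path; 0 ns is the J2000 epoch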
@staticmethod
def unixtime(cdf_time: npt.ArrayLike) -> Union[float, npt.NDArray]:
"""
Converts CDF epoch argument into seconds after 1970-01-01. This method
converts a scalar, or array-like. Precision is only kept to the
nearest microsecond.
"""
cdf_time = np.atleast_1d(cdf_time)
time_list = np.atleast_2d(CDFepoch.breakdown(cdf_time))
unixtime = []
utc = datetime.timezone(datetime.timedelta())
for t in time_list:
date: List[int] = [0] * 7
for i in range(0, len(t)):
if i > 7:
continue
elif i == 6:
date[i] = 1000 * t[i]
elif i == 7:
date[i - 1] += t[i]
else:
date[i] = t[i]
unixtime.append(
datetime.datetime(date[0], date[1], date[2], date[3], date[4], date[5], date[6], tzinfo=utc).timestamp()
)
return _squeeze_or_scalar_real(unixtime)
@staticmethod
def timestamp_to_cdfepoch(unixtime_data: npt.ArrayLike) -> np.ndarray:
"""
Converts a unix timestamp to CDF_EPOCH, the number of milliseconds since the year 0.
"""
# Make sure the object is iterable. Sometimes numpy arrays claim to be iterable when they aren't.
times = np.atleast_1d(unixtime_data)
cdf_time_data = []
for ud in times:
if not np.isnan(ud):
dt = np.datetime64(int(ud * 1000), "ms")
dt_item: datetime.datetime = dt.item()
dt_to_convert = [
dt_item.year,
dt_item.month,
dt_item.day,
dt_item.hour,
dt_item.minute,
dt_item.second,
int(dt_item.microsecond / 1000),
]
converted_data = CDFepoch.compute(dt_to_convert)
else:
converted_data = np.nan
cdf_time_data.append(converted_data)
return np.array(cdf_time_data)
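# Illustrative usage (a sketch; 1483228800.0 is 2017-01-01T00:00:00 UTC as a unix timestamp):
# >>> CDFepoch.timestamp_to_cdfepoch([1483228800.0])   # -> one CDF_EPOCH value (ms since year 0)
# NaN inputs are passed through as NaN rather than converted.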
@staticmethod
def timestamp_to_cdfepoch16(unixtime_data: npt.ArrayLike) -> np.ndarray:
"""
Converts a unix timestamp to CDF_EPOCH16
"""
# Make sure the object is iterable. Sometimes numpy arrays claim to be iterable when they aren't.
times = np.atleast_1d(unixtime_data)
cdf_time_data = []
for ud in times:
if not np.isnan(ud):
dt = np.datetime64(int(ud * 1000000), "us")
dt_item: datetime.datetime = dt.item()
dt_to_convert = [
dt_item.year,
dt_item.month,
dt_item.day,
dt_item.hour,
dt_item.minute,
dt_item.second,
int(dt_item.microsecond / 1000),
int(dt_item.microsecond % 1000),
0,
0,
]
converted_data = CDFepoch.compute(dt_to_convert)
else:
converted_data = np.nan
cdf_time_data.append(converted_data)
return np.array(cdf_time_data)
@staticmethod
def timestamp_to_tt2000(unixtime_data: npt.ArrayLike) -> np.ndarray:
"""
Converts a unix timestamp to CDF_TIME_TT2000
"""
# Make sure the object is iterable. Sometimes numpy arrays claim to be iterable when they aren't.
times = np.atleast_1d(unixtime_data)
cdf_time_data = []
for ud in times:
if not np.isnan(ud):
dt = np.datetime64(int(ud * 1000000), "us")
dt_item: datetime.datetime = dt.item()
dt_to_convert = [
dt_item.year,
dt_item.month,
dt_item.day,
dt_item.hour,
dt_item.minute,
dt_item.second,
int(dt_item.microsecond / 1000),
int(dt_item.microsecond % 1000),
0,
]
converted_data = CDFepoch.compute(dt_to_convert)
else:
converted_data = np.nan
cdf_time_data.append(converted_data)
return np.array(cdf_time_data)
@staticmethod
def compute(datetimes: npt.ArrayLike) -> Union[int, float, complex, npt.NDArray]:
"""
Computes the provided date/time components into CDF epoch value(s).
For CDF_EPOCH:
For computing into CDF_EPOCH value, each date/time elements should
have exactly seven (7) components, as year, month, day, hour, minute,
second and millisecond, in a list. For example:
[[2017,1,1,1,1,1,111],[2017,2,2,2,2,2,222]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to seven) components. The last component, if
not the 7th, can be a float that can have a fraction of the unit.
For CDF_EPOCH16:
They should have exactly ten (10) components, as year,
month, day, hour, minute, second, millisecond, microsecond, nanosecond
and picosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789,999],[2017,2,2,2,2,2,987,654,321,999]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to ten) components. The last component, if
not the 10th, can be a float that can have a fraction of the unit.
For TT2000:
Each TT2000 typed date/time should have exactly nine (9) components, as
year, month, day, hour, minute, second, millisecond, microsecond,
and nanosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789],[2017,2,2,2,2,2,987,654,321]]
Or, call function compute_tt2000 directly, instead, with at least three
(3) first (up to nine) components. The last component, if
not the 9th, can be a float that can have a fraction of the unit.
"""
if not isinstance(datetimes, (list, tuple, np.ndarray)):
raise TypeError("datetime must be in list form")
datetimes = np.atleast_2d(datetimes)
items = datetimes.shape[1]
if items == 7:
return _squeeze_or_scalar_real(CDFepoch.compute_epoch(datetimes))
elif items == 10:
return _squeeze_or_scalar_complex(CDFepoch.compute_epoch16(datetimes))
elif items == 9:
return _squeeze_or_scalar_real(CDFepoch.compute_tt2000(datetimes))
else:
raise TypeError("Unknown input")
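# Sketch of how the component count selects the target epoch type (examples taken from the
# docstring above):
# >>> CDFepoch.compute([2017, 1, 1, 1, 1, 1, 111])                  # 7 items  -> CDF_EPOCH (float)
# >>> CDFepoch.compute([2017, 1, 1, 1, 1, 1, 123, 456, 789])        # 9 items  -> CDF_TIME_TT2000 (int)
# >>> CDFepoch.compute([2017, 1, 1, 1, 1, 1, 123, 456, 789, 999])   # 10 items -> CDF_EPOCH16 (complex)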
@staticmethod
def findepochrange(
epochs: epochs_type, starttime: Optional[epoch_types] = None, endtime: Optional[epoch_types] = None
) -> np.ndarray:
"""
Finds the record range within the start and end time from values
of a CDF epoch data type. It returns a list of record numbers.
If the start time is not provided, then it is
assumed to be the minimum possible value. If the end time is not
provided, then the maximum possible value is assumed. The epoch is
assumed to be in chronological order. The start and end times
should have the proper number of date/time components, corresponding
to the epoch's data type.
The start/end times should either be in epoch units, or in the list
format described in the "compute_epoch/epoch16/tt2000" section.
"""
epochs = np.array(epochs)
if epochs.dtype == np.int64:
return CDFepoch.epochrange_tt2000(epochs, starttime, endtime)
elif epochs.dtype == np.float64:
return CDFepoch.epochrange_epoch(epochs, starttime, endtime)
elif epochs.dtype == np.complex128:
return CDFepoch.epochrange_epoch16(epochs, starttime, endtime)
else:
raise TypeError("Bad input")
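# Illustrative usage (a sketch; 'epochs' is a hypothetical, chronologically ordered
# CDF_TIME_TT2000 array and 'data' a hypothetical variable aligned with it; start/end times
# are given as component lists, as described in the docstring above):
# >>> recs = CDFepoch.findepochrange(epochs, starttime=[2017, 1, 1], endtime=[2017, 1, 2])
# >>> data_in_range = data[recs]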
@staticmethod
def encode_tt2000(tt2000: cdf_tt2000_type, iso_8601: bool = True) -> encoded_type:
new_tt2000 = np.atleast_1d(tt2000)
count = len(new_tt2000)
encodeds = []
for x in range(count):
nanoSecSinceJ2000 = new_tt2000[x]
if nanoSecSinceJ2000 == CDFepoch.FILLED_TT2000_VALUE:
if iso_8601:
return "9999-12-31T23:59:59.999999999"
else:
return "31-Dec-9999 23:59:59.999.999.999"
if nanoSecSinceJ2000 == CDFepoch.DEFAULT_TT2000_PADVALUE:
if iso_8601:
return "0000-01-01T00:00:00.000000000"
else:
return "01-Jan-0000 00:00:00.000.000.000"
datetime = CDFepoch.breakdown_tt2000(nanoSecSinceJ2000)
ly = datetime[0]
lm = datetime[1]
ld = datetime[2]
lh = datetime[3]
ln = datetime[4]
ls = datetime[5]
ll = datetime[6]
lu = datetime[7]
la = datetime[8]
if iso_8601:
# yyyy-mm-ddThh:mm:ss.mmmuuunnn
encoded = str(ly).zfill(4)
encoded += "-"
encoded += str(lm).zfill(2)
encoded += "-"
encoded += str(ld).zfill(2)
encoded += "T"
encoded += str(lh).zfill(2)
encoded += ":"
encoded += str(ln).zfill(2)
encoded += ":"
encoded += str(ls).zfill(2)
encoded += "."
encoded += str(ll).zfill(3)
encoded += str(lu).zfill(3)
encoded += str(la).zfill(3)
else:
# dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn
encoded = str(ld).zfill(2)
encoded += "-"
encoded += CDFepoch.month_Token[lm - 1]
encoded += "-"
encoded += str(ly).zfill(4)
encoded += " "
encoded += str(lh).zfill(2)
encoded += ":"
encoded += str(ln).zfill(2)
encoded += ":"
encoded += str(ls).zfill(2)
encoded += "."
encoded += str(ll).zfill(3)
encoded += "."
encoded += str(lu).zfill(3)
encoded += "."
encoded += str(la).zfill(3)
if count == 1:
return encoded
else:
encodeds.append(encoded)
return encodeds
@staticmethod
def breakdown_tt2000(tt2000: cdf_tt2000_type) -> np.ndarray:
"""
Breaks down the epoch(s) into UTC components.
Calculate date and time from cdf_time_tt2000 integers
Parameters
----------
tt2000 : array-like
Single, list, tuple, or np.array of tt2000 values
Returns
-------
components : ndarray
List or array of date and time values. The last axis contains
(in order): year, month, day, hour, minute, second, millisecond,
microsecond, and nanosecond
Notes
-----
If a bad epoch is supplied, a fill date of 9999-12-31 23:59:59 and 999 ms, 999 us, and
999 ns is returned.
"""
new_tt2000 = np.atleast_1d(tt2000).astype(np.int64)
count = len(new_tt2000)
toutcs = np.zeros((9, count), dtype=int)
datxs = CDFepoch._LeapSecondsfromJ2000(new_tt2000)
# Do some computations on arrays to speed things up
post2000 = new_tt2000 > 0
nanoSecsSinceJ2000 = new_tt2000.copy()
nanoSecsSinceJ2000[~post2000] += CDFepoch.T12hinNanoSecs
nanoSecsSinceJ2000[~post2000] -= CDFepoch.dTinNanoSecs
secsSinceJ2000 = (nanoSecsSinceJ2000 / CDFepoch.SECinNanoSecsD).astype(np.int64)
nansecs = (nanoSecsSinceJ2000 - (secsSinceJ2000 * CDFepoch.SECinNanoSecs)).astype(np.int64) # type: ignore
posNanoSecs = new_tt2000 > 0
secsSinceJ2000[posNanoSecs] -= 32
secsSinceJ2000[posNanoSecs] += 43200
nansecs[posNanoSecs] -= 184000000
negNanoSecs = nansecs < 0
nansecs[negNanoSecs] += CDFepoch.SECinNanoSecs
secsSinceJ2000[negNanoSecs] -= 1
t2s = secsSinceJ2000 * CDFepoch.SECinNanoSecs + nansecs
post72: np.ndarray = datxs[:, 0] > 0
secsSinceJ2000[post72] -= datxs[post72, 0].astype(int)
epochs = CDFepoch.J2000Since0AD12hSec + secsSinceJ2000
datxzero = datxs[:, 1] == 0.0
epochs[post72 & ~datxzero] -= 1
xdates = CDFepoch._EPOCHbreakdownTT2000(epochs)
# If 1 second was subtracted, add 1 second back in
# Be careful not to go 60 or above
xdates[5, post72 & ~datxzero] += 1
xdates[4, post72 & ~datxzero] += np.floor(xdates[5, post72 & ~datxzero] / 60.0)
xdates[5, post72 & ~datxzero] = xdates[5, post72 & ~datxzero] % 60
# Set toutcs, then loop through and correct for pre-1972
toutcs[:6, :] = xdates[:6, :]
for x in np.nonzero(~post72)[0]:
if datxs[x, 0] <= 0.0:
# pre-1972...
epoch = epochs[x]
t2 = t2s[x]
t3 = new_tt2000[x]
nansec = nansecs[x]
xdate = np.zeros(9)
xdate[:6] = xdates[:, x]
xdate[8] = nansec
tmpNanosecs = CDFepoch.compute_tt2000(xdate)
if tmpNanosecs != t3:
dat0 = CDFepoch._LeapSecondsfromYMD(xdate[0], xdate[1], xdate[2])
tmpx = t2 - int(dat0 * CDFepoch.SECinNanoSecs)
tmpy = int(float(tmpx / CDFepoch.SECinNanoSecsD))
nansec = int(tmpx - tmpy * CDFepoch.SECinNanoSecs)
if nansec < 0:
nansec = CDFepoch.SECinNanoSecs + nansec
tmpy = tmpy - 1
epoch = tmpy + CDFepoch.J2000Since0AD12hSec
xdate = np.zeros(9)
xdate[:6] = CDFepoch._EPOCHbreakdownTT2000(epoch)[:, 0]
xdate[8] = nansec
tmpNanosecs = CDFepoch.compute_tt2000(xdate)
if tmpNanosecs != t3:
dat0 = CDFepoch._LeapSecondsfromYMD(xdate[0], xdate[1], xdate[2])
tmpx = t2 - int(dat0 * CDFepoch.SECinNanoSecs)
tmpy = int((1.0 * tmpx) / CDFepoch.SECinNanoSecsD)
nansec = int(tmpx - tmpy * CDFepoch.SECinNanoSecs)
if nansec < 0:
nansec = CDFepoch.SECinNanoSecs + nansec
tmpy = tmpy - 1
epoch = tmpy + CDFepoch.J2000Since0AD12hSec
xdate = np.zeros(9)
xdate[:6] = CDFepoch._EPOCHbreakdownTT2000(epoch)[:, 0]
xdate[8] = nansec
tmpNanosecs = CDFepoch.compute_tt2000(xdate)
if tmpNanosecs != t3:
dat0 = CDFepoch._LeapSecondsfromYMD(xdate[0], xdate[1], xdate[2])
tmpx = t2 - int(dat0 * CDFepoch.SECinNanoSecs)
tmpy = int((1.0 * tmpx) / CDFepoch.SECinNanoSecsD)
nansec = int(tmpx - tmpy * CDFepoch.SECinNanoSecs)
if nansec < 0:
nansec = CDFepoch.SECinNanoSecs + nansec
tmpy = tmpy - 1
epoch = tmpy + CDFepoch.J2000Since0AD12hSec
# One more determination
xdate = CDFepoch._EPOCHbreakdownTT2000(epoch).ravel()
nansecs[x] = nansec
toutcs[:6, x] = xdate[:6]
# Finished pre-1972 correction
ml1 = nansecs // 1000000
tmp1 = nansecs - (1000000 * ml1)
overflow = ml1 > 1000
ml1[overflow] -= 1000
toutcs[6, :] = ml1
toutcs[5, overflow] += 1
ma1 = tmp1 // 1000
na1 = tmp1 - 1000 * ma1
toutcs[7, :] = ma1
toutcs[8, :] = na1
# Check standard fill and pad values
cdf_epoch_time_tt2000 = np.atleast_2d(toutcs.T)
fillval_locations = np.all(cdf_epoch_time_tt2000 == [1707, 9, 22, 12, 12, 10, 961, 224, 192], axis=1)
cdf_epoch_time_tt2000[fillval_locations] = [9999, 12, 31, 23, 59, 59, 999, 999, 999]
padval_locations = np.all(cdf_epoch_time_tt2000 == [1707, 9, 22, 12, 12, 10, 961, 224, 193], axis=1)
cdf_epoch_time_tt2000[padval_locations] = [0, 1, 1, 0, 0, 0, 0, 0, 0]
return np.squeeze(cdf_epoch_time_tt2000)
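# Expected behaviour for the standard fill and pad values, following the remapping just above
# (stated as intent, not as an authoritative doctest):
# breakdown_tt2000(FILLED_TT2000_VALUE)     -> [9999, 12, 31, 23, 59, 59, 999, 999, 999]
# breakdown_tt2000(DEFAULT_TT2000_PADVALUE) -> [0, 1, 1, 0, 0, 0, 0, 0, 0]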
@staticmethod
def compute_tt2000(datetimes: npt.ArrayLike) -> Union[int, npt.NDArray[np.int64]]:
if not isinstance(datetimes, (list, tuple, np.ndarray)):
raise TypeError("datetime must be in list form")
new_datetimes = np.atleast_2d(datetimes)
count = len(new_datetimes)
nanoSecSinceJ2000s = []
for x in range(count):
datetime = new_datetimes[x]
year = int(datetime[0])
month = int(datetime[1])
items = len(datetime)
if items > 8:
# y m d h m s ms us ns
day = int(datetime[2])
hour = int(datetime[3])
minute = int(datetime[4])
second = int(datetime[5])
msec = int(datetime[6])
usec = int(datetime[7])
nsec = int(datetime[8])
elif items > 7:
# y m d h m s ms us
day = int(datetime[2])
hour = int(datetime[3])
minute = int(datetime[4])
second = int(datetime[5])
msec = int(datetime[6])
usec = int(datetime[7])
nsec = int(1000.0 * (datetime[7] - usec))
elif items > 6:
# y m d h m s ms
day = int(datetime[2])
hour = int(datetime[3])
minute = int(datetime[4])
second = int(datetime[5])
msec = int(datetime[6])
xxx = float(1000.0 * (datetime[6] - msec))
usec = int(xxx)
nsec = int(1000.0 * (xxx - usec))
elif items > 5:
# y m d h m s
day = int(datetime[2])
hour = int(datetime[3])
minute = int(datetime[4])
second = int(datetime[5])
xxx = float(1000.0 * (datetime[5] - second))
msec = int(xxx)
xxx = float(1000.0 * (xxx - msec))
usec = int(xxx)
nsec = int(1000.0 * (xxx - usec))
elif items > 4:
# y m d h m
day = int(datetime[2])
hour = int(datetime[3])
minute = int(datetime[4])
xxx = float(60.0 * (datetime[4] - minute))
second = int(xxx)
xxx = float(1000.0 * (xxx - second))
msec = int(xxx)
xxx = float(1000.0 * (xxx - msec))
usec = int(xxx)
nsec = int(1000.0 * (xxx - usec))
elif items > 3:
# y m d h
day = int(datetime[2])
hour = int(datetime[3])
xxx = float(60.0 * (datetime[3] - hour))
minute = int(xxx)
xxx = float(60.0 * (xxx - minute))
second = int(xxx)
xxx = float(1000.0 * (xxx - second))
msec = int(xxx)
xxx = float(1000.0 * (xxx - msec))
usec = int(xxx)
nsec = int(1000.0 * (xxx - usec))
elif items > 2:
# y m d
day = int(datetime[2])
xxx = float(24.0 * (datetime[2] - day))
hour = int(xxx)
xxx = float(60.0 * (xxx - hour))
minute = int(xxx)
xxx = float(60.0 * (xxx - minute))
second = int(xxx)
xxx = float(1000.0 * (xxx - second))
msec = int(xxx)
xxx = float(1000.0 * (xxx - msec))
usec = int(xxx)
nsec = int(1000.0 * (xxx - usec))
else:
raise ValueError("Invalid tt2000 components")
if month == 0:
month = 1
if (
year == 9999
and month == 12
and day == 31
and hour == 23
and minute == 59
and second == 59
and msec == 999
and usec == 999
and nsec == 999
):
nanoSecSinceJ2000 = CDFepoch.FILLED_TT2000_VALUE
elif (
year == 0
and month == 1
and day == 1
and hour == 0
and minute == 0
and second == 0
and msec == 0
and usec == 0
and nsec == 0
):
nanoSecSinceJ2000 = CDFepoch.DEFAULT_TT2000_PADVALUE
else:
iy = 10000000 * month + 10000 * day + year
if iy != CDFepoch.currentDay:
CDFepoch.currentDay = iy
CDFepoch.currentLeapSeconds = CDFepoch._LeapSecondsfromYMD(year, month, day)
CDFepoch.currentJDay = CDFepoch._JulianDay(year, month, day)
jd = CDFepoch.currentJDay
jd = jd - CDFepoch.JulianDateJ2000_12h
subDayinNanoSecs = int(
hour * CDFepoch.HOURinNanoSecs
+ minute * CDFepoch.MINUTEinNanoSecs
+ second * CDFepoch.SECinNanoSecs
+ msec * 1000000
+ usec * 1000
+ nsec
)
nanoSecSinceJ2000 = int(jd * CDFepoch.DAYinNanoSecs + subDayinNanoSecs)
t2 = int(CDFepoch.currentLeapSeconds * CDFepoch.SECinNanoSecs)
if nanoSecSinceJ2000 < 0:
nanoSecSinceJ2000 = int(nanoSecSinceJ2000 + t2)
nanoSecSinceJ2000 = int(nanoSecSinceJ2000 + CDFepoch.dTinNanoSecs)
nanoSecSinceJ2000 = int(nanoSecSinceJ2000 - CDFepoch.T12hinNanoSecs)
else:
nanoSecSinceJ2000 = int(nanoSecSinceJ2000 - CDFepoch.T12hinNanoSecs)
nanoSecSinceJ2000 = int(nanoSecSinceJ2000 + t2)
nanoSecSinceJ2000 = int(nanoSecSinceJ2000 + CDFepoch.dTinNanoSecs)
nanoSecSinceJ2000s.append(int(nanoSecSinceJ2000))
return np.squeeze(nanoSecSinceJ2000s)
@staticmethod
def _LeapSecondsfromYMD(year: int, month: int, day: int) -> float:
j = -1
m = 12 * year + month
for i, _ in reversed(list(enumerate(CDFepoch.LTS))):
n = 12 * CDFepoch.LTS[i][0] + CDFepoch.LTS[i][1]
if m >= n:
j = i
break
if j == -1:
return 0
da = CDFepoch.LTS[j][3]
# pre-1972
if j < CDFepoch.NERA1:
jda = CDFepoch._JulianDay(year, month, day)
da = da + ((jda - CDFepoch.MJDbase) - CDFepoch.LTS[j][4]) * CDFepoch.LTS[j][5]
return da
@staticmethod
def _LeapSecondsfromJ2000(nanosecs: npt.ArrayLike) -> npt.NDArray:
nanosecs = np.atleast_1d(nanosecs)
da = np.zeros((nanosecs.size, 2))
j = -1 * np.ones(nanosecs.size, dtype=int)
if CDFepoch.NST is None:
CDFepoch._LoadLeapNanoSecondsTable()
for i, _ in reversed(list(enumerate(CDFepoch.NST))):
idxs = (j == -1) & (nanosecs >= CDFepoch.NST[i])
j[idxs] = i
if i < (CDFepoch.NDAT - 1):
overflow = nanosecs + 1000000000 >= CDFepoch.NST[i + 1]
da[overflow, 1] = 1.0
if np.all(j > 0):
break
LTS = np.array(CDFepoch.LTS)
da[:, 0] = LTS[j, 3]
da[j <= CDFepoch.NERA1, 0] = 0
return da
@staticmethod
def _LoadLeapNanoSecondsTable() -> None:
CDFepoch.NST = []
for ix in range(0, CDFepoch.NERA1):
CDFepoch.NST.append(CDFepoch.FILLED_TT2000_VALUE)
for ix in range(CDFepoch.NERA1, CDFepoch.NDAT):
CDFepoch.NST.append(
int(
CDFepoch.compute_tt2000(
[int(CDFepoch.LTS[ix][0]), int(CDFepoch.LTS[ix][1]), int(CDFepoch.LTS[ix][2]), 0, 0, 0, 0, 0, 0]
)
)
)
@staticmethod
def _EPOCHbreakdownTT2000(epoch: npt.ArrayLike) -> npt.NDArray:
epoch = np.atleast_1d(epoch)
minute_AD, second_AD = np.divmod(epoch, 60)
hour_AD, minute_AD = np.divmod(minute_AD, 60)
day_AD, hour_AD = np.divmod(hour_AD, 24)
# minute_AD = second_AD / 60.0
# hour_AD = minute_AD / 60.0
# day_AD = hour_AD / 24.0
l = 1721060 + 68569 + day_AD
n = (4 * l / 146097).astype(int)
l = l - ((146097 * n + 3) / 4).astype(int)
i = (4000 * (l + 1) / 1461001).astype(int)
l = l - (1461 * i / 4).astype(int) + 31
j = (80 * l / 2447).astype(int)
k = l - (2447 * j / 80).astype(int)
l = (j / 11).astype(int)
j = j + 2 - 12 * l
i = 100 * (n - 49) + i + l
date = np.array([i, j, k, hour_AD, minute_AD, second_AD])
return date
@staticmethod
def epochrange_tt2000(
epochs: cdf_tt2000_type, starttime: Optional[epoch_types] = None, endtime: Optional[epoch_types] = None
) -> npt.NDArray:
if isinstance(epochs, int) or isinstance(epochs, np.int64):
pass
elif isinstance(epochs, list) or isinstance(epochs, tuple) or isinstance(epochs, np.ndarray):
if isinstance(epochs[0], int) or isinstance(epochs[0], np.int64):
pass
else:
raise ValueError("Bad data")
else:
raise ValueError("Bad data")
stime: Union[int, np.int64]
if starttime is None:
stime = int(-9223372036854775807)
else:
if isinstance(starttime, int) or isinstance(starttime, np.int64):
stime = starttime
elif isinstance(starttime, list):
stime = int(CDFepoch.compute_tt2000(starttime))
else:
raise ValueError("Bad start time")
if endtime is not None:
if isinstance(endtime, int) or isinstance(endtime, np.int64):
etime = endtime
elif isinstance(endtime, list) or isinstance(endtime, tuple):
etime = int(CDFepoch.compute_tt2000(endtime))
else:
raise ValueError("Bad end time")
else:
etime = int(9223372036854775807)
if stime > etime:
raise ValueError("Invalid start/end time")
new_epochs = np.array(epochs)
return np.where(np.logical_and(new_epochs >= stime, new_epochs <= etime))[0]
@staticmethod
def encode_epoch16(epochs: cdf_epoch16_type, iso_8601: bool = True) -> encoded_type:
new_epochs = np.atleast_1d(epochs)
count = len(new_epochs)
encodeds = []
for x in range(count):
# complex
if (new_epochs[x].real == -1.0e31) and (new_epochs[x].imag == -1.0e31):
if iso_8601:
encoded = "9999-12-31T23:59:59.999999999999"
else:
encoded = "31-Dec-9999 23:59:59.999.999.999.999"
else:
encoded = CDFepoch._encodex_epoch16(new_epochs[x], iso_8601)
if count == 1:
return encoded
else:
encodeds.append(encoded)
return encodeds
@staticmethod
def _encodex_epoch16(epoch16: cdf_epoch16_type, iso_8601: bool = True) -> str:
components = CDFepoch.breakdown_epoch16(epoch16)
if iso_8601:
# year-mm-ddThh:mm:ss.mmmuuunnnppp
encoded = str(components[0]).zfill(4)
encoded += "-"
encoded += str(components[1]).zfill(2)
encoded += "-"
encoded += str(components[2]).zfill(2)
encoded += "T"
encoded += str(components[3]).zfill(2)
encoded += ":"
encoded += str(components[4]).zfill(2)
encoded += ":"
encoded += str(components[5]).zfill(2)
encoded += "."
encoded += str(components[6]).zfill(3)
encoded += str(components[7]).zfill(3)
encoded += str(components[8]).zfill(3)
encoded += str(components[9]).zfill(3)
else:
# dd-mmm-year hh:mm:ss.mmm.uuu.nnn.ppp
encoded = str(components[2]).zfill(2)
encoded += "-"
encoded += CDFepoch.month_Token[components[1] - 1]
encoded += "-"
encoded += str(components[0]).zfill(4)
encoded += " "
encoded += str(components[3]).zfill(2)
encoded += ":"
encoded += str(components[4]).zfill(2)
encoded += ":"
encoded += str(components[5]).zfill(2)
encoded += "."
encoded += str(components[6]).zfill(3)
encoded += "."
encoded += str(components[7]).zfill(3)
encoded += "."
encoded += str(components[8]).zfill(3)
encoded += "."
encoded += str(components[9]).zfill(3)
return encoded
@staticmethod
def _JulianDay(y: int, m: int, d: int) -> int:
a1 = int(7 * (int(y + int((m + 9) / 12))) / 4)
a2 = int(3 * (int(int(y + int((m - 9) / 7)) / 100) + 1) / 4)
a3 = int(275 * m / 9)
return 367 * y - a1 - a2 + a3 + d + 1721029
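# Sanity checks against the Julian-day constants defined on this class (hand-verified):
# >>> CDFepoch._JulianDay(2000, 1, 1) == CDFepoch.JulianDateJ2000_12h   # 2451545
# True
# >>> CDFepoch._JulianDay(1707, 9, 22) == CDFepoch.JDY17070922          # 2344793
# True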
@staticmethod
def compute_epoch16(datetimes: npt.ArrayLike) -> Union[complex, npt.NDArray[np.complex128]]:
new_dates = np.atleast_2d(datetimes)
count = len(new_dates)
epochs = []
for x in range(count):
epoch = []
date = new_dates[x]
items = len(date)
year = date[0]
month = date[1]
xxx: Union[float, int] = 0
if items > 9:
# y m d h m s ms us ns ps
day = int(date[2])
hour = int(date[3])
minute = int(date[4])
second = int(date[5])
msec = int(date[6])
usec = int(date[7])
nsec = int(date[8])
psec = int(date[9])
elif items > 8:
# y m d h m s ms us ns
day = int(date[2])
hour = int(date[3])
minute = int(date[4])
second = int(date[5])
msec = int(date[6])
usec = int(date[7])
nsec = int(date[8])
psec = int(1000.0 * (date[8] - nsec))
elif items > 7:
# y m d h m s ms us
day = int(date[2])
hour = int(date[3])
minute = int(date[4])
second = int(date[5])
msec = int(date[6])
usec = int(date[7])
xxx = int(1000.0 * (date[7] - usec))
nsec = int(xxx)
psec = int(1000.0 * (xxx - nsec))
elif items > 6:
# y m d h m s ms
day = int(date[2])
hour = int(date[3])
minute = int(date[4])
second = int(date[5])
msec = int(date[6])
xxx = float(1000.0 * (date[6] - msec))
usec = int(xxx)
xxx = int(1000.0 * (xxx - usec))
nsec = int(xxx)
psec = int(1000.0 * (xxx - nsec))
elif items > 5:
# y m d h m s
day = int(date[2])
hour = int(date[3])
minute = int(date[4])
second = int(date[5])
xxx = float(1000.0 * (date[5] - second))
msec = int(xxx)
xxx = float(1000.0 * (xxx - msec))
usec = int(xxx)
xxx = int(1000.0 * (xxx - usec))
nsec = int(xxx)
psec = int(1000.0 * (xxx - nsec))
elif items > 4:
# y m d h m
day = int(date[2])
hour = int(date[3])
minute = int(date[4])
xxx = float(60.0 * (date[4] - minute))
second = int(xxx)
xxx = float(1000.0 * (xxx - second))
msec = int(xxx)
xxx = float(1000.0 * (xxx - msec))
usec = int(xxx)
xxx = int(1000.0 * (xxx - usec))
nsec = int(xxx)
psec = int(1000.0 * (xxx - nsec))
elif items > 3:
# y m d h
day = int(date[2])
hour = int(date[3])
xxx = float(60.0 * (date[3] - hour))
minute = int(xxx)
xxx = float(60.0 * (xxx - minute))
second = int(xxx)
xxx = float(1000.0 * (xxx - second))
msec = int(xxx)
xxx = float(1000.0 * (xxx - msec))
usec = int(xxx)
xxx = int(1000.0 * (xxx - usec))
nsec = int(xxx)
psec = int(1000.0 * (xxx - nsec))
elif items > 2:
# y m d
day = int(date[2])
xxx = float(24.0 * (date[2] - day))
hour = int(xxx)
xxx = float(60.0 * (xxx - hour))
minute = int(xxx)
xxx = float(60.0 * (xxx - minute))
second = int(xxx)
xxx = float(1000.0 * (xxx - second))
msec = int(xxx)
xxx = float(1000.0 * (xxx - msec))
usec = int(xxx)
xxx = int(1000.0 * (xxx - usec))
nsec = int(xxx)
psec = int(1000.0 * (xxx - nsec))
else:
raise ValueError("Invalid epoch16 components")
if year < 0:
raise ValueError("Illegal epoch field")
if (
year == 9999
and month == 12
and day == 31
and hour == 23
and minute == 59
and second == 59
and msec == 999
and usec == 999
and nsec == 999
and psec == 999
):
epoch.append(-1.0e31)
epoch.append(-1.0e31)
elif (
(year > 9999)
or (month < 0 or month > 12)
or (hour < 0 or hour > 23)
or (minute < 0 or minute > 59)
or (second < 0 or second > 59)
or (msec < 0 or msec > 999)
or (usec < 0 or usec > 999)
or (nsec < 0 or nsec > 999)
or (psec < 0 or psec > 999)
):
epoch = CDFepoch._computeEpoch16(year, month, day, hour, minute, second, msec, usec, nsec, psec)
else:
if month == 0:
if day < 1 or day > 366:
epoch = CDFepoch._computeEpoch16(year, month, day, hour, minute, second, msec, usec, nsec, psec)
else:
if day < 1 or day > 31:
epoch = CDFepoch._computeEpoch16(year, month, day, hour, minute, second, msec, usec, nsec, psec)
if month == 0:
daysSince0AD = CDFepoch._JulianDay(year, 1, 1) + (day - 1) - 1721060
else:
daysSince0AD = CDFepoch._JulianDay(year, month, day) - 1721060
secInDay = (3600 * hour) + (60 * minute) + second
epoch16_0 = float(86400.0 * daysSince0AD) + float(secInDay)
epoch16_1 = float(psec) + float(1000.0 * nsec) + float(1000000.0 * usec) + float(1000000000.0 * msec)
epoch.append(epoch16_0)
epoch.append(epoch16_1)
cepoch = complex(epoch[0], epoch[1])
epochs.append(cepoch)
return _squeeze_or_scalar_complex(epochs)
@staticmethod
def _calc_from_julian(epoch0: npt.ArrayLike, epoch1: npt.ArrayLike) -> npt.NDArray:
"""Calculate the date and time from epoch input
Parameters
----------
epoch0 : int, float, array-like
First element of an epoch array (epoch time in seconds)
epoch1 : float, array-like
Second element of an epoch array (epoch time in picoseconds)
Returns
-------
out : array-like
Array of 10 integers (year, month, day, hour, minute, second,
millisecond, microsecond, nanosecond, picosecond) if a single value
is input. For array input, the shape is altered by adding another
axis of length 10 (holding the same values).
"""
# Cast input as an array for consistent handling of scalars and lists
second_ce = np.asarray(epoch0)
# Determine epoch minutes, hours, and days
minute_ce = second_ce / 60.0
hour_ce = minute_ce / 60.0
day_ce = hour_ce / 24.0
# Calculate the julian day, using integer rounding
jd = (1721060 + day_ce).astype(int)
l = jd + 68569
n = (4 * l / 146097).astype(int)
l = l - ((146097 * n + 3) / 4).astype(int)
i = (4000 * (l + 1) / 1461001).astype(int)
l += 31 - (1461 * i / 4).astype(int)
j = (80 * l / 2447).astype(int)
dy = l - (2447 * j / 80).astype(int)
# Continue to get month and year
l = (j / 11).astype(int)
mo = j + 2 - 12 * l
yr = 100 * (n - 49) + i + l
# Finish calculating the epoch hours, minutes, and seconds
hr = (hour_ce % 24.0).astype(int)
mn = (minute_ce % 60.0).astype(int)
sc = (second_ce % 60.0).astype(int)
# Get the fractional seconds
msec = np.asarray(epoch1)
ps = (msec % 1000.0).astype(int)
msec = msec / 1000.0
ns = (msec % 1000.0).astype(int)
msec = msec / 1000.0
mus = (msec % 1000.0).astype(int)
msec = msec / 1000.0
ms = msec.astype(int)
# Recast the output as integers or lists
if second_ce.shape == ():
out = np.array([int(yr), int(mo), int(dy), int(hr), int(mn), int(sc), int(ms), int(mus), int(ns), int(ps)])
else:
out = np.array([yr, mo, dy, hr, mn, sc, ms, mus, ns, ps]).transpose()
return out
@staticmethod
def breakdown_epoch16(epochs: cdf_epoch16_type) -> npt.NDArray:
"""
Calculate date and time from epochs
Parameters
----------
epochs : array-like
Single, list, tuple, or np.array of epoch values
Returns
-------
components : ndarray
List or array of date and time values. The last axis contains
(in order): year, month, day, hour, minute, second, millisecond,
microsecond, nanosecond, and picosecond
Notes
-----
If a bad epoch (-1.0e31 for the real and imaginary components) is
supplied, a fill date of 9999-12-31 23:59:59 and 999 ms, 999 us,
999 ns, and 999 ps is returned
"""
if isinstance(epochs, (complex, np.complex128)) or isinstance(epochs, (list, tuple, np.ndarray)):
new_epochs = np.asarray(epochs)
if new_epochs.shape == ():
cshape = []
new_epochs = np.array([epochs])
else:
cshape = list(new_epochs.shape)
else:
raise TypeError("Bad data for epochs: {:}".format(type(epochs)))
cshape.append(10)
components = np.full(shape=cshape, fill_value=[9999, 12, 31, 23, 59, 59, 999, 999, 999, 999])
for i, epoch16 in enumerate(new_epochs):
# Ignore fill values
if (epoch16.real != -1.0e31) or (epoch16.imag != -1.0e31) or np.isnan(epoch16):
if (epoch16.real == -1.0e30) or (epoch16.imag == -1.0e30):
components[i] = [0, 1, 1, 0, 0, 0, 0, 0, 0, 0]
continue
esec = -epoch16.real if epoch16.real < 0.0 else epoch16.real
efra = -epoch16.imag if epoch16.imag < 0.0 else epoch16.imag
if len(components.shape) == 1:
components = CDFepoch._calc_from_julian(esec, efra)
else:
components[i] = CDFepoch._calc_from_julian(esec, efra)
return components
@staticmethod
def _computeEpoch16(y: int, m: int, d: int, h: int, mn: int, s: int, ms: int, msu: int, msn: int, msp: int) -> List[float]:
if m == 0:
daysSince0AD = CDFepoch._JulianDay(y, 1, 1) + (d - 1) - 1721060
else:
if m < 0:
y = y - 1
m = 13 + m
daysSince0AD = CDFepoch._JulianDay(y, m, d) - 1721060
if daysSince0AD < 0:
raise ValueError("Illegal epoch")
epoch = []
epoch.append(float(86400.0 * daysSince0AD + 3600.0 * h + 60.0 * mn) + float(s))
epoch.append(float(msp) + float(1000.0 * msn) + float(1000000.0 * msu) + math.pow(10.0, 9) * ms)
if epoch[1] < 0.0 or epoch[1] >= math.pow(10.0, 12):
if epoch[1] < 0.0:
sec = int(epoch[1] / math.pow(10.0, 12))
tmp = epoch[1] - sec * math.pow(10.0, 12)
if tmp != 0.0 and tmp != -0.0:
epoch[0] = epoch[0] + sec - 1
epoch[1] = math.pow(10.0, 12.0) + tmp
else:
epoch[0] = epoch[0] + sec
epoch[1] = 0.0
else:
sec = int(epoch[1] / math.pow(10.0, 12))
tmp = epoch[1] - sec * math.pow(10.0, 12)
if tmp != 0.0 and tmp != -0.0:
epoch[1] = tmp
epoch[0] = epoch[0] + sec
else:
epoch[1] = 0.0
epoch[0] = epoch[0] + sec
if epoch[0] < 0.0:
raise ValueError("Illegal epoch")
else:
return epoch
@staticmethod
def epochrange_epoch16(
epochs: cdf_epoch16_type, starttime: Optional[epoch_types] = None, endtime: Optional[epoch_types] = None
) -> Optional[np.ndarray]:
new_epochs = np.atleast_1d(epochs)
stime: Tuple[Union[float, np.float64], Union[float, np.float64]]
etime: Tuple[Union[float, np.float64], Union[float, np.float64]]
if starttime is None:
stime = (-1.0e31, -1.0e31)
else:
if isinstance(starttime, complex) or isinstance(starttime, np.complex128):
stime = (starttime.real, starttime.imag)
elif isinstance(starttime, list):
sstime = complex(CDFepoch.compute_epoch16(starttime))
stime = (sstime.real, sstime.imag)
else:
raise ValueError("Bad start time")
if endtime is not None:
if isinstance(endtime, complex) or isinstance(endtime, np.complex128):
etime = (endtime.real, endtime.imag)
elif isinstance(endtime, list):
eetime = complex(CDFepoch.compute_epoch16(endtime))
etime = (eetime.real, eetime.imag)
else:
raise ValueError("Bad end time")
else:
etime = (1.0e31, 1.0e31)
if stime[0] > etime[0] or (stime[0] == etime[0] and stime[1] > etime[1]):
raise ValueError("Invalid start/end time")
count = len(new_epochs)
epoch16 = []
for x in range(0, count):
epoch16.append(new_epochs[x].real)
epoch16.append(new_epochs[x].imag)
count = count * 2
indx = []
if epoch16[0] > etime[0] or (epoch16[0] == etime[0] and epoch16[1] > etime[1]):
return None
if epoch16[count - 2] < stime[0] or (epoch16[count - 2] == stime[0] and epoch16[count - 1] < stime[1]):
return None
for x in range(0, count, 2):
if epoch16[x] < stime[0]:
continue
elif epoch16[x] == stime[0]:
if epoch16[x + 1] < stime[1]:
continue
else:
indx.append(int(x / 2))
break
else:
indx.append(int(x / 2))
break
if len(indx) == 0:
indx.append(0)
hasadded = False
for x in range(0, count, 2):
if epoch16[x] < etime[0]:
continue
elif epoch16[x] == etime[0]:
if epoch16[x + 1] > etime[1]:
indx.append(int((x - 1) / 2))
hasadded = True
break
else:
indx.append(int((x - 1) / 2))
hasadded = True
break
if not hasadded:
indx.append(int(count / 2) - 1)
return np.arange(indx[0], indx[1] + 1, step=1)
@staticmethod
def encode_epoch(epochs: cdf_epoch_type, iso_8601: bool = True) -> encoded_type:
new_epochs = np.atleast_1d(epochs)
count = len(new_epochs)
encodeds = []
for x in range(0, count):
epoch = new_epochs[x]
if epoch == -1.0e31:
if iso_8601:
encoded = "9999-12-31T23:59:59.999"
else:
encoded = "31-Dec-9999 23:59:59.999"
else:
encoded = CDFepoch._encodex_epoch(epoch, iso_8601)
if count == 1:
return encoded
encodeds.append(encoded)
return encodeds
@staticmethod
def _encodex_epoch(epoch: cdf_epoch_type, iso_8601: bool = True) -> str:
components = CDFepoch.breakdown_epoch(epoch)
if iso_8601:
# year-mm-ddThh:mm:ss.mmm
encoded = str(components[0]).zfill(4)
encoded += "-"
encoded += str(components[1]).zfill(2)
encoded += "-"
encoded += str(components[2]).zfill(2)
encoded += "T"
encoded += str(components[3]).zfill(2)
encoded += ":"
encoded += str(components[4]).zfill(2)
encoded += ":"
encoded += str(components[5]).zfill(2)
encoded += "."
encoded += str(components[6]).zfill(3)
else:
# dd-mmm-year hh:mm:ss.mmm
encoded = str(components[2]).zfill(2)
encoded += "-"
encoded += CDFepoch.month_Token[components[1] - 1]
encoded += "-"
encoded += str(components[0]).zfill(4)
encoded += " "
encoded += str(components[3]).zfill(2)
encoded += ":"
encoded += str(components[4]).zfill(2)
encoded += ":"
encoded += str(components[5]).zfill(2)
encoded += "."
encoded += str(components[6]).zfill(3)
return encoded
@staticmethod
def compute_epoch(dates: npt.ArrayLike) -> Union[float, npt.NDArray]:
# TODO Add docstring. What is the output format?
new_dates = np.atleast_2d(dates)
count = new_dates.shape[0]
epochs = []
for x in range(0, count):
date = new_dates[x]
year = date[0]
month = date[1]
items = len(date)
if items > 6:
# y m d h m s ms
day = int(date[2])
hour = int(date[3])
minute = int(date[4])
second = int(date[5])
msec = int(date[6])
elif items > 5:
# y m d h m s
day = int(date[2])
hour = int(date[3])
minute = int(date[4])
second = int(date[5])
msec = int(1000.0 * (date[5] - second))
elif items > 4:
# y m d h m
day = int(date[2])
hour = int(date[3])
minute = int(date[4])
xxx = float(60.0 * (date[4] - minute))
second = int(xxx)
msec = int(1000.0 * (xxx - second))
elif items > 3:
# y m d h
day = int(date[2])
hour = int(date[3])
xxx = float(60.0 * (date[3] - hour))
minute = int(xxx)
xxx = float(60.0 * (xxx - minute))
second = int(xxx)
msec = int(1000.0 * (xxx - second))
elif items > 2:
# y m d
day = int(date[2])
xxx = float(24.0 * (date[2] - day))
hour = int(xxx)
xxx = float(60.0 * (xxx - hour))
minute = int(xxx)
xxx = float(60.0 * (xxx - minute))
second = int(xxx)
msec = int(1000.0 * (xxx - second))
else:
raise ValueError("Invalid epoch components")
if year == 9999 and month == 12 and day == 31 and hour == 23 and minute == 59 and second == 59 and msec == 999:
    epochs.append(-1.0e31)
    continue
if year < 0:
raise ValueError("ILLEGAL_EPOCH_FIELD")
if (
(year > 9999)
or (month < 0 or month > 12)
or (hour < 0 or hour > 23)
or (minute < 0 or minute > 59)
or (second < 0 or second > 59)
or (msec < 0 or msec > 999)
):
epochs.append(CDFepoch._computeEpoch(year, month, day, hour, minute, second, msec))
if month == 0:
if day < 1 or day > 366:
epochs.append(CDFepoch._computeEpoch(year, month, day, hour, minute, second, msec))
else:
if day < 1 or day > 31:
epochs.append(CDFepoch._computeEpoch(year, month, day, hour, minute, second, msec))
if hour == 0 and minute == 0 and second == 0:
if msec < 0 or msec > 86399999:
epochs.append(CDFepoch._computeEpoch(year, month, day, hour, minute, second, msec))
if month == 0:
daysSince0AD = CDFepoch._JulianDay(year, 1, 1) + (day - 1) - 1721060
else:
daysSince0AD = CDFepoch._JulianDay(year, month, day) - 1721060
if hour == 0 and minute == 0 and second == 0:
msecInDay = msec
else:
msecInDay = (3600000 * hour) + (60000 * minute) + (1000 * second) + msec
if count == 1:
return np.array(86400000.0 * daysSince0AD + msecInDay)
epochs.append(86400000.0 * daysSince0AD + msecInDay)
return _squeeze_or_scalar_real(epochs)
@staticmethod
def _computeEpoch(y: int, m: int, d: int, h: int, mn: int, s: int, ms: int) -> float:
if m == 0:
daysSince0AD = CDFepoch._JulianDay(y, 1, 1) + (d - 1) - 1721060
else:
if m < 0:
y = y - 1
m = 13 + m
daysSince0AD = CDFepoch._JulianDay(y, m, d) - 1721060
if daysSince0AD < 1:
raise ValueError("ILLEGAL_EPOCH_FIELD")
msecInDay = float(3600000.0 * h + 60000.0 * mn + 1000.0 * s) + float(ms)
msecFromEpoch = float(86400000.0 * daysSince0AD + msecInDay)
if msecFromEpoch < 0.0:
return -1.0
else:
return msecFromEpoch
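# Worked example (illustrative, not part of the original source): for
# 2000-01-01T00:00:00.000, daysSince0AD comes out to 730485, so the returned
# CDF_EPOCH value is 730485 * 86400000.0 = 63113904000000.0 milliseconds.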
@staticmethod
def breakdown_epoch(epochs: cdf_epoch_type) -> np.ndarray:
"""Calculate date and time from epochs
Parameters
----------
epochs : int, float, or array-like
Single, list, tuple, or np.array of epoch values
Returns
-------
components : list
List or array of date and time values. The last axis contains
(in order): year, month, day, hour, minute, second, and millisecond
Notes
-----
If a bad epoch (-1.0e31) is supplied, a fill date of
9999-12-31 23:59:59 and 999 ms is returned.
"""
# Test input and cast it as an array of floats
if (
isinstance(epochs, float)
or isinstance(epochs, np.float64)
or isinstance(epochs, list)
or isinstance(epochs, tuple)
or isinstance(epochs, np.ndarray)
or isinstance(epochs, int)
):
new_epochs = np.asarray(epochs).astype(float)
if new_epochs.shape == ():
cshape = []
new_epochs = np.array([epochs], dtype=float)
else:
cshape = list(new_epochs.shape)
else:
raise TypeError("Bad data for epochs: {:}".format(type(epochs)))
# Initialize output to default values
cshape.append(7)
components = np.full(shape=cshape, fill_value=[9999, 12, 31, 23, 59, 59, 999])
for i, epoch in enumerate(new_epochs):
# Ignore fill values and NaNs
if (epoch != -1.0e31) and not np.isnan(epoch):
esec = -epoch / 1000.0 if epoch < 0.0 else epoch / 1000.0
date_time = CDFepoch._calc_from_julian(esec, 0.0)
ms = (epoch % 1000.0).astype(int)
date_time[..., 6] = int(ms) if ms.shape == () else ms
if len(components.shape) == 1:
components = date_time[..., :7]
else:
components[i] = date_time[..., :7]
elif epoch == 0:
components[i] = [0, 1, 1, 0, 0, 0, 0]
return np.squeeze(components)
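# Hedged usage sketch (not part of the original source; values are
# illustrative): breakdown_epoch is the inverse of compute_epoch for valid
# component lists.
#
# >>> ep = CDFepoch.compute_epoch([2017, 1, 1, 12, 0, 0, 0])
# >>> CDFepoch.breakdown_epoch(ep)   # -> [2017, 1, 1, 12, 0, 0, 0]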
@staticmethod
def epochrange_epoch(
epochs: epoch_types, starttime: Optional[epoch_types] = None, endtime: Optional[epoch_types] = None
) -> np.ndarray:
if isinstance(epochs, (float, np.float64)):
pass
elif isinstance(epochs, (list, tuple, np.ndarray)):
if isinstance(epochs[0], (float, np.float64)):
pass
else:
raise TypeError("Bad data")
else:
raise TypeError("Bad data")
stime: Union[float, np.float64]
if starttime is None:
stime = 0.0
else:
if isinstance(starttime, (float, int, np.float64)):
stime = starttime
elif isinstance(starttime, (list, tuple)):
stime = float(CDFepoch.compute_epoch(starttime))
else:
raise TypeError("Bad start time")
if endtime is not None:
if isinstance(endtime, (float, int, np.float64)):
etime = endtime
elif isinstance(endtime, (list, tuple)):
etime = float(CDFepoch.compute_epoch(endtime))
else:
raise TypeError("Bad end time")
else:
etime = 1.0e31
if stime > etime:
raise ValueError("Invalid start/end time")
new_epochs = np.array(epochs)
return np.where(np.logical_and(new_epochs >= stime, new_epochs <= etime))[0]
@staticmethod
def parse(value: Union[str, Tuple[str, ...], List[str]]) -> np.ndarray:
"""
Parses the provided date/time string(s) into CDF epoch value(s).
For CDF_EPOCH:
The string has to be in the form of 'dd-mmm-yyyy hh:mm:ss.xxx' or
'yyyy-mm-ddThh:mm:ss.xxx' (in iso_8601). The string is the output
from encode function.
For CDF_EPOCH16:
The string has to be in the form of
'dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn.ppp' or
'yyyy-mm-ddThh:mm:ss.mmmuuunnnppp' (in iso_8601). The string is
the output from encode function.
For TT2000:
The string has to be in the form of
'dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn' or
'yyyy-mm-ddThh:mm:ss.mmmuuunnn' (in iso_8601). The string is
the output from encode function.
"""
if isinstance(value, (list, tuple)) and not isinstance(value[0], str):
raise TypeError("should be a string or a list of string")
elif not isinstance(value, (list, tuple, str)):
raise TypeError("Invalid value... should be a string or a list of string")
else:
if isinstance(value, (list, tuple)):
num = len(value)
epochs = []
for x in range(num):
epochs.append(CDFepoch._parse_epoch(value[x]))
return np.squeeze(epochs)
else:
return np.squeeze(CDFepoch._parse_epoch(value))
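# Hedged usage sketch (not part of the original source): the epoch type is
# inferred from the string layout described in the docstring above.
#
# >>> CDFepoch.parse("2017-01-01T12:00:00.000")           # 23 chars -> CDF_EPOCH (float)
# >>> CDFepoch.parse("2017-01-01T12:00:00.000000000")     # 29 chars -> CDF_TIME_TT2000 (int)
# >>> CDFepoch.parse("2017-01-01T12:00:00.000000000000")  # 32 chars -> CDF_EPOCH16 (complex)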
@staticmethod
def _parse_epoch(value: str) -> Union[int, float, complex]:
if len(value) in (23, 24):
# CDF_EPOCH
if value.lower() in ("31-dec-9999 23:59:59.999", "9999-12-31t23:59:59.999"):
return -1.0e31
else:
if len(value) == 24:
date = re.findall(r"(\d+)\-(.+)\-(\d+) (\d+)\:(\d+)\:(\d+)\.(\d+)", value)
dd = int(date[0][0])
mm = CDFepoch._month_index(date[0][1])
yy = int(date[0][2])
hh = int(date[0][3])
mn = int(date[0][4])
ss = int(date[0][5])
ms = int(date[0][6])
else:
date = re.findall(r"(\d+)\-(\d+)\-(\d+)T(\d+)\:(\d+)\:(\d+)\.(\d+)", value)
yy = int(date[0][0])
mm = int(date[0][1])
dd = int(date[0][2])
hh = int(date[0][3])
mn = int(date[0][4])
ss = int(date[0][5])
ms = int(date[0][6])
return float(CDFepoch.compute_epoch([yy, mm, dd, hh, mn, ss, ms]))
elif len(value) == 36 or (len(value) == 32 and value[10].lower() == "t"):
# CDF_EPOCH16
if value.lower() in ("31-dec-9999 23:59:59.999.999.999.999", "9999-12-31t23:59:59.999999999999"):
return -1.0e31 - 1.0e31j
else:
if len(value) == 36:
date = re.findall(r"(\d+)\-(.+)\-(\d+) (\d+)\:(\d+)\:(\d+)\.(\d+)\.(\d+)\.(\d+)\.(\d+)", value)
dd = int(date[0][0])
mm = CDFepoch._month_index(date[0][1])
yy = int(date[0][2])
hh = int(date[0][3])
mn = int(date[0][4])
ss = int(date[0][5])
ms = int(date[0][6])
us = int(date[0][7])
ns = int(date[0][8])
ps = int(date[0][9])
else:
date = re.findall(r"(\d+)\-(\d+)\-(\d+)T(\d+)\:(\d+)\:(\d+)\.(\d+)", value)
yy = int(date[0][0])
mm = int(date[0][1])
dd = int(date[0][2])
hh = int(date[0][3])
mn = int(date[0][4])
ss = int(date[0][5])
subs = int(date[0][6])
ms = int(subs / 1000000000)
subms = int(subs % 1000000000)
us = int(subms / 1000000)
subus = int(subms % 1000000)
ns = int(subus / 1000)
ps = int(subus % 1000)
return complex(CDFepoch.compute_epoch16([yy, mm, dd, hh, mn, ss, ms, us, ns, ps]))
elif len(value) == 29 or (len(value) == 32 and value[11] == " "):
# CDF_TIME_TT2000
value = value.lower()
if value == "9999-12-31t23:59:59.999999999" or value == "31-dec-9999 23:59:59.999.999.999":
return -9223372036854775808
elif value == "0000-01-01t00:00.000000000" or value == "01-jan-0000 00:00.000.000.000":
return -9223372036854775807
else:
if len(value) == 29:
date = re.findall(r"(\d+)\-(\d+)\-(\d+)t(\d+)\:(\d+)\:(\d+)\.(\d+)", value)
yy = int(date[0][0])
mm = int(date[0][1])
dd = int(date[0][2])
hh = int(date[0][3])
mn = int(date[0][4])
ss = int(date[0][5])
subs = int(date[0][6])
ms = int(subs / 1000000)
subms = int(subs % 1000000)
us = int(subms / 1000)
ns = int(subms % 1000)
else:
date = re.findall(r"(\d+)\-(.+)\-(\d+) (\d+)\:(\d+)\:(\d+)\.(\d+)\.(\d+)\.(\d+)", value)
dd = int(date[0][0])
mm = CDFepoch._month_index(date[0][1])
yy = int(date[0][2])
hh = int(date[0][3])
mn = int(date[0][4])
ss = int(date[0][5])
ms = int(date[0][6])
us = int(date[0][7])
ns = int(date[0][8])
return int(CDFepoch.compute_tt2000([yy, mm, dd, hh, mn, ss, ms, us, ns]))
else:
raise ValueError("Invalid cdf epoch type...")
@staticmethod
def _month_index(month: str) -> int:
if month.lower() == "jan":
return 1
elif month.lower() == "feb":
return 2
elif month.lower() == "mar":
return 3
elif month.lower() == "apr":
return 4
elif month.lower() == "may":
return 5
elif month.lower() == "jun":
return 6
elif month.lower() == "jul":
return 7
elif month.lower() == "aug":
return 8
elif month.lower() == "sep":
return 9
elif month.lower() == "oct":
return 10
elif month.lower() == "nov":
return 11
elif month.lower() == "dec":
return 12
else:
return -1
|
class CDFepoch:
'''
Convert between CDF-based epochs, np.datetime64, and Unix time.
There are three (3) epoch data types in CDF:
1. CDF_EPOCH is milliseconds since Year 0 represented as a
single double (float in Python),
2. CDF_EPOCH16 is picoseconds since Year 0 represented as
2-doubles (complex in Python), and
3. CDF_TIME_TT2000 (TT2000 as short) is nanoseconds since J2000 with
leap seconds, represented by an 8-byte integer (int in Python).
In Numpy, they are np.float64, np.complex128 and np.int64, respectively.
All these epoch values can come from the CDF.varget function.
Example
-------
>>> import cdflib
# Convert to an epoch
>>> epoch = cdflib.cdfepoch.compute_epoch([2017,1,1,1,1,1,111])
# Convert from an epoch
>>> time = cdflib.cdfepoch.to_datetime(epoch) # Or pass epochs via CDF.varget.
'''
@staticmethod
def encode(epochs: epoch_types, iso_8601: bool = True) -> encoded_type:
'''
Converts one or more epochs into UTC strings. The input epoch
format is deduced from the argument type.
Parameters
----------
epochs: int, float, list, complex
One or more CDF epochs in one of three formats:
1. CDF_EPOCH: The input should be either a float or list of floats
(in numpy, a np.float64 or a np.ndarray of np.float64)
2. CDF_EPOCH16: The input should be either a complex or list of
complex(in numpy, a np.complex128 or a np.ndarray of np.complex128)
3. TT2000: The input should be either a int or list of ints
(in numpy, a np.int64 or a np.ndarray of np.int64)
iso_8601: bool
The return time format. If ISO 8601 is True, the format is,
for example, 2008-02-02T06:08:10.012014016, otherwise
the format is 02-Feb-2008 06:08:10.012.014.016.
'''
pass
@staticmethod
def breakdown(epochs: epoch_types) -> np.ndarray:
'''
Returns
-------
np.ndarray
1D if scalar input, 2D otherwise.
'''
pass
@staticmethod
def _compose_date(
nat_positions: npt.NDArray,
years: npt.NDArray,
months: npt.NDArray,
days: npt.NDArray,
hours: Optional[npt.NDArray] = None,
minutes: Optional[npt.NDArray] = None,
seconds: Optional[npt.NDArray] = None,
milliseconds: Optional[npt.NDArray] = None,
microseconds: Optional[npt.NDArray] = None,
nanoseconds: Optional[npt.NDArray] = None,
) -> npt.NDArray[np.datetime64]:
'''
Take date components and return a numpy datetime array.
'''
pass
@classmethod
def to_datetime(cls, cdf_time: epoch_types) -> npt.NDArray[np.datetime64]:
'''
Converts CDF epoch argument to numpy.datetime64.
Parameters:
cdf_time: NumPy scalar/arrays to convert. np.int64 will be converted to cdf_tt2000, np.complex128 will be converted to cdf_epoch16, and floats will be converted to cdf_epoch.
Notes:
Because of datetime64 limitations, CDF_EPOCH16 precision is only kept to the nearest nanosecond.
'''
pass
@staticmethod
def unixtime(cdf_time: npt.ArrayLike) -> Union[float, npt.NDArray]:
'''
Converts CDF epoch argument into seconds after 1970-01-01. This method
converts a scalar, or array-like. Precision is only kept to the
nearest microsecond.
'''
pass
@staticmethod
def timestamp_to_cdfepoch(unixtime_data: npt.ArrayLike) -> np.ndarray:
'''
Converts a unix timestamp to CDF_EPOCH, the number of milliseconds since the year 0.
'''
pass
@staticmethod
def timestamp_to_cdfepoch16(unixtime_data: npt.ArrayLike) -> np.ndarray:
'''
Converts a unix timestamp to CDF_EPOCH16
'''
pass
@staticmethod
def timestamp_to_tt2000(unixtime_data: npt.ArrayLike) -> np.ndarray:
'''
Converts a unix timestamp to CDF_TIME_TT2000
'''
pass
@staticmethod
def compute(datetimes: npt.ArrayLike) -> Union[int, float, complex, npt.NDArray]:
'''
Computes the provided date/time components into CDF epoch value(s).
For CDF_EPOCH:
For computing into a CDF_EPOCH value, each date/time element should
have exactly seven (7) components, as year, month, day, hour, minute,
second and millisecond, in a list. For example:
[[2017,1,1,1,1,1,111],[2017,2,2,2,2,2,222]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to seven) components. The last component, if
not the 7th, can be a float that can have a fraction of the unit.
For CDF_EPOCH16:
They should have exactly ten (10) components, as year,
month, day, hour, minute, second, millisecond, microsecond, nanosecond
and picosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789,999],[2017,2,2,2,2,2,987,654,321,999]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to ten) components. The last component, if
not the 10th, can be a float that can have a fraction of the unit.
For TT2000:
Each TT2000 typed date/time should have exactly nine (9) components, as
year, month, day, hour, minute, second, millisecond, microsecond,
and nanosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789],[2017,2,2,2,2,2,987,654,321]]
Or, call function compute_tt2000 directly, instead, with at least three
(3) first (up to nine) components. The last component, if
not the 9th, can be a float that can have a fraction of the unit.
'''
pass
@staticmethod
def findepochrange(
epochs: epochs_type, starttime: Optional[epoch_types] = None, endtime: Optional[epoch_types] = None
) -> np.ndarray:
'''
Finds the record range within the start and end time from values
of a CDF epoch data type. It returns a list of record numbers.
If the start time is not provided, then it is
assumed to be the minimum possible value. If the end time is not
provided, then the maximum possible value is assumed. The epochs are
assumed to be in chronological order. The start and end times
should have the proper number of date/time components, corresponding
to the epoch's data type.
The start/end times should either be given in epoch units, or in the list
format described in the "compute_epoch/epoch16/tt2000" section.
'''
pass
@staticmethod
def encode_tt2000(tt2000: cdf_tt2000_type, iso_8601: bool = True) -> encoded_type:
pass
@staticmethod
def breakdown_tt2000(tt2000: cdf_tt2000_type) -> np.ndarray:
'''
Breaks down the epoch(s) into UTC components.
Calculate date and time from cdf_time_tt2000 integers
Parameters
----------
epochs : array-like
Single, list, tuple, or np.array of tt2000 values
Returns
-------
components : ndarray
List or array of date and time values. The last axis contains
(in order): year, month, day, hour, minute, second, millisecond,
microsecond, and nanosecond
Notes
-----
If a bad epoch is supplied, a fill date of 9999-12-31 23:59:59 and 999 ms, 999 us, and
999 ns is returned.
'''
pass
@staticmethod
def compute_tt2000(datetimes: npt.ArrayLike) -> Union[int, npt.NDArray[np.int64]]:
pass
@staticmethod
def _LeapSecondsfromYMD(year: int, month: int, day: int) -> float:
pass
@staticmethod
def _LeapSecondsfromJ2000(nanosecs: npt.ArrayLike) -> npt.NDArray:
pass
@staticmethod
def _LoadLeapNanoSecondsTable() -> None:
pass
@staticmethod
def _EPOCHbreakdownTT2000(epoch: npt.ArrayLike) -> npt.NDArray:
pass
@staticmethod
def epochrange_tt2000(
epochs: cdf_tt2000_type, starttime: Optional[epoch_types] = None, endtime: Optional[epoch_types] = None
) -> npt.NDArray:
pass
@staticmethod
def encode_epoch16(epochs: cdf_epoch16_type, iso_8601: bool = True) -> encoded_type:
pass
@staticmethod
def _encodex_epoch16(epoch16: cdf_epoch16_type, iso_8601: bool = True) -> str:
pass
@staticmethod
def _JulianDay(y: int, m: int, d: int) -> int:
pass
@staticmethod
def compute_epoch16(datetimes: npt.ArrayLike) -> Union[complex, npt.NDArray[np.complex128]]:
pass
@staticmethod
def _calc_from_julian(epoch0: npt.ArrayLike, epoch1: npt.ArrayLike) -> npt.NDArray:
'''Calculate the date and time from epoch input
Parameters
----------
epoch0 : int, float, array-like
First element of an epoch array (epoch time in seconds)
epoch1 : float, array-like
Second element of an epoch array (epoch time in picoseconds)
Returns
-------
out : array-like
Array of 10 integers (year, month, day, hour, minute, second,
millisecond, microsecond, nanosecond, picosecond) if a single value
is input. For array input, the shape is altered by adding another
axis of length 10 (holding the same values).
'''
pass
@staticmethod
def breakdown_epoch16(epochs: cdf_epoch16_type) -> npt.NDArray:
'''
Calculate date and time from epochs
Parameters
----------
epochs : array-like
Single, list, tuple, or np.array of epoch values
Returns
-------
components : ndarray
List or array of date and time values. The last axis contains
(in order): year, month, day, hour, minute, second, millisecond,
microsecond, nanosecond, and picosecond
Notes
-----
If a bad epoch (-1.0e31 for the real and imaginary components) is
supplied, a fill date of 9999-12-31 23:59:59 and 999 ms, 999 us,
999 ns, and 999 ps is returned
'''
pass
@staticmethod
def _computeEpoch16(y: int, m: int, d: int, h: int, mn: int, s: int, ms: int, msu: int, msn: int, msp: int) -> List[float]:
pass
@staticmethod
def epochrange_epoch16(
epochs: cdf_epoch16_type, starttime: Optional[epoch_types] = None, endtime: Optional[epoch_types] = None
) -> Optional[np.ndarray]:
pass
@staticmethod
def encode_epoch16(epochs: cdf_epoch16_type, iso_8601: bool = True) -> encoded_type:
pass
@staticmethod
def _encodex_epoch16(epoch16: cdf_epoch16_type, iso_8601: bool = True) -> str:
pass
@staticmethod
def compute_epoch16(datetimes: npt.ArrayLike) -> Union[complex, npt.NDArray[np.complex128]]:
pass
@staticmethod
def _computeEpoch16(y: int, m: int, d: int, h: int, mn: int, s: int, ms: int, msu: int, msn: int, msp: int) -> List[float]:
pass
@staticmethod
def breakdown_epoch16(epochs: cdf_epoch16_type) -> npt.NDArray:
'''Calculate date and time from epochs
Parameters
----------
epochs : int, float, or array-like
Single, list, tuple, or np.array of epoch values
Returns
-------
components : list
List or array of date and time values. The last axis contains
(in order): year, month, day, hour, minute, second, and millisecond
Notes
-----
If a bad epoch (-1.0e31) is supplied, a fill date of
9999-12-31 23:59:59 and 999 ms is returned.
'''
pass
@staticmethod
def epochrange_epoch16(
epochs: cdf_epoch16_type, starttime: Optional[epoch_types] = None, endtime: Optional[epoch_types] = None
) -> Optional[np.ndarray]:
pass
@staticmethod
def parse(value: Union[str, Tuple[str, ...], List[str]]) -> np.ndarray:
'''
Parses the provided date/time string(s) into CDF epoch value(s).
For CDF_EPOCH:
The string has to be in the form of 'dd-mmm-yyyy hh:mm:ss.xxx' or
'yyyy-mm-ddThh:mm:ss.xxx' (in iso_8601). The string is the output
from encode function.
For CDF_EPOCH16:
The string has to be in the form of
'dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn.ppp' or
'yyyy-mm-ddThh:mm:ss.mmmuuunnnppp' (in iso_8601). The string is
the output from encode function.
For TT2000:
The string has to be in the form of
'dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn' or
'yyyy-mm-ddThh:mm:ss.mmmuuunnn' (in iso_8601). The string is
the output from encode function.
'''
pass
@staticmethod
def _parse_epoch(value: str) -> Union[int, float, complex]:
pass
@staticmethod
def _month_index(month: str) -> int:
pass
| 71 | 16 | 46 | 3 | 36 | 7 | 7 | 0.18 | 0 | 17 | 0 | 0 | 0 | 0 | 35 | 35 | 1,742 | 145 | 1,350 | 379 | 1,260 | 248 | 1,069 | 324 | 1,033 | 21 | 0 | 5 | 236 |
147,652 |
MAVENSDC/cdflib
|
cdflib/dataclasses.py
|
cdflib.dataclasses.VDRInfo
|
class VDRInfo:
"""
Variable data record info.
"""
#: Name of the variable.
Variable: str
#: Variable number.
Num: int
#: Variable type: zVariable or rVariable.
Var_Type: str
#: Variable CDF data type.
Data_Type: int
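#: Description of the CDF data type.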
Data_Type_Description: str
#: Number of elements of the variable.
Num_Elements: int
#: Dimensionality of variable record.
Num_Dims: int
#: Shape of the variable record.
Dim_Sizes: List[int]
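#: Sparseness of the variable records.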
Sparse: str
#: Maximum written variable number (0-based).
Last_Rec: int
#: Record variance.
Rec_Vary: int
#: Dimensional variance(s).
Dim_Vary: Union[List[int], List[bool]]
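#: Compression level of the variable (0 if not compressed).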
Compress: int
#: Padded value (if set).
Pad: Optional[Union[str, np.ndarray]] = None
#: Blocking factor (if variable is compressed).
Block_Factor: Optional[int] = None
|
class VDRInfo:
'''
Variable data record info.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 1 | 16 | 3 | 15 | 16 | 16 | 3 | 15 | 0 | 0 | 0 | 0 |
147,653 |
MAVENSDC/cdflib
|
cdflib/cdfwrite.py
|
cdflib.cdfwrite.CDF
|
class CDF:
"""
Creates an empty CDF file.
Parameters
----------
path :
The path name of the CDF (with or without .cdf extension)
cdf_spec : dict
The optional specification of the CDF file.
The keys for the dictionary are:
- ['Majority']: 'row_major' or 'column_major', or its
corresponding value. The default is 'column_major'.
- ['Encoding']: Data encoding scheme. See the CDF
documentation about the valid values.
Can be in string or its numeric corresponding value.
The default is 'host', which will be determined when
the script runs.
- ['Checksum']: Whether to set the data validation upon
file creation. The default is False.
- ['rDim_sizes']: The dimensional sizes, applicable
only to rVariables.
- ['Compressed']: Whether to compress the CDF at the file
level. A value of 0-9 or True/False, the
default is 0/False.
"""
version = 3
release = 9
increment = 0
CDF_VAR_NAME_LEN256 = 256
CDF_ATTR_NAME_LEN256 = 256
CDF_COPYRIGHT_LEN = 256
# CDF_STATUSTEXT_LEN = 200
CDF_PATHNAME_LEN = 512
CDF_INT1 = 1
CDF_INT2 = 2
CDF_INT4 = 4
CDF_INT8 = 8
CDF_UINT1 = 11
CDF_UINT2 = 12
CDF_UINT4 = 14
CDF_REAL4 = 21
CDF_REAL8 = 22
CDF_EPOCH = 31
CDF_EPOCH16 = 32
CDF_TIME_TT2000 = 33
CDF_BYTE = 41
CDF_FLOAT = 44
CDF_DOUBLE = 45
CDF_CHAR = 51
CDF_UCHAR = 52
NETWORK_ENCODING = 1
SUN_ENCODING = 2
VAX_ENCODING = 3
DECSTATION_ENCODING = 4
SGi_ENCODING = 5
IBMPC_ENCODING = 6
IBMRS_ENCODING = 7
HOST_ENCODING = 8
PPC_ENCODING = 9
HP_ENCODING = 11
NeXT_ENCODING = 12
ALPHAOSF1_ENCODING = 13
ALPHAVMSd_ENCODING = 14
ALPHAVMSg_ENCODING = 15
ALPHAVMSi_ENCODING = 16
ARM_LITTLE_ENCODING = 17
ARM_BIG_ENCODING = 18
VARY = -1
NOVARY = 0
ROW_MAJOR = 1
COLUMN_MAJOR = 2
# SINGLE_FILE = 1
# MULTI_FILE = 2
NO_CHECKSUM = 0
MD5_CHECKSUM = 1
OTHER_CHECKSUM = 2
GLOBAL_SCOPE = 1
VARIABLE_SCOPE = 2
# NO_COMPRESSION = 0
# RLE_COMPRESSION = 1
# HUFF_COMPRESSION = 2
# AHUFF_COMPRESSION = 3
GZIP_COMPRESSION = 5
# RLE_OF_ZEROs = 0
# NO_SPARSEARRAYS = 0
NO_SPARSERECORDS = 0
PAD_SPARSERECORDS = 1
PREV_SPARSERECORDS = 2
V3magicNUMBER_1 = "cdf30001"
V3magicNUMBER_2 = "0000ffff"
V3magicNUMBER_2c = "cccc0001"
CDR_ = 1
GDR_ = 2
rVDR_ = 3
ADR_ = 4
AgrEDR_ = 5
VXR_ = 6
VVR_ = 7
zVDR_ = 8
AzEDR_ = 9
CCR_ = 10
CPR_ = 11
SPR_ = 12
CVVR_ = 13
NUM_VXR_ENTRIES = 7
NUM_VXRlvl_ENTRIES = 3
UUIR_BASE_SIZE64 = 12
UIR_BASE_SIZE64 = 28
CDR_BASE_SIZE64 = 56
GDR_BASE_SIZE64 = 84
zVDR_BASE_SIZE64 = 88 + CDF_VAR_NAME_LEN256
rVDR_BASE_SIZE64 = 84 + CDF_VAR_NAME_LEN256
VXR_BASE_SIZE64 = 28
VVR_BASE_SIZE64 = 12
ADR_BASE_SIZE64 = 68 + CDF_ATTR_NAME_LEN256
AEDR_BASE_SIZE64 = 56
CCR_BASE_SIZE64 = 32
CPR_BASE_SIZE64 = 24
SPR_BASE_SIZE64 = 24
CVVR_BASE_SIZE64 = 24
# BLOCKING_BYTES = 131072
BLOCKING_BYTES = 65536
level = 0
def __init__(self, path: Union[str, Path], cdf_spec: Optional[Dict[str, Any]] = None, delete: bool = False):
path = pathlib.Path(path).expanduser()
major = 1
if cdf_spec is not None:
major = cdf_spec.get("Majority", major)
if isinstance(major, str):
major = self._majority_token(major)
encoding = cdf_spec.get("Encoding", 8) # default is host
if isinstance(encoding, str):
encoding = self._encoding_token(encoding)
checksum = cdf_spec.get("Checksum", False)
cdf_compression = cdf_spec.get("Compressed", 0)
if isinstance(cdf_compression, int):
if not 0 <= cdf_compression <= 9:
cdf_compression = 0
else:
cdf_compression = 6 if cdf_compression else 0
rdim_sizes: Optional[List[int]] = cdf_spec.get("rDim_sizes", None)
num_rdim: int = len(rdim_sizes) if rdim_sizes is not None else 0
else:
encoding = 8
checksum = False
cdf_compression = 0
num_rdim = 0
rdim_sizes = None
if major not in [1, 2]:
raise RuntimeError(f"Bad major: {major}")
osSystem = pf.system()
osMachine = pf.uname()[5]
if encoding == 8:
if osSystem != "SunOS" or osMachine != "sparc":
self._encoding = self.IBMPC_ENCODING
else:
self._encoding = self.SUN_ENCODING
else:
self._encoding = encoding
if self._encoding == -1:
raise OSError("Bad encoding.")
if not isinstance(checksum, bool):
raise ValueError("Bad checksum.")
if path.suffix != ".cdf":
path = path.with_suffix(".cdf")
if len(str(path)) > self.CDF_PATHNAME_LEN:
raise OSError("CDF:", path, " longer than allowed length.")
if path.is_file():
if not delete:
raise OSError("file: ", path, " already exists....\n", "Delete it or specify the 'delete=False' option.")
else:
path.unlink()
self.path = path
self.compressed_file = path.with_suffix(".tmp") if cdf_compression > 0 else None
# Dictionary objects, these contains name, offset, and dimension information
self.zvarsinfo: Dict[int, Tuple[str, int, int, List[int], List[bool]]] = {}
self.rvarsinfo: Dict[int, Tuple[str, int, int, List[int], List[bool]]] = {}
# Dictionary object, contains name, offset, and scope (global or variable)
self.attrsinfo: Dict[int, Tuple[str, int, int]] = {}
self.gattrs: List[str] = [] # List of global attributes
self.vattrs: List[str] = [] # List of variable attributes
self.attrs: List[str] = [] # List of ALL attributes
self.zvars: List[str] = [] # List of z variable names
self.rvars: List[str] = [] # List of r variable names
self.checksum = checksum # Boolean, whether or not to include the checksum at the end
self.compression = cdf_compression # Compression level (or True/False)
self.num_rdim = num_rdim # Number of r dimensions
self.rdim_sizes = rdim_sizes # Size of r dimensions
self.majority = major
with path.open("wb") as f:
f.write(binascii.unhexlify(self.V3magicNUMBER_1))
f.write(binascii.unhexlify(self.V3magicNUMBER_2))
self.cdr_head = self._write_cdr(f, major, self._encoding, checksum)
self.gdr_head = self._write_gdr(f)
self.offset = f.tell()
self.is_closed = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return
def close(self) -> None:
"""
Closes the CDF Class.
1. If compression was set, this is where the compressed file is
written.
2. If a checksum is needed, this will place the checksum at the end
of the file.
"""
if self.is_closed:
return
if self.compressed_file is None:
with self.path.open("rb+") as f:
f.seek(0, 2)
eof = f.tell()
self._update_offset_value(f, self.gdr_head + 36, 8, eof)
if self.checksum:
f.write(self._md5_compute(f))
self.is_closed = True
return
with self.path.open("rb+") as f:
f.seek(0, 2)
eof = f.tell()
self._update_offset_value(f, self.gdr_head + 36, 8, eof)
with self.compressed_file.open("wb+") as g:
g.write(bytearray.fromhex(self.V3magicNUMBER_1))
g.write(bytearray.fromhex(self.V3magicNUMBER_2c))
self._write_ccr(f, g, self.compression)
if self.checksum:
g.seek(0, 2)
g.write(self._md5_compute(g))
self.path.unlink() # NOTE: for Windows this is necessary
self.compressed_file.rename(self.path)
self.is_closed = True
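# Hedged usage sketch (not part of the original source; the file name is made
# up for illustration): the __enter__/__exit__ methods above make the writer a
# context manager, so close() -- and with it file-level compression and the
# optional checksum -- runs automatically when the block exits.
#
# >>> with CDF("example.cdf", cdf_spec={"Compressed": 6, "Checksum": True}) as f:
# ...     pass  # write attributes and variables here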
@is_open
def write_globalattrs(self, globalAttrs):
"""
Writes the global attributes.
Parameters
----------
globalAttrs: dict
Global attribute name(s) and their value(s) pair(s).
The value(s) is a dictionary of entry number and value pair(s).
For example::
globalAttrs={}
globalAttrs['Global1']={0: 'Global Value 1'}
globalAttrs['Global2']={0: 'Global Value 2'}
For a non-string value, use a list with the value and its
CDF data type. For example::
globalAttrs['Global3']={0: [12, 'cdf_int4']}
globalAttrs['Global4']={0: [12.34, 'cdf_double']}
If the data type is not provided, a corresponding
CDF data type is assumed::
globalAttrs['Global3']={0: 12} as 'cdf_int4'
globalAttrs['Global4']={0: 12.34} as 'cdf_double'
CDF allows multi-values for non-string data for an attribute::
globalAttrs['Global5']={0: [[12.34,21.43], 'cdf_double']}
For multi-entries from a global variable, they should be
presented in this form::
GA6={}
GA6[0]='abcd'
GA6[1]=[12, 'cdf_int2']
GA6[2]=[12.5, 'cdf_float']
GA6[3]=[[0,1,2], 'cdf_int8']
globalAttrs['Global6']=GA6
....
f.write_globalattrs(globalAttrs)
"""
if not (isinstance(globalAttrs, dict)):
raise ValueError("Global attribute(s) not in dictionary form")
dataType = None
numElems = None
with self.path.open("rb+") as f:
f.seek(0, 2) # EOF (appending)
for attr, entry in globalAttrs.items():
if attr in self.gattrs:
raise ValueError(f"Global attribute: {attr} already exists.")
if attr in self.vattrs:
logging.warning(f"Attribute: {attr} already defined as a variable attribute.")
continue
attrNum, offsetADR = self._write_adr(f, True, attr)
entries = 0
if entry is None:
continue
entryNumMaX = -1
poffset = -1
for entryNum, value in entry.items():
if entryNumMaX < entryNum:
entryNumMaX = entryNum
if hasattr(value, "__len__") and not isinstance(value, str):
if len(value) == 2:
# Check if the second value is a valid data type
value2 = value[1]
dataType = self._datatype_token(value2)
if dataType > 0:
# Data Type found
data = value[0]
if dataType == self.CDF_CHAR or dataType == self.CDF_UCHAR:
if isinstance(data, list) or isinstance(data, tuple):
logger.warning("Invalid global attribute value")
return
numElems = len(data)
elif dataType == self.CDF_EPOCH or dataType == self.CDF_EPOCH16 or dataType == self.CDF_TIME_TT2000:
cvalue = []
if isinstance(data, list) or isinstance(data, tuple):
numElems = len(data)
for x in range(0, numElems):
if isinstance(data[x], str):
cvalue.append(cdfepoch.CDFepoch.parse(data[x]))
else:
cvalue.append(data[x])
data = cvalue
else:
if isinstance(data, str):
data = cdfepoch.CDFepoch.parse(data)
numElems = 1
else:
if isinstance(data, list) or isinstance(data, tuple):
numElems = len(data)
else:
numElems = 1
else:
# Data type not found, both values are data.
data = value
numElems, dataType = self._datatype_define(value[0])
numElems = len(value)
else:
# Length greater than 2, so it is all data.
data = value
numElems, dataType = self._datatype_define(value[0])
numElems = len(value)
else:
# Just one value
data = value
numElems, dataType = self._datatype_define(value)
if numElems is None:
logger.warning("Unknown data")
return
offset = self._write_aedr(f, True, attrNum, entryNum, data, dataType, numElems, None)
if entries == 0:
# ADR's AgrEDRhead
self._update_offset_value(f, offsetADR + 20, 8, offset)
else:
# ADR's ADRnext
self._update_offset_value(f, poffset + 12, 8, offset)
poffset = offset
entries = entries + 1
# ADR's NgrEntries
self._update_offset_value(f, offsetADR + 36, 4, entries)
# ADR's MAXgrEntry
self._update_offset_value(f, offsetADR + 40, 4, entryNumMaX)
@is_open
def write_variableattrs(self, variableAttrs):
"""
Writes a variable's attributes, provided the variable already exists.
Parameters
----------
variableAttrs : dict
Variable attribute name and its entry value pair(s).
The entry value is also a dictionary of variable id and value
pair(s). Variable id can be the variable name or its id number
in the file. Use write_var function if the variable does not exist.
For example::
variableAttrs={}
entries_1={}
entries_1['var_name_1'] = 'abcd'
entries_1['var_name_2'] = [12, 'cdf_int4']
....
variableAttrs['attr_name_1']=entries_1
entries_2={}
entries_2['var_name_1'] = 'xyz'
entries_2['var_name_2'] = [[12, 34], 'cdf_int4']
....
variableAttrs['attr_name_2']=entries_2
....
....
f.write_variableattrs(variableAttrs)
"""
if not (isinstance(variableAttrs, dict)):
raise ValueError("Variable attribute(s) not in dictionary form")
dataType = None
numElems = None
with self.path.open("rb+") as f:
f.seek(0, 2) # EOF (appending)
for attr, attrs in variableAttrs.items():
if not (isinstance(attr, str)):
raise ValueError("Attribute name must be a string")
return
if attr in self.gattrs:
raise ValueError(f"Variable attribute: {attr}" + " is already a global variable")
return
if attr in self.vattrs:
attrNum = self.vattrs.index(attr)
offsetA = self.attrsinfo[attrNum][2]
else:
attrNum, offsetA = self._write_adr(f, False, attr)
entries = 0
if attrs is None:
continue
if not (isinstance(attrs, dict)):
raise ValueError("An attribute" "s attribute(s) not in dictionary form")
entryNumX = -1
poffset = -1
for entryID, value in attrs.items():
if isinstance(entryID, str) and (not (entryID in self.zvars) and not (entryID in self.rvars)):
raise KeyError(f"{entryID} not found in the CDF")
if isinstance(entryID, numbers.Number) and (len(self.zvars) > 0 and len(self.rvars) > 0):
raise ValueError(f"{entryID} can not be used as the CDF has both zVariables and rVariables")
if isinstance(entryID, str):
try:
entryNum = self.zvars.index(entryID)
zVar = True
except Exception:
try:
entryNum = self.rvars.index(entryID)
zVar = False
except Exception:
raise KeyError(f"{entryID} not found")
else:
entryNum = int(entryID)
if len(self.zvars) > 0 and len(self.rvars) > 0:
raise ValueError("Cannot use integer form for variable id as there are both zVariables and rVariables")
if len(self.zvars) > 0:
if entryNum >= len(self.zvars):
raise ValueError("Variable id: ", entryID, " not found")
else:
zVar = True
else:
if entryNum >= len(self.rvars):
raise ValueError("Variable id: ", entryID, " not found")
else:
zVar = False
if entryNum > entryNumX:
entryNumX = entryNum
if hasattr(value, "__len__") and not isinstance(value, str):
if len(value) == 2:
value2 = value[1]
dataType = self._datatype_token(value2)
if dataType > 0:
data = value[0]
if dataType == self.CDF_CHAR or dataType == self.CDF_UCHAR:
if isinstance(data, list) or isinstance(data, tuple):
raise ValueError("Invalid variable attribute value")
numElems = len(data)
elif dataType == self.CDF_EPOCH or dataType == self.CDF_EPOCH16 or dataType == self.CDF_TIME_TT2000:
cvalue = []
if isinstance(data, list) or isinstance(data, tuple):
numElems = len(data)
for x in range(0, numElems):
if isinstance(data[x], str):
avalue = cdfepoch.CDFepoch.parse(data[x])
else:
avalue = data[x]
if dataType == self.CDF_EPOCH16:
cvalue.append(avalue.real)
cvalue.append(avalue.imag)
else:
cvalue.append(avalue)
data = cvalue
else:
if isinstance(data, str):
data = cdfepoch.CDFepoch.parse(data)
numElems = 1
else:
if isinstance(data, list) or isinstance(data, tuple):
numElems = len(data)
else:
numElems = 1
else:
data = value
numElems, dataType = self._datatype_define(value[0])
numElems = len(value)
else:
data = value
numElems, dataType = self._datatype_define(value[0])
numElems = len(value)
else:
data = value
numElems, dataType = self._datatype_define(value)
if numElems is None:
logger.warning("Unknown data")
return
offset = self._write_aedr(f, False, attrNum, entryNum, data, dataType, numElems, zVar)
if entries == 0:
if zVar:
# ADR's AzEDRhead
self._update_offset_value(f, offsetA + 48, 8, offset)
else:
# ADR's AgrEDRhead
self._update_offset_value(f, offsetA + 20, 8, offset)
else:
# ADR's ADRnext
self._update_offset_value(f, poffset + 12, 8, offset)
poffset = offset
entries = entries + 1
if zVar:
# ADR's NzEntries
self._update_offset_value(f, offsetA + 56, 4, entries)
# ADR's MAXzEntry
self._update_offset_value(f, offsetA + 60, 4, entryNumX)
else:
# ADR's NgrEntries
self._update_offset_value(f, offsetA + 36, 4, entries)
# ADR's MAXgrEntry
self._update_offset_value(f, offsetA + 40, 4, entryNumX)
@is_open
def write_var(self, var_spec, var_attrs=None, var_data=None):
"""
Writes a variable, along with variable attributes and data.
Parameters
----------
var_spec : dict
The specifications of the variable.
The required/optional keys for creating a variable:
Required keys:
- ['Variable']: The name of the variable
- ['Data_Type']: the CDF data type
- ['Num_Elements']: The number of elements. Always 1 for the
numeric type. The char length for string type.
- ['Rec_Vary']: Record variance
For zVariables:
- ['Dim_Sizes']: The dimensional sizes for zVariables only.
Use [] for 0-dimension. Each and
every dimension is varying for zVariables.
For rVariables:
- ['Dim_Vary']: The dimensional variances for rVariables only.
Optional keys:
- ['Var_Type']: Whether the variable is a zVariable or
rVariable. Valid values: "zVariable" and
"rVariable". The default is "zVariable".
- ['Sparse']: Whether the variable has sparse records.
Valid values are "no_sparse", "pad_sparse",
and "prev_sparse". The default is 'no_sparse'.
- ['Compress']: Set the gzip compression level (0 to 9), 0 for
no compression. The default is to compress
with level 6 (done only if the compressed
data is less than the uncompressed data).
- ['Block_Factor']: The blocking factor, the number of
records in a chunk when the variable is compressed.
- ['Pad']: The padded value (in bytes, numpy.ndarray or string)
var_attrs : dict
{attribute:value} pairs.
The attribute is the name of a variable attribute.
The value can have its data type specified for the
numeric data. If not, based on Python's type, a
corresponding CDF type is assumed: CDF_INT4 for int,
CDF_DOUBLE for float, CDF_EPOCH16 for complex and
and CDF_INT8 for long.
For example, the following defined attributes will
have the same types in the CDF::
var_attrs= { 'attr1': 'value1',
'attr2': 12.45,
'attr3': [3,4,5],
.....
}
With data type (in the list form)::
var_attrs= { 'attr1': 'value1',
'attr2': [12.45, 'CDF_DOUBLE'],
'attr3': [[3,4,5], 'CDF_INT4'],
.....
}
var_data :
The data for the variable. If the variable is
a regular variable without sparse records, it must
be in a single structure of bytes, or numpy.ndarray
for numeric variable, or str or list of strs for
string variable.
If the variable has sparse records, var_data should
be presented in a list/tuple with two elements,
the first being a list/tuple that contains the
physical record number(s), the second being the variable
data in bytes, numpy.ndarray, or a list of strings. Variable
data can have just physical records' data (with the same
number of records as the first element) or have data from both
physical records and virtual records (which are filled with the pad value).
The var_data has the form::
[[rec_#1,rec_#2,rec_#3,...],
[data_#1,data_#2,data_#3,...]]
See the sample for its setup.
"""
if not isinstance(var_spec, dict):
raise TypeError("Variable should be in dictionary form.")
# Get variable info from var_spec
try:
dataType = int(var_spec["Data_Type"])
numElems = int(var_spec["Num_Elements"])
name = var_spec["Variable"]
recVary = var_spec["Rec_Vary"]
except Exception as e:
raise ValueError("Missing/invalid required spec for creating variable.") from e
# Get whether or not it is a z variable
var_type = var_spec.setdefault("Var_Type", "zvariable")
if var_type.lower() == "zvariable":
zVar = True
else:
var_spec["Var_Type"] = "rVariable"
zVar = False
if dataType == self.CDF_CHAR or dataType == self.CDF_UCHAR:
if numElems < 1:
raise ValueError("Invalid Num_Elements for string data type variable")
else:
if numElems != 1:
raise ValueError("Invalid Num_Elements for numeric data type variable")
# If its a z variable, get the dimension info
# Otherwise, use r variable info
if zVar:
try:
dimSizes = var_spec["Dim_Sizes"]
numDims = len(dimSizes)
dimVary = []
for _ in range(0, numDims):
dimVary.append(True)
except Exception:
raise ValueError("Missing/invalid required spec for creating variable.")
else:
dimSizes = self.rdim_sizes
numDims = self.num_rdim
try:
dimVary = var_spec["Dim_Vary"]
if len(dimVary) != numDims:
raise ValueError("Invalid Dim_Vary size for the rVariable.")
except Exception:
raise ValueError("Missing/invalid required spec for Dim_Vary for rVariable")
# Get Sparseness info
sparse = self._sparse_token(var_spec.get("Sparse", "no_sparse"))
# Get compression info
compression = var_spec.get("Compress", 6)
if isinstance(compression, int):
if not 0 <= compression <= 9:
compression = 0
else:
compression = 6 if compression else 0
# Get blocking factor
blockingfactor = int(var_spec.get("Block_Factor", 1))
# Get pad value
pad = var_spec.get("Pad", None)
if hasattr(pad, "__len__"):
pad = pad[0]
if name in self.zvars or name in self.rvars:
raise ValueError(f"{name} already exists")
with self.path.open("rb+") as f:
f.seek(0, 2) # EOF (appending)
varNum, offset = self._write_vdr(
f, dataType, numElems, numDims, dimSizes, name, dimVary, recVary, sparse, blockingfactor, compression, pad, zVar
)
# Update the GDR pointers if needed
if zVar:
if len(self.zvars) == 1:
# GDR's zVDRhead
self._update_offset_value(f, self.gdr_head + 20, 8, offset)
else:
if len(self.rvars) == 1:
# GDR's rVDRhead
self._update_offset_value(f, self.gdr_head + 12, 8, offset)
# Write the variable attributes
if var_attrs is not None:
self._write_var_attrs(f, varNum, var_attrs, zVar)
# Write the actual data to the file
if not (var_data is None):
if sparse == 0:
varMaxRec = self._write_var_data_nonsparse(
f, zVar, varNum, dataType, numElems, recVary, compression, blockingfactor, var_data
)
else:
notsupport = False
if not isinstance(var_data, (list, tuple)):
notsupport = True
if notsupport or len(var_data) != 2:
logger.warning(
"Sparse record #s and data are not of list/tuple form:\n"
" [ [rec_#1, rec_#2, rec_#3, ],\n"
" [data_#1, data_#2, data_#3, ....] ]"
)
return
# Format data into: [[recstart1, recend1, data1],
# [recstart2,recend2,data2], ...]
var_data = self._make_sparse_blocks(var_spec, var_data[0], var_data[1])
for block in var_data:
varMaxRec = self._write_var_data_sparse(f, zVar, varNum, dataType, numElems, recVary, block)
# Update GDR MaxRec if writing an r variable
if not zVar:
# GDR's rMaxRec
f.seek(self.gdr_head + 52)
maxRec = int.from_bytes(f.read(4), "big", signed=True)
if maxRec < varMaxRec:
self._update_offset_value(f, self.gdr_head + 52, 4, varMaxRec)
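# Hedged usage sketch (not part of the original source; file and variable
# names are made up for illustration): writing a 0-dimensional zVariable of
# CDF_REAL4 records with one attribute.
#
# >>> import numpy as np
# >>> spec = {"Variable": "Flux", "Data_Type": CDF.CDF_REAL4,
# ...         "Num_Elements": 1, "Rec_Vary": True, "Dim_Sizes": []}
# >>> with CDF("example.cdf") as f:
# ...     f.write_var(spec, var_attrs={"UNITS": "counts/s"},
# ...                 var_data=np.arange(10, dtype=np.float32))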
def _write_var_attrs(self, f: io.BufferedWriter, varNum: int, var_attrs: Dict[str, Any], zVar: bool) -> None:
"""
Writes ADRs and AEDRs for variables
Parameters:
f : file
The open CDF file
varNum : int
The variable number for adding attributes
var_attrs : dict
A dictionary object full of variable attributes
zVar : bool
True if varNum is referencing a z variable
Returns: None
"""
if not isinstance(var_attrs, dict):
raise TypeError("Variable attribute(s) should be in dictionary form.")
for attr, entry in var_attrs.items():
if attr in self.gattrs:
logger.warning(f"Attribute: {attr}" + " already defined as a global attribute... Skipping attribute.")
continue
if not (attr in self.attrs):
attrNum, offset = self._write_adr(f, False, attr)
if len(self.attrs) == 0:
# GDR's ADRhead
# TODO: fix this, grid_offset doesn't exist
self._update_offset_value(f, self.grid_offset + 28, 8, offset) # type: ignore
else:
attrNum = self.attrs.index(attr)
offset = self.attrsinfo[attrNum][2]
if entry is None:
logger.warning(
f"Attribute: {attr}" + " is None type, which does not have an equivalent in CDF... Skipping attribute."
)
continue
# Check if dataType was provided
dataType = 0
if hasattr(entry, "__len__") and not isinstance(entry, str):
items = len(entry)
if items == 2:
dataType = self._datatype_token(entry[1])
# Handle user setting datatype
if dataType > 0:
# CDF data type defined in entry
data = entry[0]
if self._checklistofNums(data):
# Data needs no pre-processing and is good to go
if hasattr(data, "__len__") and not isinstance(data, str):
numElems = len(data)
else:
numElems = 1
else:
# Data needs some sort of pre-processing to proceed
if dataType == self.CDF_CHAR or dataType == self.CDF_UCHAR:
if hasattr(data, "__len__") and not isinstance(data, str):
# Reformat strings
items = len(data)
odata = data
data = ""
for x in range(0, items):
if x > 0:
data += "\\N "
data += str(odata[x])
else:
data = str(odata[x])
numElems = len(data)
elif dataType == self.CDF_EPOCH or dataType == self.CDF_EPOCH16 or dataType == self.CDF_TIME_TT2000:
# Convert data to CDF time
cvalue = []
if hasattr(data, "__len__") and not isinstance(data, str):
numElems = len(data)
for x in range(0, numElems):
cvalue.append(cdfepoch.CDFepoch.parse(data[x]))
data = cvalue
else:
data = cdfepoch.CDFepoch.parse(data)
numElems = 1
elif isinstance(data, str):
# One possibility is that the user wants to convert a string to a number
numElems = 1
data = np.array(float(data))
else:
# The final possibility I can think of is that the user wants to convert a list of strings to a list of numbers
try:
numElems = 1
data = np.array([float(item) for item in data])
except Exception:
logger.warning(
f"Cannot determine how to convert {str(data)} to specified type of {dataType}. Ignoring the specified datatype, and continuing."
)
dataType = 0
if dataType == 0:
# No data type defined...
data = entry
if hasattr(data, "__len__") and not isinstance(data, str):
numElems, dataType = self._datatype_define(entry[0])
if dataType == self.CDF_CHAR or dataType == self.CDF_UCHAR:
data = ""
for x in range(0, len(entry)):
if x > 0:
data += "\\N "
data += str(entry[x])
else:
data = str(entry[x])
numElems = len(data)
else:
numElems, dataType = self._datatype_define(entry)
offset = self._write_aedr(f, False, attrNum, varNum, data, dataType, numElems, zVar)
self._update_aedr_link(f, attrNum, zVar, varNum, offset)
def _write_var_data_nonsparse(
self,
f: io.BufferedWriter,
zVar: bool,
var: int,
dataType: int,
numElems: int,
recVary: bool,
compression: int,
blockingfactor: int,
indata: Union[np.ndarray, bytearray, Dict[str, np.ndarray]],
) -> int:
"""
Creates VVRs and the corresponding VXRs full of "indata" data.
If there is no compression, creates exactly one VXR and VVR
If there is compression
Parameters
----------
f : file
The open CDF file
zVar : bool
True if this is z variable data
var : str
The name of the variable
dataType : int
the CDF variable type
numElems : int
number of elements in each record
recVary : bool
True if each record is unique
compression : int
The amount of compression
blockingfactor: int
The size (in number of records) of a VVR data block
indata : varies
the data to write, should be a numpy or byte array
Returns
-------
recs : int
The number of records
"""
numValues = self._num_values(zVar, var)
dataTypeSize = self._datatype_size(dataType, numElems)
if isinstance(indata, dict):
indata = indata["Data"]
# Deal with EPOCH16 data types
if dataType == self.CDF_EPOCH16:
epoch16 = []
if hasattr(indata, "__len__") and not isinstance(indata, str):
adata = indata[0]
if not isinstance(adata, complex):
try:
indata = np.asarray(indata).astype(np.complex128)
except Exception:
raise ValueError(
f"Data for variable {var} must be castable to a 128-bit complex number when data type is CDF_EPOCH16."
)
recs = len(indata)
for x in range(0, recs):
epoch16.append(indata[x].real)
epoch16.append(indata[x].imag)
indata = np.array(epoch16)
else:
if isinstance(indata, complex):
epoch16.append(indata.real)
epoch16.append(indata.imag)
indata = np.array(epoch16)
# Convert to byte stream
recs, data = self._convert_data(dataType, numElems, numValues, indata)
if not recVary and len(data) != 0:
recs = 1
if zVar:
vdr_offset = self.zvarsinfo[var][1]
else:
vdr_offset = self.rvarsinfo[var][1]
usedEntries = 0
numVXRs = 0
if compression > 0:
default_blockingfactor = math.ceil(self.BLOCKING_BYTES / (numValues * dataTypeSize))
# If the given blocking factor is too small, use the default one
# Will re-adjust if the records are less than this computed BF.
if blockingfactor < default_blockingfactor:
blockingfactor = default_blockingfactor
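# Illustrative check (not part of the original source): with the 65536-byte
# BLOCKING_BYTES target, a scalar CDF_REAL8 variable (numValues = 1,
# dataTypeSize = 8) gets a default blocking factor of ceil(65536 / 8) = 8192
# records per compressed block.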
if blockingfactor == 0:
blockingfactor = 1
# Update the blocking factor
f.seek(vdr_offset + 80, 0)
# VDR's BlockingFactor
self._update_offset_value(f, vdr_offset + 80, 4, blockingfactor)
# set blocking factor
if recs < blockingfactor:
blockingfactor = recs
blocks = math.ceil(recs / blockingfactor)
nEntries = self.NUM_VXR_ENTRIES
VXRhead = None
# Loop through blocks, create VVRs/CVVRs
for x in range(0, blocks):
startrec = x * blockingfactor
startloc = startrec * numValues * dataTypeSize
endrec = (x + 1) * blockingfactor - 1
if endrec > (recs - 1):
endrec = recs - 1
endloc = (endrec + 1) * numValues * dataTypeSize
if endloc > len(data):
endrec = recs - 1
endloc = len(data)
bdata = data[startloc:endloc]
cdata = gzip_deflate(bdata, compression)
if len(cdata) < len(bdata):
n1offset = self._write_cvvr(f, cdata)
else:
n1offset = self._write_vvr(f, bdata)
if x == 0:
# Create a VXR
VXRoffset = self._write_vxr(f)
VXRhead = VXRoffset
numVXRs = 1
self._update_vdr_vxrheadtail(f, vdr_offset, VXRoffset)
if usedEntries < nEntries:
# Use the existing VXR
usedEntries = self._use_vxrentry(f, VXRoffset, startrec, endrec, n1offset)
else:
# Create a new VXR and an upper level VXR, if needed.
# Two levels of VXRs are the maximum, which is simpler
# to implement.
savedVXRoffset = VXRoffset
VXRoffset = self._write_vxr(f)
numVXRs += 1
usedEntries = self._use_vxrentry(f, VXRoffset, startrec, endrec, n1offset)
# Edit the VXRnext field of the previous VXR
self._update_offset_value(f, savedVXRoffset + 12, 8, VXRoffset)
# Edit the VXRtail of the VDR
self._update_offset_value(f, vdr_offset + 36, 8, VXRoffset)
# After we're done with the blocks, check the way
# we have VXRs set up
if numVXRs > self.NUM_VXRlvl_ENTRIES:
newvxrhead, newvxrtail = self._add_vxr_levels_r(f, VXRhead, numVXRs)
self._update_offset_value(f, vdr_offset + 28, 8, newvxrhead)
self._update_offset_value(f, vdr_offset + 36, 8, newvxrtail)
else:
# Create one VVR and VXR, with one VXR entry
offset = self._write_vvr(f, data)
VXRoffset = self._write_vxr(f)
usedEntries = self._use_vxrentry(f, VXRoffset, 0, recs - 1, offset)
self._update_vdr_vxrheadtail(f, vdr_offset, VXRoffset)
# VDR's MaxRec
self._update_offset_value(f, vdr_offset + 24, 4, recs - 1)
return recs - 1
def _write_var_data_sparse(
self,
f: io.BufferedWriter,
zVar: bool,
var: int,
dataType: int,
numElems: int,
recVary: bool,
oneblock: Tuple[int, int, np.ndarray],
) -> int:
"""
Writes a VVR and a VXR for this block of sparse data
Parameters:
f : file
The open CDF file
zVar : bool
True if this is for a z variable
var : int
The variable number
dataType : int
The CDF data type of this variable
numElems : int
The number of elements in each record
recVary : bool
True if the value varies across records
oneblock: list
A list of data in the form [startrec, endrec, [data]]
Returns:
recend : int
Just the "endrec" value input by the user in "oneblock"
"""
rec_start = oneblock[0]
rec_end = oneblock[1]
indata = oneblock[2]
numValues = self._num_values(zVar, var)
# Convert oneblock[2] into a byte stream
_, data = self._convert_data(dataType, numElems, numValues, indata)
# Gather dimension information
if zVar:
vdr_offset = self.zvarsinfo[var][1]
else:
vdr_offset = self.rvarsinfo[var][1]
# Write one VVR
offset = self._write_vvr(f, data)
f.seek(vdr_offset + 28, 0)
# Get first VXR
vxrOne = int.from_bytes(f.read(8), "big", signed=True)
foundSpot = 0
usedEntries = 0
currentVXR = 0
# Search through VXRs to find an open one
while foundSpot == 0 and vxrOne > 0:
# have a VXR
f.seek(vxrOne, 0)
currentVXR = f.tell()
f.seek(vxrOne + 12, 0)
vxrNext = int.from_bytes(f.read(8), "big", signed=True)
nEntries = int.from_bytes(f.read(4), "big", signed=True)
usedEntries = int.from_bytes(f.read(4), "big", signed=True)
if usedEntries == nEntries:
# all entries are used -- check the next vxr in link
vxrOne = vxrNext
else:
# found a VXR with an available entry spot
foundSpot = 1
# vxrOne == 0 from vdr's vxrhead vxrOne == -1 from a vxr's vxrnext
if vxrOne == 0 or vxrOne == -1:
# no available vxr... create a new one
currentVXR = self._create_vxr(f, rec_start, rec_end, vdr_offset, currentVXR, offset)
else:
self._use_vxrentry(f, currentVXR, rec_start, rec_end, offset)
# Modify the VDR's MaxRec if needed
f.seek(vdr_offset + 24, 0)
recNumc = int.from_bytes(f.read(4), "big", signed=True)
if rec_end > recNumc:
self._update_offset_value(f, vdr_offset + 24, 4, rec_end)
return rec_end
def _create_vxr(self, f: io.BufferedWriter, recStart: int, recEnd: int, currentVDR: int, priorVXR: int, vvrOffset: int) -> int:
"""
Create a VXR AND use a VXR
Parameters:
f : file
The open CDF file
recStart : int
The start record of this block
recEnd : int
The ending record of this block
currentVDR : int
The byte location of the variables VDR
priorVXR : int
The byte location of the previous VXR
vvrOffset : int
The byte location of the VVR
Returns:
vxroffset : int
The byte location of the created vxr
"""
# add a VXR, use an entry, and link it to the prior VXR if it exists
vxroffset = self._write_vxr(f)
self._use_vxrentry(f, vxroffset, recStart, recEnd, vvrOffset)
if priorVXR == 0:
# VDR's VXRhead
self._update_offset_value(f, currentVDR + 28, 8, vxroffset)
else:
# VXR's next
self._update_offset_value(f, priorVXR + 12, 8, vxroffset)
# VDR's VXRtail
self._update_offset_value(f, currentVDR + 36, 8, vxroffset)
return vxroffset
def _use_vxrentry(self, f: io.BufferedWriter, VXRoffset: int, recStart: int, recEnd: int, offset: int) -> int:
"""
Adds a VVR pointer to a VXR
"""
# Select the next unused entry in a VXR for a VVR/CVVR
f.seek(VXRoffset + 20)
# num entries
numEntries = int.from_bytes(f.read(4), "big", signed=True)
# used entries
usedEntries = int.from_bytes(f.read(4), "big", signed=True)
# VXR's First
self._update_offset_value(f, VXRoffset + 28 + 4 * usedEntries, 4, recStart)
# VXR's Last
self._update_offset_value(f, VXRoffset + 28 + 4 * numEntries + 4 * usedEntries, 4, recEnd)
# VXR's Offset
self._update_offset_value(f, VXRoffset + 28 + 2 * 4 * numEntries + 8 * usedEntries, 8, offset)
# VXR's NusedEntries
usedEntries += 1
self._update_offset_value(f, VXRoffset + 24, 4, usedEntries)
return usedEntries
def _add_vxr_levels_r(self, f: io.BufferedWriter, vxrhead: int, numVXRs: int) -> Tuple[int, int]:
"""
Build a new level of VXRs... make VXRs more tree-like
From:
VXR1 -> VXR2 -> VXR3 -> VXR4 -> ... -> VXRn
To:
new VXR1
/ | \
VXR2 VXR3 VXR4
/ | \
...
VXR5 .......... VXRn
Parameters
----------
f : file
The open CDF file
vxrhead : int
The byte location of the first VXR for a variable
numVXRs : int
The total number of VXRs
Returns
-------
newVXRhead : int
The byte location of the newest VXR head
newvxroff : int
The byte location of the last VXR head
"""
newNumVXRs = int(numVXRs / self.NUM_VXRlvl_ENTRIES)
remaining = int(numVXRs % self.NUM_VXRlvl_ENTRIES)
vxroff = vxrhead
prevxroff = -1
if remaining != 0:
newNumVXRs += 1
self.level += 1
for x in range(0, newNumVXRs):
newvxroff = self._write_vxr(f, numEntries=self.NUM_VXRlvl_ENTRIES)
if x > 0:
self._update_offset_value(f, prevxroff + 12, 8, newvxroff)
else:
newvxrhead = newvxroff
prevxroff = newvxroff
if x == (newNumVXRs - 1):
if remaining == 0:
endEntry = self.NUM_VXRlvl_ENTRIES
else:
endEntry = remaining
else:
endEntry = self.NUM_VXRlvl_ENTRIES
for _ in range(0, endEntry):
recFirst, recLast = self._get_recrange(f, vxroff)
self._use_vxrentry(f, newvxroff, recFirst, recLast, vxroff)
vxroff = self._read_offset_value(f, vxroff + 12, 8)
vxroff = vxrhead
# Break the horizontal links
for x in range(0, numVXRs):
nvxroff = self._read_offset_value(f, vxroff + 12, 8)
self._update_offset_value(f, vxroff + 12, 8, 0)
vxroff = nvxroff
# Iterate this process if we're over NUM_VXRlvl_ENTRIES
if newNumVXRs > self.NUM_VXRlvl_ENTRIES:
return self._add_vxr_levels_r(f, newvxrhead, newNumVXRs)
else:
return newvxrhead, newvxroff
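    # Worked example (hedged; assumes NUM_VXRlvl_ENTRIES == 3): a flat chain of
    # 7 VXRs becomes ceil(7 / 3) = 3 parent VXRs holding 3, 3 and 1 children.
    # Since 3 parents do not exceed the per-VXR limit, the recursion above
    # stops and returns the head and tail of the new level.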
def _update_vdr_vxrheadtail(self, f: io.BufferedWriter, vdr_offset: int, VXRoffset: int) -> None:
"""
This sets a VXR to be the first and last VXR in the VDR
"""
# VDR's VXRhead
self._update_offset_value(f, vdr_offset + 28, 8, VXRoffset)
# VDR's VXRtail
self._update_offset_value(f, vdr_offset + 36, 8, VXRoffset)
def _get_recrange(self, f: io.BufferedWriter, VXRoffset: int) -> Tuple[int, int]:
"""
Finds the first and last record numbers pointed by the VXR
Assumes the VXRs are in order
"""
f.seek(VXRoffset + 20)
# Num entries
numEntries = int.from_bytes(f.read(4), "big", signed=True)
# used entries
usedEntries = int.from_bytes(f.read(4), "big", signed=True)
# VXR's First record
firstRec = int.from_bytes(f.read(4), "big", signed=True)
# VXR's Last record
f.seek(VXRoffset + 28 + (4 * numEntries + 4 * (usedEntries - 1)))
lastRec = int.from_bytes(f.read(4), "big", signed=True)
return firstRec, lastRec
@staticmethod
def _majority_token(major: str) -> int:
"""
        Returns the numerical type for a CDF row/column major type
"""
majors = {"ROW_MAJOR": 1, "COLUMN_MAJOR": 2}
try:
return majors[major.upper()]
except Exception:
logger.warning(f"bad major.... {major}")
return 0
@staticmethod
def _encoding_token(encoding: str) -> int:
"""
        Returns the numerical type for a CDF encoding type
"""
encodings = {
"NETWORK_ENCODING": 1,
"SUN_ENCODING": 2,
"VAX_ENCODING": 3,
"DECSTATION_ENCODING": 4,
"SGI_ENCODING": 5,
"IBMPC_ENCODING": 6,
"IBMRS_ENCODING": 7,
"HOST_ENCODING": 8,
"PPC_ENCODING": 9,
"HP_ENCODING": 11,
"NEXT_ENCODING": 12,
"ALPHAOSF1_ENCODING": 13,
"ALPHAVMSD_ENCODING": 14,
"ALPHAVMSG_ENCODING": 15,
"ALPHAVMSI_ENCODING": 16,
"ARM_LITTLE_ENCODING": 17,
"ARM_BIG_ENCODING": 18,
}
try:
return encodings[encoding.upper()]
except Exception:
logger.warning(f"bad encoding.... {encoding}")
return 0
@staticmethod
def _datatype_token(datatype: str) -> int:
"""
        Returns the numerical type for a CDF data type
"""
datatypes = {
"CDF_INT1": 1,
"CDF_INT2": 2,
"CDF_INT4": 4,
"CDF_INT8": 8,
"CDF_UINT1": 11,
"CDF_UINT2": 12,
"CDF_UINT4": 14,
"CDF_REAL4": 21,
"CDF_REAL8": 22,
"CDF_EPOCH": 31,
"CDF_EPOCH16": 32,
"CDF_TIME_TT2000": 33,
"CDF_BYTE": 41,
"CDF_FLOAT": 44,
"CDF_DOUBLE": 45,
"CDF_CHAR": 51,
"CDF_UCHAR": 52,
}
try:
return datatypes[datatype.upper()]
except Exception:
return 0
def _datatype_define(self, value: Union[str, int, float, complex, np.ndarray]) -> Tuple[int, int]:
if isinstance(value, str):
return len(value), self.CDF_CHAR
else:
numElems = 1
if isinstance(value, int):
return numElems, self.CDF_INT8
elif isinstance(value, float):
return numElems, self.CDF_DOUBLE
elif isinstance(value, complex):
return numElems, self.CDF_EPOCH16
elif hasattr(value, "dtype"):
# We are likely dealing with a numpy number at this point
if value.dtype.type in (np.int8, np.int16, np.int32, np.int64):
return numElems, self.CDF_INT8
elif value.dtype.type == np.complex128:
return numElems, self.CDF_EPOCH16
elif value.dtype.type in (np.uint8, np.uint16, np.uint32):
return numElems, self.CDF_UINT4
elif value.dtype.type in (np.float16, np.float32, np.float64):
return numElems, self.CDF_DOUBLE
elif value.dtype.type == np.str_:
return numElems, self.CDF_CHAR
else:
logger.warning(f"Invalid data type for data {value.dtype.type}.... Skip")
return None, None
else:
logger.warning("Invalid data type for data.... Skip")
return None, None
@staticmethod
def _datatype_size(datatype: int, numElms: int) -> int:
"""
Gets datatype size
Parameters:
datatype : int
CDF variable data type
numElms : int
number of elements
Returns:
numBytes : int
The number of bytes for the data
"""
sizes = {1: 1, 2: 2, 4: 4, 8: 8, 11: 1, 12: 2, 14: 4, 21: 4, 22: 8, 31: 8, 32: 16, 33: 8, 41: 1, 44: 4, 45: 8, 51: 1, 52: 1}
try:
if isinstance(datatype, int):
if datatype == 51 or datatype == 52:
return numElms
else:
return sizes[datatype] * numElms
else:
datatype = datatype.upper()
if datatype == "CDF_INT1" or datatype == "CDF_UINT1" or datatype == "CDF_BYTE":
return 1 * numElms
elif datatype == "CDF_INT2" or datatype == "CDF_UINT2":
return 2 * numElms
elif datatype == "CDF_INT4" or datatype == "CDF_UINT4":
return 4 * numElms
elif datatype == "CDF_INT8" or datatype == "CDF_TIME_TT2000":
return 8 * numElms
elif datatype == "CDF_REAL4" or datatype == "CDF_FLOAT":
return 4 * numElms
elif datatype == "CDF_REAL8" or datatype == "CDF_DOUBLE" or datatype == "CDF_EPOCH":
return 8 * numElms
elif datatype == "CDF_EPOCH16":
return 16 * numElms
elif datatype == "CDF_CHAR" or datatype == "CDF_UCHAR":
return numElms
else:
return -1
except Exception:
return -1
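    # Doctest-style illustration of the size table above:
    # >>> CDF._datatype_size(22, 1)   # CDF_REAL8 -> 8 bytes per element
    # 8
    # >>> CDF._datatype_size(51, 10)  # CDF_CHAR -> one byte per element
    # 10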
@staticmethod
def _sparse_token(sparse: str) -> int:
"""
Returns the numerical CDF value for sparseness.
"""
sparses = {"no_sparse": 0, "pad_sparse": 1, "prev_sparse": 2}
try:
return sparses[sparse.lower()]
except Exception:
return 0
def _write_cdr(self, f: io.BufferedWriter, major: int, encoding: int, checksum: int) -> int:
f.seek(0, 2)
byte_loc = f.tell()
block_size = self.CDR_BASE_SIZE64 + self.CDF_COPYRIGHT_LEN
section_type = self.CDR_
gdr_loc = block_size + 8
version = self.version
release = self.release
flag = 0
if major == 1:
flag = self._set_bit(flag, 0)
flag = self._set_bit(flag, 1)
if checksum:
flag = self._set_bit(flag, 2)
flag = self._set_bit(flag, 3)
rfuA = 0
rfuB = 0
increment = self.increment
identifier = 2
rfuE = -1
copy_right = (
"\nCommon Data Format (CDF)\nhttps://cdf.gsfc.nasa.gov\n"
+ "Space Physics Data Facility\n"
+ "NASA/Goddard Space Flight Center\n"
+ "Greenbelt, Maryland 20771 USA\n"
+ "(User support: gsfc-cdf-support@lists.nasa.gov)\n"
)
cdr = bytearray(block_size)
cdr[0:8] = struct.pack(">q", block_size)
cdr[8:12] = struct.pack(">i", section_type)
cdr[12:20] = struct.pack(">q", gdr_loc)
cdr[20:24] = struct.pack(">i", version)
cdr[24:28] = struct.pack(">i", release)
cdr[28:32] = struct.pack(">i", encoding)
cdr[32:36] = struct.pack(">i", flag)
cdr[36:40] = struct.pack(">i", rfuA)
cdr[40:44] = struct.pack(">i", rfuB)
cdr[44:48] = struct.pack(">i", increment)
cdr[48:52] = struct.pack(">i", identifier)
cdr[52:56] = struct.pack(">i", rfuE)
tofill = self.CDF_COPYRIGHT_LEN - len(copy_right)
cdr[56:block_size] = (copy_right + "\0" * tofill).encode()
f.write(cdr)
return byte_loc
def _write_gdr(self, f: io.BufferedWriter) -> int:
f.seek(0, 2)
byte_loc = f.tell()
block_size = self.GDR_BASE_SIZE64 + 4 * self.num_rdim
section_type = self.GDR_
first_rvariable = 0
first_zvariable = 0
first_adr = 0
eof = byte_loc + block_size
num_rvariable = 0
num_att = 0
rMaxRec = -1
num_rdim = self.num_rdim
num_zvariable = 0
UIR_head = 0
rfuC = 0
leapsecondlastupdate = 20170101
rfuE = -1
gdr = bytearray(block_size)
gdr[0:8] = struct.pack(">q", block_size)
gdr[8:12] = struct.pack(">i", section_type)
gdr[12:20] = struct.pack(">q", first_rvariable)
gdr[20:28] = struct.pack(">q", first_zvariable)
gdr[28:36] = struct.pack(">q", first_adr)
gdr[36:44] = struct.pack(">q", eof)
gdr[44:48] = struct.pack(">i", num_rvariable)
gdr[48:52] = struct.pack(">i", num_att)
gdr[52:56] = struct.pack(">i", rMaxRec)
gdr[56:60] = struct.pack(">i", num_rdim)
gdr[60:64] = struct.pack(">i", num_zvariable)
gdr[64:72] = struct.pack(">q", UIR_head)
gdr[72:76] = struct.pack(">i", rfuC)
gdr[76:80] = struct.pack(">i", leapsecondlastupdate)
gdr[80:84] = struct.pack(">i", rfuE)
if num_rdim > 0:
for i in range(0, num_rdim):
gdr[84 + i * 4 : 84 + (i + 1) * 4] = struct.pack(">i", self.rdim_sizes[i])
f.write(gdr)
return byte_loc
def _write_adr(self, f: io.BufferedWriter, gORv: bool, name: str) -> Tuple[int, int]:
"""
        Writes an ADR to the end of the file.
Additionally, it will update the offset values to either the previous ADR
or the ADRhead field in the GDR.
Parameters
----------
f : file
The open CDF file
gORv : bool
True if a global attribute, False if variable attribute
name : str
name of the attribute
Returns
-------
num : int
The attribute number
byte_loc : int
The current location in file f
"""
f.seek(0, 2)
byte_loc = f.tell()
block_size = self.ADR_BASE_SIZE64
section_type = self.ADR_
nextADR = 0
headAgrEDR = 0
if gORv:
scope = 1
else:
scope = 2
num = len(self.attrs)
ngrEntries = 0
maxgrEntry = -1
rfuA = 0
headAzEDR = 0
nzEntries = 0
maxzEntry = -1
rfuE = -1
adr = bytearray(block_size)
adr[0:8] = struct.pack(">q", block_size)
adr[8:12] = struct.pack(">i", section_type)
adr[12:20] = struct.pack(">q", nextADR)
adr[20:28] = struct.pack(">q", headAgrEDR)
adr[28:32] = struct.pack(">i", scope)
adr[32:36] = struct.pack(">i", num)
adr[36:40] = struct.pack(">i", ngrEntries)
adr[40:44] = struct.pack(">i", maxgrEntry)
adr[44:48] = struct.pack(">i", rfuA)
adr[48:56] = struct.pack(">q", headAzEDR)
adr[56:60] = struct.pack(">i", nzEntries)
adr[60:64] = struct.pack(">i", maxzEntry)
adr[64:68] = struct.pack(">i", rfuE)
tofill = 256 - len(name)
adr[68:324] = (name + "\0" * tofill).encode()
f.write(adr)
info = (name, scope, byte_loc)
self.attrsinfo[num] = info
if scope == 1:
self.gattrs.append(name)
else:
self.vattrs.append(name)
self.attrs.append(name)
if num > 0:
# ADR's ADRnext
self._update_offset_value(f, self.attrsinfo[num - 1][2] + 12, 8, byte_loc)
else:
# GDR's ADRhead
self._update_offset_value(f, self.gdr_head + 28, 8, byte_loc)
# GDR's NumAttr
self._update_offset_value(f, self.gdr_head + 48, 4, num + 1)
return num, byte_loc
def _write_aedr(
self,
f: io.BufferedWriter,
gORz: bool,
attrNum: int,
entryNum: int,
value: Union[Number, np.ndarray],
pdataType: int,
pnumElems: int,
zVar: bool,
) -> int:
"""
Writes an aedr into the end of the file.
Parameters
----------
f : file
The current open CDF file
gORz : bool
True if this entry is for a global or z variable, False if r variable
attrNum : int
Number of the attribute this aedr belongs to.
entryNum : int
Number of the entry
value :
The value of this entry
pdataType : int
The CDF data type of the value
pnumElems : int
Number of elements in the value.
zVar : bool
True if this entry belongs to a z variable
Returns
-------
byte_loc : int
This current location in the file after writing the aedr.
"""
f.seek(0, 2)
byte_loc = f.tell()
        if gORz or not zVar:
section_type = self.AgrEDR_
else:
section_type = self.AzEDR_
nextAEDR = 0
if pdataType is None:
# Figure out Data Type if not supplied
if not isinstance(value, Number):
avalue = value[0]
else:
avalue = value
if isinstance(avalue, int):
pdataType = self.CDF_INT8
elif isinstance(avalue, float):
pdataType = self.CDF_FLOAT
elif isinstance(avalue, complex):
pdataType = self.CDF_EPOCH16
else:
# assume a boolean
pdataType = self.CDF_INT1
if pnumElems is None:
# Figure out number of elements if not supplied
if isinstance(value, str):
pdataType = self.CDF_CHAR
pnumElems = 1 if len(value) == 0 else len(value)
else:
if hasattr(value, "__len__") and not isinstance(value, str):
pnumElems = len(value)
else:
pnumElems = 1
dataType = pdataType
numElems = pnumElems
rfuB = 0
rfuC = 0
rfuD = -1
rfuE = -1
if gORz:
numStrings = 0
else:
if isinstance(value, str):
numStrings = value.count("\\N ") + 1
else:
numStrings = 0
recs, cdata = self._convert_data(dataType, numElems, 1, value)
if dataType == 51:
numElems = 1 if len(cdata) == 0 else len(cdata)
if len(cdata) == 0:
value_size = 1
cdata = "\x00".encode()
else:
value_size = recs * self._datatype_size(dataType, numElems)
block_size = value_size + 56
aedr = bytearray(block_size)
aedr[0:8] = struct.pack(">q", block_size)
aedr[8:12] = struct.pack(">i", section_type)
aedr[12:20] = struct.pack(">q", nextAEDR)
aedr[20:24] = struct.pack(">i", attrNum)
aedr[24:28] = struct.pack(">i", dataType)
aedr[28:32] = struct.pack(">i", entryNum)
aedr[32:36] = struct.pack(">i", numElems)
aedr[36:40] = struct.pack(">i", numStrings)
aedr[40:44] = struct.pack(">i", rfuB)
aedr[44:48] = struct.pack(">i", rfuC)
aedr[48:52] = struct.pack(">i", rfuD)
aedr[52:56] = struct.pack(">i", rfuE)
aedr[56:block_size] = cdata
f.write(aedr)
return byte_loc
def _write_vdr(
self,
f: io.BufferedWriter,
cdataType: int,
numElems: int,
numDims: int,
dimSizes: List[int],
name: str,
dimVary: List[bool],
recVary: bool,
sparse: int,
blockingfactor: int,
compression: int,
pad: str,
zVar: bool,
) -> Tuple[int, int]:
"""
Writes a VDR block to the end of the file.
Parameters
----------
f : file
The open CDF file
cdataType : int
The CDF data type
numElems : int
The number of elements in the variable
numDims : int
The number of dimensions in the variable
dimSizes : int
The size of each dimension
name : str
The name of the variable
dimVary : array of bool
Bool array of size numDims.
True if a dimension is physical, False if a dimension is not physical
recVary : bool
True if each record is unique
sparse : bool
True if using sparse records
blockingfactor: int
            The blocking factor, the number of records in a chunk when the variable is compressed
compression : int
The level of compression between 0-9
pad : num
The pad values to insert
zVar : bool
True if this variable is a z variable
Returns
-------
num : int
The number of the variable
byte_loc : int
The current byte location within the file
"""
if zVar:
block_size = self.zVDR_BASE_SIZE64
section_type = self.zVDR_
else:
block_size = self.rVDR_BASE_SIZE64
section_type = self.rVDR_
nextVDR = 0
dataType = cdataType
if dataType == -1:
raise ValueError("Bad data type.")
maxRec = -1
headVXR = 0
tailVXR = 0
flags = 0
if recVary:
flags = self._set_bit(flags, 0)
flags = self._set_bit(flags, 1)
sRecords = sparse
rfuB = 0
rfuC = -1
rfuF = -1
if zVar:
num = len(self.zvars)
else:
num = len(self.rvars)
if compression > 0:
offsetCPRorSPR = self._write_cpr(f, self.GZIP_COMPRESSION, compression)
flags = self._set_bit(flags, 2)
else:
offsetCPRorSPR = -1
if blockingfactor is None:
blockingFactor = 1
else:
blockingFactor = blockingfactor
# Increase the block size to account for "zDimSizes" and "DimVarys" fields
if numDims > 0:
if zVar:
block_size = block_size + numDims * 8
else:
block_size = block_size + numDims * 4
# Determine pad value
if pad is not None:
if dataType == 51 or dataType == 52:
# pad needs to be the correct number of elements
if len(pad) < numElems:
pad += "\0" * (numElems - len(pad))
elif len(pad) > numElems:
pad = pad[:numElems]
pad_bytes = pad.encode()
else:
dummy, pad_bytes = self._convert_data(dataType, numElems, 1, pad)
else:
pad_bytes = self._default_pad(dataType, numElems)
f.seek(0, 2)
byte_loc = f.tell()
block_size += len(pad_bytes)
vdr = bytearray(block_size)
# if (dataType == 51):
# numElems = len(pad)
vdr[0:8] = struct.pack(">q", block_size)
vdr[8:12] = struct.pack(">i", section_type)
vdr[12:20] = struct.pack(">q", nextVDR)
vdr[20:24] = struct.pack(">i", dataType)
vdr[24:28] = struct.pack(">i", maxRec)
vdr[28:36] = struct.pack(">q", headVXR)
vdr[36:44] = struct.pack(">q", tailVXR)
vdr[44:48] = struct.pack(">i", flags)
vdr[48:52] = struct.pack(">i", sRecords)
vdr[52:56] = struct.pack(">i", rfuB)
vdr[56:60] = struct.pack(">i", rfuC)
vdr[60:64] = struct.pack(">i", rfuF)
vdr[64:68] = struct.pack(">i", numElems)
vdr[68:72] = struct.pack(">i", num)
vdr[72:80] = struct.pack(">q", offsetCPRorSPR)
vdr[80:84] = struct.pack(">i", blockingFactor)
tofill = 256 - len(name)
vdr[84:340] = (name + "\0" * tofill).encode()
if zVar:
vdr[340:344] = struct.pack(">i", numDims)
if numDims > 0:
for i in range(0, numDims):
vdr[344 + i * 4 : 344 + (i + 1) * 4] = struct.pack(">i", dimSizes[i])
ist = 344 + numDims * 4
for i in range(0, numDims):
vdr[ist + i * 4 : ist + (i + 1) * 4] = struct.pack(">i", self.VARY)
ist = 344 + 8 * numDims
else:
if numDims > 0:
for i in range(0, numDims):
if dimVary[i] or dimVary[i] != 0:
vdr[340 + i * 4 : 344 + i * 4] = struct.pack(">i", self.VARY)
else:
vdr[340 + i * 4 : 344 + i * 4] = struct.pack(">i", self.NOVARY)
ist = 340 + 4 * numDims
vdr[ist:block_size] = pad_bytes
f.write(vdr)
# Set variable info
if not zVar:
numDims = self.num_rdim
dimSizes = self.rdim_sizes
info = (name, byte_loc, numDims, dimSizes, dimVary)
# Update the pointers from the CDR/previous VDR
if zVar:
self.zvarsinfo[num] = info
self.zvars.append(name)
if num > 0:
# VDR's VDRnext
self._update_offset_value(f, self.zvarsinfo[num - 1][1] + 12, 8, byte_loc)
# GDR's NzVars
self._update_offset_value(f, self.gdr_head + 60, 4, num + 1)
else:
self.rvarsinfo[num] = info
self.rvars.append(name)
if num > 0:
# VDR's VDRnext
self._update_offset_value(f, self.rvarsinfo[num - 1][1] + 12, 8, byte_loc)
# GDR's NrVars
self._update_offset_value(f, self.gdr_head + 44, 4, num + 1)
return num, byte_loc
def _write_vxr(self, f: io.BufferedWriter, numEntries: Optional[int] = None) -> int:
"""
Creates a VXR at the end of the file.
Returns byte location of the VXR
The First, Last, and Offset fields will need to be filled in later
"""
f.seek(0, 2)
byte_loc = f.tell()
section_type = self.VXR_
nextVXR = 0
if numEntries is None:
nEntries = self.NUM_VXR_ENTRIES
else:
nEntries = int(numEntries)
block_size = self.VXR_BASE_SIZE64 + (4 + 4 + 8) * nEntries
nUsedEntries = 0
firsts = [-1] * nEntries
lasts = [-1] * nEntries
offsets = [-1] * nEntries
vxr = bytearray(block_size)
vxr[0:8] = struct.pack(">q", block_size)
vxr[8:12] = struct.pack(">i", section_type)
vxr[12:20] = struct.pack(">q", nextVXR)
vxr[20:24] = struct.pack(">i", nEntries)
vxr[24:28] = struct.pack(">i", nUsedEntries)
estart = 28 + 4 * nEntries
vxr[28:estart] = struct.pack(">%si" % nEntries, *firsts)
eend = estart + 4 * nEntries
vxr[estart:eend] = struct.pack(">%si" % nEntries, *lasts)
vxr[eend:block_size] = struct.pack(">%sq" % nEntries, *offsets)
f.write(vxr)
return byte_loc
def _write_vvr(self, f: io.BufferedWriter, data: bytes) -> int:
"""
Writes a vvr to the end of file "f" with the byte stream "data".
"""
f.seek(0, 2)
byte_loc = f.tell()
block_size = self.VVR_BASE_SIZE64 + len(data)
section_type = self.VVR_
vvr1 = bytearray(12)
vvr1[0:8] = struct.pack(">q", block_size)
vvr1[8:12] = struct.pack(">i", section_type)
f.write(vvr1)
f.write(data)
return byte_loc
def _write_cpr(self, f: io.BufferedWriter, cType: int, parameter: int) -> int:
"""
Write compression info to the end of the file in a CPR.
"""
f.seek(0, 2)
byte_loc = f.tell()
block_size = self.CPR_BASE_SIZE64 + 4
section_type = self.CPR_
rfuA = 0
pCount = 1
cpr = bytearray(block_size)
cpr[0:8] = struct.pack(">q", block_size)
cpr[8:12] = struct.pack(">i", section_type)
cpr[12:16] = struct.pack(">i", cType)
cpr[16:20] = struct.pack(">i", rfuA)
cpr[20:24] = struct.pack(">i", pCount)
cpr[24:28] = struct.pack(">i", parameter)
f.write(cpr)
return byte_loc
def _write_cvvr(self, f: io.BufferedWriter, data: Any) -> int:
"""
Write compressed "data" variable to the end of the file in a CVVR
"""
f.seek(0, 2)
byte_loc = f.tell()
cSize = len(data)
block_size = self.CVVR_BASE_SIZE64 + cSize
section_type = self.CVVR_
rfuA = 0
cvvr1 = bytearray(24)
cvvr1[0:8] = struct.pack(">q", block_size)
cvvr1[8:12] = struct.pack(">i", section_type)
cvvr1[12:16] = struct.pack(">i", rfuA)
cvvr1[16:24] = struct.pack(">q", cSize)
f.write(cvvr1)
f.write(data)
return byte_loc
def _write_ccr(self, f: io.BufferedWriter, g: io.BufferedWriter, level: int) -> None:
"""
Write a CCR to file "g" from file "f" with level "level".
Currently, only handles gzip compression.
Parameters:
f : file
Uncompressed file to read from
g : file
File to read the compressed file into
level : int
The level of the compression from 0 to 9
Returns: None
"""
f.seek(8)
data = f.read()
uSize = len(data)
section_type = self.CCR_
rfuA = 0
cData = gzip_deflate(data, level)
block_size = self.CCR_BASE_SIZE64 + len(cData)
cprOffset = 0
ccr1 = bytearray(32)
# ccr1[0:4] = binascii.unhexlify(CDF.V3magicNUMBER_1)
# ccr1[4:8] = binascii.unhexlify(CDF.V3magicNUMBER_2c)
ccr1[0:8] = struct.pack(">q", block_size)
ccr1[8:12] = struct.pack(">i", section_type)
ccr1[12:20] = struct.pack(">q", cprOffset)
ccr1[20:28] = struct.pack(">q", uSize)
ccr1[28:32] = struct.pack(">i", rfuA)
g.seek(0, 2)
g.write(ccr1)
g.write(cData)
cprOffset = self._write_cpr(g, self.GZIP_COMPRESSION, level)
self._update_offset_value(g, 20, 8, cprOffset)
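    # Sketch of the compressed file "g" produced above (assuming the 8
    # magic-number bytes were already written by the caller):
    #   8 bytes magic | 32-byte CCR header | gzip-deflated CDF body | CPR
    # The CCR's cprOffset field (file offset 20) is patched afterwards to
    # point at the CPR that was written last.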
def _convert_option(self) -> str:
"""
Determines which symbol to use for numpy conversions
> : a little endian system to big endian ordering
< : a big endian system to little endian ordering
= : No conversion
"""
data_endian = "little"
        if self._encoding in (1, 2, 5, 7, 9, 11, 12, 18):
            data_endian = "big"
if sys.byteorder == "little" and data_endian == "big":
# big->little
order = ">"
elif sys.byteorder == "big" and data_endian == "little":
# little->big
order = "<"
else:
# no conversion
order = "="
return order
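    # Illustrative use of the returned symbol: it is prepended to a struct
    # format string, e.g. struct.pack(">4i", 1, 2, 3, 4) emits four big-endian
    # int32 values when the host is little-endian but the CDF encoding is big.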
@staticmethod
def _convert_type(data_type: int) -> str:
"""
Converts CDF data types into python types
"""
if data_type in (1, 41):
dt_string = "b"
elif data_type == 2:
dt_string = "h"
elif data_type == 4:
dt_string = "i"
elif data_type in (8, 33):
dt_string = "q"
elif data_type == 11:
dt_string = "B"
elif data_type == 12:
dt_string = "H"
elif data_type == 14:
dt_string = "I"
elif data_type in (21, 44):
dt_string = "f"
elif data_type in (22, 45, 31):
dt_string = "d"
elif data_type == 32:
dt_string = "d"
elif data_type in (51, 52):
dt_string = "s"
else:
dt_string = ""
return dt_string
@staticmethod
def _convert_nptype(data_type: int, data: Any) -> bytes:
"""
Converts "data" of CDF type "data_type" into a numpy array
"""
if data_type in (1, 41):
return np.int8(data).tobytes()
elif data_type == 2:
return np.int16(data).tobytes()
elif data_type == 4:
return np.int32(data).tobytes()
elif (data_type == 8) or (data_type == 33):
return np.int64(data).tobytes()
elif data_type == 11:
return np.uint8(data).tobytes()
elif data_type == 12:
return np.uint16(data).tobytes()
elif data_type == 14:
return np.uint32(data).tobytes()
elif (data_type == 21) or (data_type == 44):
return np.float32(data).tobytes()
elif (data_type == 22) or (data_type == 45) or (data_type == 31):
return np.float64(data).tobytes()
elif data_type == 32:
return np.complex128(data).tobytes()
elif ((data_type) == 51) or ((data_type) == 52):
utf8_bytes = np.asarray(data).astype("U").tobytes()
return utf8_bytes.decode().replace("\x00", "").encode("ASCII")
else:
return data
def _default_pad(self, data_type: int, numElems: int) -> bytes:
"""
Determines the default pad data for a "data_type"
"""
order = self._convert_option()
if (data_type == 1) or (data_type == 41):
pad_value = struct.pack(order + "b", -127)
elif data_type == 2:
pad_value = struct.pack(order + "h", -32767)
elif data_type == 4:
pad_value = struct.pack(order + "i", -2147483647)
elif (data_type == 8) or (data_type == 33):
pad_value = struct.pack(order + "q", -9223372036854775807)
elif data_type == 11:
pad_value = struct.pack(order + "B", 254)
elif data_type == 12:
pad_value = struct.pack(order + "H", 65534)
elif data_type == 14:
pad_value = struct.pack(order + "I", 4294967294)
elif (data_type == 21) or (data_type == 44):
pad_value = struct.pack(order + "f", -1.0e30)
elif (data_type == 22) or (data_type == 45):
pad_value = struct.pack(order + "d", -1.0e30)
elif data_type == 31:
pad_value = struct.pack(order + "d", 0.0)
elif data_type == 32:
pad_value = struct.pack(order + "2d", *[0.0, 0.0])
elif (data_type == 51) or (data_type == 52):
tmpPad = str(" " * numElems).encode()
form = str(numElems)
pad_value = struct.pack(form + "b", *tmpPad)
return pad_value
def _numpy_to_bytes(self, data_type: int, num_values: int, num_elems: int, indata: npt.NDArray) -> Tuple[int, bytes]:
"""
Converts a numpy array of numbers into a byte stream
Parameters
----------
data_type : int
The CDF file data type
num_values : int
The number of values in each record
num_elems: int
The number of elements in each value
indata : (varies)
The data to be converted
Returns
-------
recs : int
The number of records generated by converting indata
odata : byte stream
The stream of bytes to write to the CDF file
"""
tofrom = self._convert_option()
npdata = self._convert_nptype(data_type, indata)
if indata.size == 0: # Check if the data being read in is zero size
recs = 0
elif indata.size == num_values * num_elems: # Check if only one record is being read in
recs = 1
else:
recs = len(indata)
dt_string = self._convert_type(data_type)
if data_type == self.CDF_EPOCH16:
num_elems = 2 * num_elems
form = str(recs * num_values * num_elems) + dt_string
form2 = tofrom + str(recs * num_values * num_elems) + dt_string
datau = struct.unpack(form, npdata)
return recs, struct.pack(form2, *datau)
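    # Sketch of the round trip above for 3 records of one CDF_REAL8 value each:
    # the numpy bytes are unpacked with form "3d" (host byte order) and then
    # repacked with form2, e.g. ">3d", so the output stream matches the byte
    # order selected by _convert_option().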
def _convert_data(self, data_type: int, num_elems: int, num_values: int, indata: Any) -> Tuple[int, bytes]:
"""
Converts "indata" into a byte stream
Parameters
----------
data_type : int
The CDF file data type
num_elems : int
The number of elements in the data
num_values : int
The number of values in each record
indata : (varies)
The data to be converted
Returns
-------
recs : int
The number of records generated by converting indata
odata : byte stream
The stream of bytes to write to the CDF file
"""
recSize = self._datatype_size(data_type, num_elems) * num_values
# List or Tuple data
if isinstance(indata, list) or isinstance(indata, tuple):
if (num_values != 1) and (len(indata) != num_values):
raise Exception("Use numpy for inputting multidimensional arrays")
size = len(indata)
if data_type == self.CDF_CHAR or data_type == self.CDF_UCHAR:
odata = ""
for x in range(0, size):
adata = indata[x]
if isinstance(adata, list) or isinstance(adata, tuple):
size2 = len(adata)
for y in range(0, size2):
odata += adata[y].ljust(num_elems, "\x00")
else:
size2 = 1
odata += adata.ljust(num_elems, "\x00")
recs = int((size * size2) / num_values)
return recs, odata.encode()
elif all(isinstance(item, str) for item in indata):
# Attempt to convert to a numpy array of numbers
try:
return self._numpy_to_bytes(data_type, num_values, num_elems, np.array([float(item) for item in indata]))
                except Exception:
                    # Do the best we can, create bytes from the string.
                    # It will probably come out to be gibberish
outdata = ("".join(indata)).ljust(num_elems, "\x00").encode()
recs = int(len(outdata) / recSize)
return recs, outdata
else:
try:
return self._numpy_to_bytes(data_type, num_values, num_elems, np.array(indata))
                except Exception:
tofrom = self._convert_option()
dt_string = self._convert_type(data_type)
recs = int(size / num_values)
if data_type == self.CDF_EPOCH16 and isinstance(indata[0], complex):
complex_data = []
for x in range(0, recs):
complex_data.append(indata[x].real)
complex_data.append(indata[x].imag)
size = 2 * size
indata = complex_data
if data_type == self.CDF_EPOCH16 and not isinstance(indata[0], complex):
recs = int(recs / 2)
form = tofrom + str(size) + dt_string
return recs, struct.pack(form, *indata)
elif isinstance(indata, bytes):
tofrom = self._convert_option()
recs = int(len(indata) / recSize)
dt_string = self._convert_type(data_type)
size = recs * num_values * num_elems
if data_type == self.CDF_EPOCH16:
size = size * 2
form = str(size) + dt_string
form2 = tofrom + form
datau = struct.unpack(form, indata)
return recs, struct.pack(form2, *datau)
elif isinstance(indata, np.ndarray):
if data_type == self.CDF_CHAR or data_type == self.CDF_UCHAR:
size = len(np.atleast_1d(indata))
odata = ""
if size >= 1:
for x in range(0, size):
if indata.ndim > 0:
adata = indata[x]
else:
adata = indata
if isinstance(adata, list) or isinstance(adata, tuple):
size2 = len(adata)
for y in range(0, size2):
odata += str(adata[y]).ljust(num_elems, "\x00")
elif isinstance(adata, np.ndarray):
size2 = adata.size
for y in range(0, size2):
if adata.ndim > 0:
bdata = adata[y]
else:
bdata = adata
odata += str(bdata).ljust(num_elems, "\x00")
else:
size2 = 1
odata += str(adata).ljust(num_elems, "\x00")
else:
adata = ""
size2 = 1
odata += str(adata).ljust(num_elems, "\x00")
recs = int((size * size2) / num_values)
return recs, odata.encode()
else:
return self._numpy_to_bytes(data_type, num_values, num_elems, indata)
elif isinstance(indata, str) and (data_type == self.CDF_CHAR or data_type == self.CDF_UCHAR):
# Just convert the string directly to bytes
return 1, indata.ljust(num_elems, "\x00").encode()
        elif isinstance(indata, str) and data_type != self.CDF_CHAR and data_type != self.CDF_UCHAR:
# Try to convert the single string to a numerical type.
try:
return self._numpy_to_bytes(data_type, num_values, num_elems, np.array([float(indata)]))
            except Exception:
                # Do the best we can, create bytes from the string.
                # It will probably come out to be gibberish
outdata = indata.ljust(num_elems, "\x00").encode()
recs = int(len(outdata) / recSize)
return recs, outdata
else:
try:
# Try converting the data to numpy
return self._numpy_to_bytes(data_type, num_values, num_elems, np.array(indata))
except:
tofrom = self._convert_option()
dt_string = self._convert_type(data_type)
if data_type == self.CDF_EPOCH16:
num_elems = 2 * num_elems
try:
recs = int(len(indata) / recSize)
except Exception:
recs = 1
if data_type == self.CDF_EPOCH16:
complex_data = []
if recs > 1:
for x in range(0, recs):
complex_data.append(indata[x].real)
complex_data.append(indata[x].imag)
else:
complex_data.append(indata.real)
complex_data.append(indata.imag)
indata = complex_data
form = tofrom + str(recs * num_values * num_elems) + dt_string
try:
if recs * num_values * num_elems > 1:
return recs, struct.pack(form, *indata)
else:
return recs, struct.pack(form, indata)
except struct.error:
raise ValueError("Unable to convert data to CDF format, data object cannot be of type string.")
def _num_values(self, zVar: bool, varNum: int) -> int:
"""
Determines the number of values in a record.
Set zVar=True if this is a zvariable.
"""
values = 1
if zVar:
numDims = self.zvarsinfo[varNum][2]
dimSizes = self.zvarsinfo[varNum][3]
dimVary = self.zvarsinfo[varNum][4]
else:
numDims = self.rvarsinfo[varNum][2]
dimSizes = self.rvarsinfo[varNum][3]
dimVary = self.rvarsinfo[varNum][4]
if numDims < 1:
return values
else:
for x in range(0, numDims):
if zVar:
values = values * dimSizes[x]
else:
if dimVary[x] != 0:
values = values * dimSizes[x]
return values
def _read_offset_value(self, f: io.BufferedWriter, offset: int, size: int) -> int:
"""
Reads an integer value from file "f" at location "offset".
"""
f.seek(offset, 0)
if size == 8:
return int.from_bytes(f.read(8), "big", signed=True)
else:
return int.from_bytes(f.read(4), "big", signed=True)
def _update_offset_value(self, f: io.BufferedWriter, offset: int, size: int, value: Any) -> None:
"""
Writes "value" into location "offset" in file "f".
"""
f.seek(offset, 0)
if size == 8:
f.write(struct.pack(">q", value))
else:
f.write(struct.pack(">i", value))
def _update_aedr_link(self, f: io.BufferedWriter, attrNum: int, zVar: bool, varNum: int, offset: int) -> None:
"""
Updates variable aedr links
Parameters
----------
f : file
The open CDF file
attrNum : int
The number of the attribute to change
zVar : bool
True if we are updating a z variable attribute
varNum : int
The variable number associated with this aedr
offset : int
The offset in the file to the AEDR
"""
# The offset to this AEDR's ADR
adr_offset = self.attrsinfo[attrNum][2]
# Get the number of entries
if zVar:
f.seek(adr_offset + 56, 0)
# ADR's NzEntries
entries = int.from_bytes(f.read(4), "big", signed=True)
# ADR's MAXzEntry
maxEntry = int.from_bytes(f.read(4), "big", signed=True)
else:
f.seek(adr_offset + 36, 0)
# ADR's NgrEntries
entries = int.from_bytes(f.read(4), "big", signed=True)
# ADR's MAXgrEntry
maxEntry = int.from_bytes(f.read(4), "big", signed=True)
if entries == 0:
# If this is the first entry, update the ADR to reflect
if zVar:
# AzEDRhead
self._update_offset_value(f, adr_offset + 48, 8, offset)
# NzEntries
self._update_offset_value(f, adr_offset + 56, 4, 1)
# MaxzEntry
self._update_offset_value(f, adr_offset + 60, 4, varNum)
else:
# AgrEDRhead
self._update_offset_value(f, adr_offset + 20, 8, offset)
# NgrEntries
self._update_offset_value(f, adr_offset + 36, 4, 1)
# MaxgrEntry
self._update_offset_value(f, adr_offset + 40, 4, varNum)
else:
if zVar:
f.seek(adr_offset + 48, 0)
head = int.from_bytes(f.read(8), "big", signed=True)
else:
f.seek(adr_offset + 20, 0)
head = int.from_bytes(f.read(8), "big", signed=True)
aedr = head
previous_aedr = head
done = False
# For each entry, re-adjust file offsets if needed
for _ in range(0, entries):
f.seek(aedr + 28, 0)
# Get variable number for entry
num = int.from_bytes(f.read(4), "big", signed=True)
if num > varNum:
# insert an aedr to the chain
# AEDRnext
self._update_offset_value(f, previous_aedr + 12, 8, offset)
# AEDRnext
self._update_offset_value(f, offset + 12, 8, aedr)
done = True
break
else:
# move to the next aedr in chain
f.seek(aedr + 12, 0)
previous_aedr = aedr
aedr = int.from_bytes(f.read(8), "big", signed=True)
# If no link was made, update the last found aedr
if not done:
self._update_offset_value(f, previous_aedr + 12, 8, offset)
if zVar:
self._update_offset_value(f, adr_offset + 56, 4, entries + 1)
if maxEntry < varNum:
self._update_offset_value(f, adr_offset + 60, 4, varNum)
else:
self._update_offset_value(f, adr_offset + 36, 4, entries + 1)
if maxEntry < varNum:
self._update_offset_value(f, adr_offset + 40, 4, varNum)
@staticmethod
def _set_bit(value: int, bit: int) -> int:
return value | (1 << bit)
@staticmethod
def _clear_bit(value: int, bit: int) -> int:
return value & ~(1 << bit)
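    # Example use of these bit helpers: _write_cdr ORs individual flag bits
    # into the CDR flag field, e.g. bit 0 for row-major ordering and bits 2-3
    # when a checksum is requested; _set_bit(0, 2) == 0b100 == 4 and
    # _clear_bit(0b111, 1) == 0b101 == 5.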
@staticmethod
def _checklistofstrs(obj: Any) -> bool:
return bool(obj) and all(isinstance(elem, str) for elem in obj)
@staticmethod
def _checklistofNums(obj: Any) -> bool:
"""
This method checks if a list is ready to be immediately converted to binary format,
or if any pre-processing needs to occur. Numbers and datetime64 objects can be immediately converted.
"""
if hasattr(obj, "__len__"):
return all((isinstance(elem, numbers.Number) or isinstance(elem, np.datetime64)) for elem in obj)
else:
return isinstance(obj, numbers.Number) or isinstance(obj, np.datetime64)
def _md5_compute(self, f: io.BufferedWriter) -> bytes:
"""
Computes the checksum of the file
"""
md5 = hashlib.md5()
block_size = 16384
f.seek(0, 2)
remaining = f.tell()
f.seek(0)
while remaining > block_size:
data = f.read(block_size)
remaining = remaining - block_size
md5.update(data)
if remaining > 0:
data = f.read(remaining)
md5.update(data)
return md5.digest()
@staticmethod
def _make_blocks(records) -> List[Tuple[int, int]]: # type: ignore[no-untyped-def]
"""
Organizes the physical records into blocks in a list by
placing consecutive physical records into a single block, so
        fewer VXRs will be created.
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Parameters
----------
records: list
A list of records that there is data for
Returns
-------
sparse_blocks: list of list
A list of ranges we have physical values for.
Example:
Input: [1,2,3,4,10,11,12,13,50,51,52,53]
Output: [[1,4],[10,13],[50,53]]
"""
sparse_blocks = []
total = len(records)
if total == 0:
return []
x = 0
while x < total:
recstart = records[x]
y = x
recnum = recstart
# Find the location in the records before the next gap
# Call this value "y"
while (y + 1) < total:
y = y + 1
nextnum = records[y]
diff = nextnum - recnum
if diff == 1:
recnum = nextnum
else:
y = y - 1
break
if (y + 1) == total:
recend = records[total - 1]
else:
recend = records[y]
x = y + 1
sparse_blocks.append((recstart, recend))
return sparse_blocks
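    # Doctest-style example matching the docstring above:
    # >>> CDF._make_blocks([1, 2, 3, 4, 10, 11, 12, 13, 50, 51, 52, 53])
    # [(1, 4), (10, 13), (50, 53)]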
def _make_sparse_blocks(self, variable, records, data: List[Tuple[int, int, np.ndarray]]): # type: ignore[no-untyped-def]
"""
Handles the data for the variable with sparse records.
Organizes the physical record numbers into blocks in a list:
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Place consecutive physical records into a single block
If all records are physical, this calls _make_sparse_blocks_with_physical
If any records are virtual, this calls _make_sparse_blocks_with_virtual
Parameters:
variable : dict
the variable dictionary, with 'Num_Dims', 'Dim_Sizes',
'Data_Type', 'Num_Elements' key words, typically
returned from a call to cdf read's varinq('variable',
expand=True)
records : list
a list of physical records
data : varies
bytes array, numpy.ndarray or list of str form with all physical
data or embedded virtual data (returned from call to
varget('variable') for a sparse variable)
Returns:
sparse_blocks: list
A list of sparse records/data in the form
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
"""
if isinstance(data, dict):
try:
data = data["Data"]
except Exception:
logger.warning("Unknown dictionary.... Skip")
return None
if isinstance(data, np.ndarray):
if len(records) == len(data):
# All are physical data
return self._make_sparse_blocks_with_physical(variable, records, data)
elif len(records) < len(data):
# There are some virtual data
return self._make_sparse_blocks_with_virtual(variable, records, data)
else:
logger.warning("Invalid sparse data... " "Less data than the specified records... Skip")
elif isinstance(data, bytes):
record_length = len(records)
for z in range(0, variable["Num_Dims"]):
record_length = record_length * variable["Dim_Sizes"][z]
if record_length == len(data):
# All are physical data
return self._make_sparse_blocks_with_physical(variable, records, data)
elif record_length < len(data):
# There are some virtual data
return self._make_sparse_blocks_with_virtual(variable, records, data)
else:
logger.warning("Invalid sparse data... " "Less data than the specified records... Skip")
elif isinstance(data, list):
if isinstance(data[0], list):
if not (all(isinstance(el, str) for el in data[0])):
raise RuntimeError("Can not handle list data.... ", "Only support list of str...")
else:
if not (all(isinstance(el, str) for el in data)):
raise RuntimeError("Can not handle list data.... ", "Only support list of str...")
record_length = len(records)
# for z in range(0, variable['Num_Dims']):
# record_length = record_length * variable['Dim_Sizes'][z]
if record_length == len(data):
# All are physical data
return self._make_sparse_blocks_with_physical(variable, records, data)
elif record_length < len(data):
# There are some virtual data
return self._make_sparse_blocks_with_virtual(variable, records, data)
else:
logger.warning("Invalid sparse data... ", "Less data than the specified records... Skip")
else:
logger.warning("Invalid sparse data... ", "Less data than the specified records... Skip")
return
def _make_sparse_blocks_with_virtual(self, variable, records, data) -> List[Tuple[int, int, np.ndarray]]: # type: ignore[no-untyped-def]
"""
Handles the data for the variable with sparse records.
Organizes the physical record numbers into blocks in a list:
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Place consecutive physical records into a single block
Parameters:
variable: dict
the variable, returned from varinq('variable', expand=True)
records: list
a list of physical records
data: varies
                bytes array, numpy.ndarray or list of str form with virtual data
embedded, returned from varget('variable') call
"""
# Gather the ranges for which we have physical data
sparse_blocks = self._make_blocks(records)
sparse_data = []
if isinstance(data, np.ndarray):
for sblock in sparse_blocks:
# each block in this list: [starting_rec#, ending_rec#, data]
starting = sblock[0]
ending = sblock[1] + 1
sparse_data.append((sblock[0], sblock[1], data[starting:ending]))
return sparse_data
elif isinstance(data, bytes):
y = 1
for z in range(0, variable["Num_Dims"]):
y = y * variable["Dim_Sizes"][z]
y = y * self._datatype_size(variable["Data_Type"], variable["Num_Elements"])
            for sblock in sparse_blocks:
                # each block in this list: [starting_rec#, ending_rec#, data]
                starting = sblock[0] * y
                ending = (sblock[1] + 1) * y
                sparse_data.append((sblock[0], sblock[1], np.array(data[starting:ending])))
return sparse_data
elif isinstance(data, list):
            for sblock in sparse_blocks:
                # each block in this list: [starting_rec#, ending_rec#, data]
                nrecs = sblock[1] - sblock[0] + 1
                datax = []
                ist = sblock[0]
                for z in range(0, nrecs):
                    datax.append(data[ist + z])
                sparse_data.append((sblock[0], sblock[1], np.array(datax)))
return sparse_data
else:
logger.warning("Can not handle data... Skip")
return None
def _make_sparse_blocks_with_physical(self, variable, records, data) -> List[Tuple[int, int, np.ndarray]]: # type: ignore[no-untyped-def]
# All records are physical... just a single block
# [[0,end_rec,data]]
# Determine if z variable
if variable["Var_Type"].lower() == "zvariable":
zVar = True
else:
zVar = False
# Determine dimension information
numDims = len(variable["Dim_Sizes"])
        numValues = 1
        if zVar:
            for x in range(0, numDims):
                numValues = numValues * variable["Dim_Sizes"][x]
        else:
            for x in range(0, numDims):
                if variable["Dim_Vary"][x] != 0:
                    numValues = numValues * variable["Dim_Sizes"][x]
# Determine blocks
sparse_blocks = self._make_blocks(records)
# Create a list in the form of [[0,100, [data]], ...]
sparse_data = []
recStart = 0
for sblock in sparse_blocks:
recs = sblock
totalRecs = recs[1] - recs[0] + 1
recEnd = recStart + totalRecs
sparse_data.append((recs[0], recs[1], data[recStart:recEnd]))
recStart = recStart + totalRecs
return sparse_data
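    # Hedged usage sketch for the sparse-record helpers above: given physical
    # records [0, 1, 2, 5, 6] and a numpy array of exactly 5 records,
    # _make_sparse_blocks() returns [(0, 2, data[0:3]), (5, 6, data[3:5])],
    # and each block is then written as its own VVR/VXR entry by
    # _write_var_data_sparse().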
|
class CDF:
'''
Creates an empty CDF file.
Parameters
----------
path :
The path name of the CDF (with or without .cdf extension)
cdf_spec : dict
The optional specification of the CDF file.
The keys for the dictionary are:
- ['Majority']: 'row_major' or 'column_major', or its
corresponding value. The default is 'column_major'.
- ['Encoding']: Data encoding scheme. See the CDF
documentation about the valid values.
Can be in string or its numeric corresponding value.
The default is 'host', which will be determined when
the script runs.
- ['Checksum']: Whether to set the data validation upon
file creation. The default is False.
- ['rDim_sizes']: The dimensional sizes, applicable
only to rVariables.
- ['Compressed']: Whether to compress the CDF at the file
level. A value of 0-9 or True/False, the
default is 0/False.
'''
def __init__(self, path: Union[str, Path], cdf_spec: Optional[Dict[str, Any]] = None, delete: bool = False):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def close(self) -> None:
'''
Closes the CDF Class.
1. If compression was set, this is where the compressed file is
written.
2. If a checksum is needed, this will place the checksum at the end
of the file.
'''
pass
@is_open
def write_globalattrs(self, globalAttrs):
'''
Writes the global attributes.
Parameters
----------
globalAttrs: dict
Global attribute name(s) and their value(s) pair(s).
The value(s) is a dictionary of entry number and value pair(s).
For example::
globalAttrs={}
globalAttrs['Global1']={0: 'Global Value 1'}
globalAttrs['Global2']={0: 'Global Value 2'}
For a non-string value, use a list with the value and its
CDF data type. For example::
globalAttrs['Global3']={0: [12, 'cdf_int4']}
globalAttrs['Global4']={0: [12.34, 'cdf_double']}
If the data type is not provided, a corresponding
CDF data type is assumed::
globalAttrs['Global3']={0: 12} as 'cdf_int4'
globalAttrs['Global4']={0: 12.34} as 'cdf_double'
CDF allows multi-values for non-string data for an attribute::
globalAttrs['Global5']={0: [[12.34,21.43], 'cdf_double']}
For multi-entries from a global variable, they should be
presented in this form::
GA6={}
GA6[0]='abcd'
GA6[1]=[12, 'cdf_int2']
GA6[2]=[12.5, 'cdf_float']
GA6[3]=[[0,1,2], 'cdf_int8']
globalAttrs['Global6']=GA6
....
f.write_globalattrs(globalAttrs)
'''
pass
@is_open
def write_variableattrs(self, variableAttrs):
'''
Writes a variable's attributes, provided the variable already exists.
Parameters
----------
variableAttrs : dict
Variable attribute name and its entry value pair(s).
The entry value is also a dictionary of variable id and value
pair(s). Variable id can be the variable name or its id number
in the file. Use write_var function if the variable does not exist.
For example::
variableAttrs={}
entries_1={}
entries_1['var_name_1'] = 'abcd'
entries_1['var_name_2'] = [12, 'cdf_int4']
....
variableAttrs['attr_name_1']=entries_1
entries_2={}
entries_2['var_name_1'] = 'xyz'
entries_2['var_name_2'] = [[12, 34], 'cdf_int4']
....
variableAttrs['attr_name_2']=entries_2
....
....
f.write_variableattrs(variableAttrs)
'''
pass
@is_open
    def write_var(self, var_spec, var_attrs=None, var_data=None):
'''
Writes a variable, along with variable attributes and data.
Parameters
----------
var_spec : dict
The specifications of the variable.
The required/optional keys for creating a variable:
Required keys:
- ['Variable']: The name of the variable
- ['Data_Type']: the CDF data type
- ['Num_Elements']: The number of elements. Always 1 the
for numeric type. The char length for string type.
- ['Rec_Vary']: Record variance
For zVariables:
- ['Dim_Sizes']: The dimensional sizes for zVariables only.
Use [] for 0-dimension. Each and
every dimension is varying for zVariables.
For rVariables:
- ['Dim_Vary']: The dimensional variances for rVariables only.
Optional keys:
- ['Var_Type']: Whether the variable is a zVariable or
rVariable. Valid values: "zVariable" and
"rVariable". The default is "zVariable".
- ['Sparse']: Whether the variable has sparse records.
Valid values are "no_sparse", "pad_sparse",
and "prev_sparse". The default is 'no_sparse'.
- ['Compress']: Set the gzip compression level (0 to 9), 0 for
no compression. The default is to compress
with level 6 (done only if the compressed
data is less than the uncompressed data).
- ['Block_Factor']: The blocking factor, the number of
records in a chunk when the variable is compressed.
- ['Pad']: The padded value (in bytes, numpy.ndarray or string)
var_attrs : dict
{attribute:value} pairs.
The attribute is the name of a variable attribute.
The value can have its data type specified for the
numeric data. If not, based on Python's type, a
corresponding CDF type is assumed: CDF_INT4 for int,
CDF_DOUBLE for float, CDF_EPOCH16 for complex and
and CDF_INT8 for long.
For example, the following defined attributes will
have the same types in the CDF::
var_attrs= { 'attr1': 'value1',
'attr2': 12.45,
'attr3': [3,4,5],
.....
}
With data type (in the list form)::
var_attrs= { 'attr1': 'value1',
'attr2': [12.45, 'CDF_DOUBLE'],
'attr3': [[3,4,5], 'CDF_INT4'],
.....
}
var_data :
The data for the variable. If the variable is
a regular variable without sparse records, it must
be in a single structure of bytes, or numpy.ndarray
for numeric variable, or str or list of strs for
string variable.
If the variable has sparse records, var_data should
be presented in a list/tuple with two elements,
the first being a list/tuple that contains the
physical record number(s), the second being the variable
data in bytes, numpy.ndarray, or a list of strings. Variable
data can have just physical records' data (with the same
number of records as the first element) or have data from both
physical records and virtual records (which with filled data).
The var_data has the form::
[[rec_#1,rec_#2,rec_#3,...],
[data_#1,data_#2,data_#3,...]]
See the sample for its setup.
'''
pass
def _write_var_attrs(self, f: io.BufferedWriter, varNum: int, var_attrs: Dict[str, Any], zVar: bool) -> None:
'''
Writes ADRs and AEDRs for variables
Parameters:
f : file
The open CDF file
varNum : int
The variable number for adding attributes
var_attrs : dict
A dictionary object full of variable attributes
zVar : bool
True if varNum is referencing a z variable
Returns: None
'''
pass
def _write_var_data_nonsparse(
self,
f: io.BufferedWriter,
zVar: bool,
var: int,
dataType: int,
numElems: int,
recVary: bool,
compression: int,
blockingfactor: int,
indata: Union[np.ndarray, bytearray, Dict[str, np.ndarray]],
) -> int:
'''
Creates VVRs and the corresponding VXRs full of "indata" data.
If there is no compression, creates exactly one VXR and VVR
If there is compression
Parameters
----------
f : file
The open CDF file
zVar : bool
True if this is z variable data
var : str
The name of the variable
dataType : int
the CDF variable type
numElems : int
number of elements in each record
recVary : bool
            True if each record is unique
compression : int
The amount of compression
blockingfactor: int
The size (in number of records) of a VVR data block
indata : varies
the data to write, should be a numpy or byte array
Returns
-------
recs : int
The number of records
'''
pass
def _write_var_data_sparse(
self,
f: io.BufferedWriter,
zVar: bool,
var: int,
dataType: int,
numElems: int,
recVary: bool,
oneblock: Tuple[int, int, np.ndarray],
) -> int:
'''
Writes a VVR and a VXR for this block of sparse data
Parameters:
f : file
The open CDF file
zVar : bool
True if this is for a z variable
var : int
The variable number
dataType : int
The CDF data type of this variable
numElems : int
The number of elements in each record
recVary : bool
True if the value varies across records
oneblock: list
A list of data in the form [startrec, endrec, [data]]
Returns:
recend : int
Just the "endrec" value input by the user in "oneblock"
'''
pass
def _create_vxr(self, f: io.BufferedWriter, recStart: int, recEnd: int, currentVDR: int, priorVXR: int, vvrOffset: int) -> int:
'''
Create a VXR AND use a VXR
Parameters:
f : file
The open CDF file
recStart : int
The start record of this block
recEnd : int
The ending record of this block
currentVDR : int
The byte location of the variables VDR
priorVXR : int
The byte location of the previous VXR
vvrOffset : int
                The byte location of the VVR
Returns:
vxroffset : int
The byte location of the created vxr
'''
pass
def _use_vxrentry(self, f: io.BufferedWriter, VXRoffset: int, recStart: int, recEnd: int, offset: int) -> int:
'''
Adds a VVR pointer to a VXR
'''
pass
def _add_vxr_levels_r(self, f: io.BufferedWriter, vxrhead: int, numVXRs: int) -> Tuple[int, int]:
'''
Build a new level of VXRs... make VXRs more tree-like
From:
VXR1 -> VXR2 -> VXR3 -> VXR4 -> ... -> VXRn
To:
new VXR1
            / | \
        VXR2 VXR3 VXR4
            / | \
            ...
        VXR5 .......... VXRn
Parameters
----------
f : file
The open CDF file
vxrhead : int
The byte location of the first VXR for a variable
numVXRs : int
The total number of VXRs
Returns
-------
newVXRhead : int
The byte location of the newest VXR head
newvxroff : int
The byte location of the last VXR head
'''
pass
def _update_vdr_vxrheadtail(self, f: io.BufferedWriter, vdr_offset: int, VXRoffset: int) -> None:
'''
This sets a VXR to be the first and last VXR in the VDR
'''
pass
def _get_recrange(self, f: io.BufferedWriter, VXRoffset: int) -> Tuple[int, int]:
'''
Finds the first and last record numbers pointed by the VXR
Assumes the VXRs are in order
'''
pass
@staticmethod
def _majority_token(major: str) -> int:
'''
        Returns the numerical type for a CDF row/column major type
'''
pass
@staticmethod
def _encoding_token(encoding: str) -> int:
'''
        Returns the numerical type for a CDF encoding type
'''
pass
@staticmethod
def _datatype_token(datatype: str) -> int:
'''
        Returns the numerical type for a CDF data type
'''
pass
def _datatype_define(self, value: Union[str, int, float, complex, np.ndarray]) -> Tuple[int, int]:
pass
@staticmethod
def _datatype_size(datatype: int, numElms: int) -> int:
'''
Gets datatype size
Parameters:
datatype : int
CDF variable data type
numElms : int
number of elements
Returns:
numBytes : int
The number of bytes for the data
'''
pass
@staticmethod
def _sparse_token(sparse: str) -> int:
'''
Returns the numerical CDF value for sparseness.
'''
pass
def _write_cdr(self, f: io.BufferedWriter, major: int, encoding: int, checksum: int) -> int:
pass
def _write_gdr(self, f: io.BufferedWriter) -> int:
pass
def _write_adr(self, f: io.BufferedWriter, gORv: bool, name: str) -> Tuple[int, int]:
'''
        Writes an ADR to the end of the file.
Additionally, it will update the offset values to either the previous ADR
or the ADRhead field in the GDR.
Parameters
----------
f : file
The open CDF file
gORv : bool
True if a global attribute, False if variable attribute
name : str
name of the attribute
Returns
-------
num : int
The attribute number
byte_loc : int
The current location in file f
'''
pass
def _write_aedr(
self,
f: io.BufferedWriter,
gORz: bool,
attrNum: int,
entryNum: int,
value: Union[Number, np.ndarray],
pdataType: int,
pnumElems: int,
zVar: bool,
) -> int:
'''
Writes an aedr into the end of the file.
Parameters
----------
f : file
The current open CDF file
gORz : bool
True if this entry is for a global or z variable, False if r variable
attrNum : int
Number of the attribute this aedr belongs to.
entryNum : int
Number of the entry
value :
The value of this entry
pdataType : int
The CDF data type of the value
pnumElems : int
Number of elements in the value.
zVar : bool
True if this entry belongs to a z variable
Returns
-------
byte_loc : int
This current location in the file after writing the aedr.
'''
pass
def _write_vdr(
self,
f: io.BufferedWriter,
cdataType: int,
numElems: int,
numDims: int,
dimSizes: List[int],
name: str,
dimVary: List[bool],
recVary: bool,
sparse: int,
blockingfactor: int,
compression: int,
pad: str,
zVar: bool,
) -> Tuple[int, int]:
'''
Writes a VDR block to the end of the file.
Parameters
----------
f : file
The open CDF file
cdataType : int
The CDF data type
numElems : int
The number of elements in the variable
numDims : int
The number of dimensions in the variable
dimSizes : int
The size of each dimension
name : str
The name of the variable
dimVary : array of bool
Bool array of size numDims.
True if a dimension is physical, False if a dimension is not physical
recVary : bool
True if each record is unique
sparse : bool
True if using sparse records
blockingfactor: int
            The blocking factor, the number of records in a chunk when the variable is compressed
compression : int
The level of compression between 0-9
pad : num
The pad values to insert
zVar : bool
True if this variable is a z variable
Returns
-------
num : int
The number of the variable
byte_loc : int
The current byte location within the file
'''
pass
def _write_vxr(self, f: io.BufferedWriter, numEntries: Optional[int] = None) -> int:
'''
Creates a VXR at the end of the file.
Returns byte location of the VXR
The First, Last, and Offset fields will need to be filled in later
'''
pass
def _write_vvr(self, f: io.BufferedWriter, data: bytes) -> int:
'''
Writes a vvr to the end of file "f" with the byte stream "data".
'''
pass
def _write_cpr(self, f: io.BufferedWriter, cType: int, parameter: int) -> int:
'''
Write compression info to the end of the file in a CPR.
'''
pass
def _write_cvvr(self, f: io.BufferedWriter, data: Any) -> int:
'''
Write compressed "data" variable to the end of the file in a CVVR
'''
pass
def _write_ccr(self, f: io.BufferedWriter, g: io.BufferedWriter, level: int) -> None:
'''
Write a CCR to file "g" from file "f" with level "level".
Currently, only handles gzip compression.
Parameters:
f : file
Uncompressed file to read from
g : file
File to read the compressed file into
level : int
The level of the compression from 0 to 9
Returns: None
'''
pass
def _convert_option(self) -> str:
'''
Determines which symbol to use for numpy conversions
> : a little endian system to big endian ordering
< : a big endian system to little endian ordering
= : No conversion
'''
pass
@staticmethod
def _convert_type(data_type: int) -> str:
'''
Converts CDF data types into python types
'''
pass
@staticmethod
def _convert_nptype(data_type: int, data: Any) -> bytes:
'''
Converts "data" of CDF type "data_type" into a numpy array
'''
pass
def _default_pad(self, data_type: int, numElems: int) -> bytes:
'''
Determines the default pad data for a "data_type"
'''
pass
def _numpy_to_bytes(self, data_type: int, num_values: int, num_elems: int, indata: npt.NDArray) -> Tuple[int, bytes]:
'''
Converts a numpy array of numbers into a byte stream
Parameters
----------
data_type : int
The CDF file data type
num_values : int
The number of values in each record
num_elems: int
The number of elements in each value
indata : (varies)
The data to be converted
Returns
-------
recs : int
The number of records generated by converting indata
odata : byte stream
The stream of bytes to write to the CDF file
'''
pass
def _convert_data(self, data_type: int, num_elems: int, num_values: int, indata: Any) -> Tuple[int, bytes]:
'''
Converts "indata" into a byte stream
Parameters
----------
data_type : int
The CDF file data type
num_elems : int
The number of elements in the data
num_values : int
The number of values in each record
indata : (varies)
The data to be converted
Returns
-------
recs : int
The number of records generated by converting indata
odata : byte stream
The stream of bytes to write to the CDF file
'''
pass
def _num_values(self, zVar: bool, varNum: int) -> int:
'''
Determines the number of values in a record.
Set zVar=True if this is a zvariable.
'''
pass
def _read_offset_value(self, f: io.BufferedWriter, offset: int, size: int) -> int:
'''
Reads an integer value from file "f" at location "offset".
'''
pass
def _update_offset_value(self, f: io.BufferedWriter, offset: int, size: int, value: Any) -> None:
'''
Writes "value" into location "offset" in file "f".
'''
pass
def _update_aedr_link(self, f: io.BufferedWriter, attrNum: int, zVar: bool, varNum: int, offset: int) -> None:
'''
Updates variable aedr links
Parameters
----------
f : file
The open CDF file
attrNum : int
The number of the attribute to change
zVar : bool
True if we are updating a z variable attribute
varNum : int
The variable number associated with this aedr
offset : int
The offset in the file to the AEDR
'''
pass
@staticmethod
def _set_bit(value: int, bit: int) -> int:
pass
@staticmethod
def _clear_bit(value: int, bit: int) -> int:
pass
@staticmethod
def _checklistofstrs(obj: Any) -> bool:
pass
@staticmethod
def _checklistofNums(obj: Any) -> bool:
'''
This method checks if a list is ready to be immediately converted to binary format,
or if any pre-processing needs to occur. Numbers and datetime64 objects can be immediately converted.
'''
pass
def _md5_compute(self, f: io.BufferedWriter) -> bytes:
'''
Computes the checksum of the file
'''
pass
@staticmethod
def _make_blocks(records) -> List[Tuple[int, int]]:
'''
Organizes the physical records into blocks in a list by
placing consecutive physical records into a single block, so
fewer VXRs will be created.
[[start_rec1,end_rec1,data_1], [start_rec2,end_rec2,data_2], ...]
Parameters
----------
records: list
A list of records that there is data for
Returns
-------
sparse_blocks: list of list
A list of ranges we have physical values for.
Example:
Input: [1,2,3,4,10,11,12,13,50,51,52,53]
Output: [[1,4],[10,13],[50,53]]
'''
pass
def _make_sparse_blocks(self, variable, records, data: List[Tuple[int, int, np.ndarray]]):
'''
Handles the data for the variable with sparse records.
Organizes the physical record numbers into blocks in a list:
[[start_rec1,end_rec1,data_1], [start_rec2,end_rec2,data_2], ...]
Place consecutive physical records into a single block
If all records are physical, this calls _make_sparse_blocks_with_physical
If any records are virtual, this calls _make_sparse_blocks_with_virtual
Parameters:
variable : dict
the variable dictionary, with 'Num_Dims', 'Dim_Sizes',
'Data_Type', 'Num_Elements' key words, typically
returned from a call to cdf read's varinq('variable',
expand=True)
records : list
a list of physical records
data : varies
bytes array, numpy.ndarray or list of str form with all physical
data or embedded virtual data (returned from call to
varget('variable') for a sparse variable)
Returns:
sparse_blocks: list
A list of sparse records/data in the form
[[start_rec1,end_rec1,data_1], [start_rec2,end_rec2,data_2], ...]
'''
pass
def _make_sparse_blocks_with_virtual(self, variable, records, data) -> List[Tuple[int, int, np.ndarray]]:
'''
Handles the data for the variable with sparse records.
Organizes the physical record numbers into blocks in a list:
[[start_rec1,end_rec1,data_1], [start_rec2,end_rec2,data_2], ...]
Place consecutive physical records into a single block
Parameters:
variable: dict
the variable, returned from varinq('variable', expand=True)
records: list
a list of physical records
data: varies
bytes array, numpy.ndarray or list of str form with virtual data
embedded, returned from varget('variable') call
'''
pass
def _make_sparse_blocks_with_physical(self, variable, records, data) -> List[Tuple[int, int, np.ndarray]]:
pass
| 66 | 41 | 52 | 4 | 34 | 14 | 8 | 0.42 | 0 | 21 | 1 | 0 | 38 | 20 | 50 | 50 | 2,815 | 280 | 1,807 | 562 | 1,696 | 752 | 1,493 | 495 | 1,442 | 36 | 0 | 10 | 411 |
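A minimal sketch of the grouping idea described in the _make_blocks docstring above (consecutive physical record numbers collapsed into [start, end] ranges so fewer VXRs are needed); the helper name group_records is hypothetical and not part of cdflib:

from typing import List, Tuple

def group_records(records: List[int]) -> List[Tuple[int, int]]:
    # Collapse sorted record numbers into (start, end) ranges of consecutive values.
    blocks: List[Tuple[int, int]] = []
    if not records:
        return blocks
    start = prev = records[0]
    for rec in records[1:]:
        if rec == prev + 1:           # still consecutive: extend the current block
            prev = rec
            continue
        blocks.append((start, prev))  # close the finished block and start a new one
        start = prev = rec
    blocks.append((start, prev))
    return blocks

# Matches the docstring example: [1,2,3,4,10,11,12,13,50,51,52,53] -> [(1, 4), (10, 13), (50, 53)]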
147,654 |
MAVENSDC/cdflib
|
cdflib/dataclasses.py
|
cdflib.dataclasses.ADRInfo
|
class ADRInfo:
scope: int
next_adr_loc: int
attribute_number: int
num_gr_entry: int
max_gr_entry: int
num_z_entry: int
max_z_entry: int
first_z_entry: int
first_gr_entry: int
name: str
|
class ADRInfo:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0 | 11 | 1 | 10 | 0 | 11 | 1 | 10 | 0 | 0 | 0 | 0 |
147,655 |
MAVENSDC/cdflib
|
cdflib/dataclasses.py
|
cdflib.dataclasses.AEDR
|
class AEDR:
entry: Union[str, np.ndarray]
data_type: int
num_elements: int
next_aedr: int
entry_num: int
num_strings: Optional[int] = None
|
class AEDR:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0 | 7 | 2 | 6 | 0 | 7 | 2 | 6 | 0 | 0 | 0 | 0 |
147,656 |
MAVENSDC/cdflib
|
cdflib/dataclasses.py
|
cdflib.dataclasses.GDRInfo
|
class GDRInfo:
first_zvariable: int
first_rvariable: int
first_adr: int
num_zvariables: int
num_rvariables: int
num_attributes: int
rvariables_num_dims: int
rvariables_dim_sizes: List[int]
eof: int
leapsecond_updated: Optional[int] = None
|
class GDRInfo:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0 | 11 | 2 | 10 | 0 | 11 | 2 | 10 | 0 | 0 | 0 | 0 |
147,657 |
MAVENSDC/cdflib
|
cdflib/xarray/xarray_to_cdf.py
|
cdflib.xarray.xarray_to_cdf.ISTPError
|
class ISTPError(Exception):
"""
Exception raised for ISTP Compliance Errors
"""
def __init__(self, message: str = ""):
super().__init__(message)
|
class ISTPError(Exception):
'''
Exception raised for ISTP Compliance Errors
'''
def __init__(self, message: str = ""):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 1 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 11 | 7 | 1 | 3 | 2 | 1 | 3 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
147,658 |
MAVENSDC/cdflib
|
cdflib/s3.py
|
cdflib.s3.S3object
|
class S3object:
"""
Handler for S3 objects so they behave like files.
S3 'read' reads specified byte range
S3 'seek' sets byte range for subsequent readers
"""
def __init__(self, fhandle):
self.pos = 0 # used to track where in S3 we are
self.content_length = fhandle.content_length # size in bytes
self.fhandle = fhandle
self.temp_file = None
def read(self, isize):
if isize == -1:
isize = self.content_length
myrange = "bytes=%d-%d" % (self.pos, (self.pos + isize - 1))
self.pos += isize # advance the pointer
stream = self.fhandle.get(Range=myrange)["Body"]
rawdata = stream.read()
# bdata=io.BytesIO(rawdata)
return rawdata
def seek(self, offset, from_what=0):
if from_what == 2:
# offset is from end of file, ugh, used only for checksum
self.pos = self.content_length + offset
elif from_what == 1:
# from current position
self.pos = self.pos + offset
else:
# usual default is 0, from start of file
self.pos = offset
def tell(self):
return self.pos
def fetchS3entire(self):
obj = self.fhandle.get()
rawdata = obj["Body"].read()
bdata = io.BytesIO(rawdata)
return bdata
|
class S3object:
'''
Handler for S3 objects so they behave like files.
S3 'read' reads specified byte range
S3 'seek' sets byte range for subsequent readers
'''
def __init__(self, fhandle):
pass
def read(self, isize):
pass
def seek(self, offset, from_what=0):
pass
def tell(self):
pass
def fetchS3entire(self):
pass
| 6 | 1 | 6 | 0 | 5 | 1 | 2 | 0.43 | 0 | 0 | 0 | 0 | 5 | 4 | 5 | 5 | 42 | 5 | 28 | 16 | 22 | 12 | 26 | 16 | 20 | 3 | 0 | 1 | 8 |
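A rough usage sketch for the S3object wrapper above, assuming a boto3 s3.Object handle; the bucket and key names are made up, and the import path follows the file path shown for this row:

import boto3
from cdflib.s3 import S3object

s3 = boto3.resource("s3")
obj = s3.Object("my-bucket", "path/to/file.cdf")  # hypothetical bucket/key

f = S3object(obj)   # behaves like a read-only file over the S3 object
magic = f.read(8)   # ranged GET of the first 8 bytes
f.seek(-16, 2)      # move to 16 bytes before the end (e.g. for a checksum)
tail = f.read(16)
print(f.tell(), len(magic), len(tail))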
147,659 |
MAVENSDC/cdflib
|
cdflib/epochs_astropy.py
|
cdflib.epochs_astropy.CDFTT2000
|
class CDFTT2000(TimeFromEpoch):
name = "cdf_tt2000"
unit = 1.0 / (erfa.DAYSEC * 1e9) # Nanoseconds
epoch_val = "2000-01-01 12:00:00"
epoch_val2 = None
epoch_scale = "tt"
epoch_format = "iso"
|
class CDFTT2000(TimeFromEpoch):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0 | 7 | 7 | 6 | 1 | 7 | 7 | 6 | 0 | 1 | 0 | 0 |
147,660 |
MAVENSDC/cdflib
|
cdflib/dataclasses.py
|
cdflib.dataclasses.VDR
|
class VDR:
data_type: int
section_type: int
next_vdr_location: int
variable_number: int
head_vxr: int
last_vxr: int
max_rec: int
name: str
num_dims: int
dim_sizes: List[int]
compression_bool: bool
compression_level: int
blocking_factor: int
dim_vary: Union[List[int], List[bool]]
record_vary: int
num_elements: int
sparse: int
pad: Optional[Union[str, np.ndarray]] = None
|
class VDR:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0 | 19 | 2 | 18 | 0 | 19 | 2 | 18 | 0 | 0 | 0 | 0 |
147,661 |
MAVENSDC/cdflib
|
cdflib/epochs_astropy.py
|
cdflib.epochs_astropy.CDFEpoch
|
class CDFEpoch(TimeFromEpoch):
name = "cdf_epoch"
unit = 1.0 / (erfa.DAYSEC * 1000) # Milliseconds
epoch_val = "0000-01-01 00:00:00"
epoch_val2 = None
epoch_scale = "utc"
epoch_format = "iso"
|
class CDFEpoch(TimeFromEpoch):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0 | 7 | 7 | 6 | 1 | 7 | 7 | 6 | 0 | 1 | 0 | 0 |
147,662 |
MAVENSDC/cdflib
|
cdflib/epochs_astropy.py
|
cdflib.epochs_astropy.CDFAstropy
|
class CDFAstropy:
"""
Class to encapsulate astropy time routines with CDF class.
"""
version = 3
release = 7
increment = 0
@staticmethod
def convert_to_astropy(epochs: Union[Time, npt.ArrayLike], format: Optional[str] = None) -> Time:
"""
Convert CDF epochs to astropy time objects.
Returns
-------
astropy.time.Time
"""
# If already in Astropy time Units, do nothing
if isinstance(epochs, Time):
return epochs
# If format is specified, then force it to do that
if format is not None:
return Time(epochs, format=format, precision=9)
epochs = np.array(epochs)
# Determine best format for the input type
if epochs.dtype == np.int64:
return Time(epochs, format="cdf_tt2000", precision=9)
elif epochs.dtype == np.complex128:
return Time(epochs.real, epochs.imag / 1000000000000.0, format="cdf_epoch16", precision=9)
elif epochs.dtype == np.float64:
return Time(epochs, format="cdf_epoch", precision=9)
else:
raise TypeError("Not sure how to handle type {}".format(type(epochs)))
@staticmethod
def encode(epochs: npt.ArrayLike, iso_8601: bool = True) -> npt.NDArray[np.str_]:
epochs = CDFAstropy.convert_to_astropy(epochs)
if iso_8601:
return epochs.iso
else:
return epochs.strftime("%d-%b-%Y %H:%M:%S.%f")
@staticmethod
def breakdown(epochs: Union[Time, npt.ArrayLike]) -> npt.NDArray:
# Returns either a single array, or a array of arrays depending on the input
epochs = CDFAstropy.convert_to_astropy(epochs)
if epochs.format == "cdf_tt2000":
return CDFAstropy.breakdown_tt2000(epochs)
elif epochs.format == "cdf_epoch":
return CDFAstropy.breakdown_epoch(epochs)
elif epochs.format == "cdf_epoch16":
return CDFAstropy.breakdown_epoch16(epochs)
raise TypeError("Not sure how to handle type {}".format(type(epochs)))
@staticmethod
def to_datetime(cdf_time: npt.ArrayLike) -> Time:
cdf_time = CDFAstropy.convert_to_astropy(cdf_time)
return cdf_time.datetime
@staticmethod
def unixtime(cdf_time: Union[Time, npt.ArrayLike]) -> npt.NDArray:
"""
Encodes the epoch(s) into seconds after 1970-01-01. Precision is only
kept to the nearest microsecond.
"""
epochs = CDFAstropy.convert_to_astropy(cdf_time)
return epochs.unix
@staticmethod
def compute(datetimes: npt.ArrayLike) -> npt.NDArray:
datetimes = np.atleast_2d(datetimes)
cdf_time = []
for d in datetimes:
unix_seconds = datetime.datetime(d[0], d[1], d[2], d[3], d[4], d[5]).replace(tzinfo=timezone.utc).timestamp()
if len(d) == 7:
remainder_seconds = d[6] / 1000.0
astrotime = Time(unix_seconds, remainder_seconds, format="unix", precision=9)
cdf_time.append(astrotime.cdf_epoch)
if len(d) == 9:
remainder_seconds = (d[6] / 1000.0) + (d[7] / 1000000.0) + (d[8] / 1000000000.0)
astrotime = Time(unix_seconds, remainder_seconds, format="unix", precision=9)
cdf_time.append(astrotime.cdf_tt2000)
if len(d) == 10:
remainder_seconds = (d[6] / 1000.0) + (d[7] / 1000000.0) + (d[8] / 1000000000.0) + (d[9] / 1000000000000.0)
astrotime = Time(unix_seconds, remainder_seconds, format="unix", precision=9)
cdf_time.append(astrotime.cdf_epoch16)
return np.squeeze(cdf_time)
@staticmethod
def findepochrange(
epochs: Union[Time, npt.ArrayLike], starttime: Optional[npt.ArrayLike] = None, endtime: Optional[npt.ArrayLike] = None
) -> Tuple[int, int]:
if isinstance(starttime, list):
start = CDFAstropy.compute(starttime)
if isinstance(endtime, list):
end = CDFAstropy.compute(endtime)
epochs = CDFAstropy.convert_to_astropy(epochs)
epochs_as_np = epochs.value
indices = np.where((epochs_as_np >= start) & (epochs_as_np <= end))
return min(indices[0]), max(indices[0])
@staticmethod
def breakdown_tt2000(tt2000: Time) -> npt.NDArray:
tt2000strings = np.atleast_1d(tt2000.iso)
times = []
for t in tt2000strings:
date, time = t.split(" ")
yyyy, mon, dd = date.split("-")
hhmmss, decimal_seconds = time.split(".")
decimal_seconds = "." + decimal_seconds
hh, mm, ss = hhmmss.split(":")
time_as_list = []
time_as_list.append(int(yyyy)) # year
time_as_list.append(int(mon)) # month
time_as_list.append(int(dd)) # day
time_as_list.append(int(hh)) # hour
time_as_list.append(int(mm)) # minute
time_as_list.append(int(ss)) # second
decimal_seconds = float(decimal_seconds)
milliseconds = decimal_seconds * 1000
time_as_list.append(int(milliseconds)) # milliseconds
microseconds = (milliseconds % 1) * 1000
time_as_list.append(int(microseconds)) # microseconds
nanoseconds = (microseconds % 1) * 1000
time_as_list.append(int(nanoseconds)) # nanoseconds
times.append(time_as_list)
return np.squeeze(times)
@staticmethod
def breakdown_epoch16(epochs: Time) -> npt.NDArray:
epoch16strings = np.atleast_1d(epochs.iso)
times = []
for t in epoch16strings:
time_as_list: List[int] = []
date, time = t.split(" ")
yyyy, mon, dd = date.split("-")
hhmmss, decimal_seconds = time.split(".")
decimal_seconds = "." + decimal_seconds
hh, mm, ss = hhmmss.split(":")
time_as_list = []
time_as_list.append(int(yyyy)) # year
time_as_list.append(int(mon)) # month
time_as_list.append(int(dd)) # day
time_as_list.append(int(hh)) # hour
time_as_list.append(int(mm)) # minute
time_as_list.append(int(ss)) # second
decimal_seconds = float(decimal_seconds)
milliseconds = decimal_seconds * 1000
time_as_list.append(int(milliseconds)) # milliseconds
microseconds = (milliseconds % 1) * 1000
time_as_list.append(int(microseconds)) # microseconds
nanoseconds = (microseconds % 1) * 1000
time_as_list.append(int(nanoseconds)) # nanoseconds
picoseconds = (nanoseconds % 1) * 1000
time_as_list.append(int(picoseconds)) # picoseconds
times.append(time_as_list)
return np.squeeze(times)
@staticmethod
def breakdown_epoch(epochs: Time) -> npt.NDArray:
epochstrings = np.atleast_1d(epochs.iso)
times = []
for t in epochstrings:
date, time = t.split(" ")
yyyy, mon, dd = date.split("-")
hhmmss, decimal_seconds = time.split(".")
decimal_seconds = "." + decimal_seconds
hh, mm, ss = hhmmss.split(":")
time_as_list = []
time_as_list.append(int(yyyy)) # year
time_as_list.append(int(mon)) # month
time_as_list.append(int(dd)) # day
time_as_list.append(int(hh)) # hour
time_as_list.append(int(mm)) # minute
time_as_list.append(int(ss)) # second
decimal_seconds = float(decimal_seconds)
milliseconds = decimal_seconds * 1000
time_as_list.append(int(milliseconds)) # milliseconds
times.append(time_as_list)
return np.squeeze(times)
@staticmethod
def parse(value: npt.ArrayLike) -> npt.NDArray:
"""
Parses the provided date/time string(s) into CDF epoch value(s).
For CDF_EPOCH:
'yyyy-mm-dd hh:mm:ss.xxx' (in iso_8601). The string is the output
from encode function.
For CDF_EPOCH16:
The string has to be in the form of
'yyyy-mm-dd hh:mm:ss.mmmuuunnnppp' (in iso_8601). The string is
the output from encode function.
For TT2000:
The string has to be in the form of
'yyyy-mm-dd hh:mm:ss.mmmuuunnn' (in iso_8601). The string is
the output from encode function.
"""
value = np.atleast_1d(value)
time_list = []
for t in value:
date, subs = t.split(".")
if len(subs) == 3:
time_list.append(Time(t, precision=9).cdf_epoch)
if len(subs) == 12:
time_list.append(Time(t, precision=9).cdf_epoch16)
if len(subs) == 9:
time_list.append(int(Time(t, precision=9).cdf_tt2000))
return np.squeeze(time_list)
|
class CDFAstropy:
'''
Class to encapsulate astropy time routines with CDF class.
'''
@staticmethod
def convert_to_astropy(epochs: Union[Time, npt.ArrayLike], format: Optional[str] = None) -> Time:
'''
Convert CDF epochs to astropy time objects.
Returns
-------
astropy.time.Time
'''
pass
@staticmethod
def encode(epochs: npt.ArrayLike, iso_8601: bool = True) -> npt.NDArray[np.str_]:
pass
@staticmethod
def breakdown(epochs: Union[Time, npt.ArrayLike]) -> npt.NDArray:
pass
@staticmethod
def to_datetime(cdf_time: npt.ArrayLike) -> Time:
pass
@staticmethod
def unixtime(cdf_time: Union[Time, npt.ArrayLike]) -> npt.NDArray:
'''
Encodes the epoch(s) into seconds after 1970-01-01. Precision is only
kept to the nearest microsecond.
'''
pass
@staticmethod
def compute(datetimes: npt.ArrayLike) -> npt.NDArray:
pass
@staticmethod
def findepochrange(
epochs: Union[Time, npt.ArrayLike], starttime: Optional[npt.ArrayLike] = None, endtime: Optional[npt.ArrayLike] = None
) -> Tuple[int, int]:
pass
@staticmethod
def breakdown_tt2000(tt2000: Time) -> npt.NDArray:
pass
@staticmethod
def breakdown_epoch16(epochs: Time) -> npt.NDArray:
pass
@staticmethod
def breakdown_epoch(epochs: Time) -> npt.NDArray:
pass
@staticmethod
def parse(value: npt.ArrayLike) -> npt.NDArray:
'''
Parses the provided date/time string(s) into CDF epoch value(s).
For CDF_EPOCH:
'yyyy-mm-dd hh:mm:ss.xxx' (in iso_8601). The string is the output
from encode function.
For CDF_EPOCH16:
The string has to be in the form of
'yyyy-mm-dd hh:mm:ss.mmmuuunnnppp' (in iso_8601). The string is
the output from encode function.
For TT2000:
The string has to be in the form of
'yyyy-mm-dd hh:mm:ss.mmmuuunnn' (in iso_8601). The string is
the output from encode function.
'''
pass
| 23 | 4 | 18 | 1 | 14 | 5 | 3 | 0.35 | 0 | 9 | 0 | 0 | 0 | 0 | 11 | 11 | 223 | 27 | 165 | 73 | 140 | 57 | 146 | 60 | 134 | 6 | 0 | 2 | 33 |
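A brief sketch of the dtype-based dispatch in convert_to_astropy above; the numeric values are illustrative only, and the import path follows the file path shown for this row:

import numpy as np
from cdflib.epochs_astropy import CDFAstropy

# int64 arrays are treated as CDF_TIME_TT2000 (nanoseconds since J2000, TT scale),
# float64 arrays as CDF_EPOCH (milliseconds since 0000-01-01), per convert_to_astropy.
tt2000 = np.array([0, 1_000_000_000], dtype=np.int64)
epoch = np.array([63113904000000.0], dtype=np.float64)

print(CDFAstropy.encode(tt2000))    # ISO 8601 strings via astropy Time
print(CDFAstropy.breakdown(epoch))  # [year, month, day, hour, minute, second, millisecond]
print(CDFAstropy.unixtime(epoch))   # seconds since 1970-01-01 (astropy Time.unix)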
147,663 |
MAVENSDC/cdflib
|
cdflib/epochs_astropy.py
|
cdflib.epochs_astropy.CDFEpoch16
|
class CDFEpoch16(TimeFromEpoch):
name = "cdf_epoch16"
unit = 1.0 / (erfa.DAYSEC) # Seconds
epoch_val = "0000-01-01 00:00:00"
epoch_val2 = None
epoch_scale = "utc"
epoch_format = "iso"
|
class CDFEpoch16(TimeFromEpoch):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0 | 7 | 7 | 6 | 1 | 7 | 7 | 6 | 0 | 1 | 0 | 0 |
147,664 |
MChiciak/django-datatools
|
MChiciak_django-datatools/django_datatools/data_retriever.py
|
django_datatools.data_retriever.DataRetriever
|
class DataRetriever(object):
"""
This is a helper class for bulk inserts
It will automatically retrieve the records desired based
off a query set and will create any missing records.
It uses a key for record lookup to increase performance dramatically
"""
def __init__(self, model_cls, query_set, key_parts):
self.model_cls = model_cls
self.key_parts = key_parts
self.query_set = query_set.values(*(["id"] + list(self.key_parts)))
self.record_lookup = None
def set_record_lookup(self, force=False):
if force:
self.record_lookup = None
self.query_set = self.query_set.all()
if not self.record_lookup:
self.record_lookup = dict()
for record in self.query_set:
self.record_lookup[self.get_record_key(record, self.key_parts)] = record['id']
@staticmethod
def get_record_key(data_item, key_parts):
record_key = ''
for key_part in key_parts:
key_value = '' if data_item[key_part] is None else str(data_item[key_part])
record_key += key_value
return record_key
def get_instance(self, record_key):
self.set_record_lookup()
if record_key in self.record_lookup:
return self.record_lookup[record_key]
return None
def get_records(self):
self.set_record_lookup()
return self.record_lookup
def bulk_get_or_create(self, data_list):
"""
data_list is the data to get or create
We generate the query and set all the record keys based on passed in queryset
Then we loop over each item in the data_list, which has the keys already! No need to generate them. Should save a lot of time
Use values instead of the whole object, much faster
Args:
data_list:
Returns:
"""
items_to_create = dict()
for record_key, record_config in data_list.items():
if record_key not in items_to_create:
record = self.get_instance(record_key)
if not record:
items_to_create[record_key] = self.model_cls(**record_config)
if items_to_create:
"""
TODO. I think we can optimize this. Switch to values, get the primary id
Query set is just select the model with that ID. Return the model object without running the full queryset again. Should be a lot faster.
"""
self.model_cls.objects.bulk_create(items_to_create.values())
self.set_record_lookup(True)
return self.record_lookup
|
class DataRetriever(object):
'''
This is a helper class for bulk inserts
It will automatically retrieve the records desired based
off a query set and will create any missing records.
It uses a key for record lookup to increase performance dramatically
'''
def __init__(self, model_cls, query_set, key_parts):
pass
def set_record_lookup(self, force=False):
pass
@staticmethod
def get_record_key(data_item, key_parts):
pass
def get_instance(self, record_key):
pass
def get_records(self):
pass
def bulk_get_or_create(self, data_list):
'''
data_list is the data to get or create
We generate the query and set all the record keys based on passed in queryset
Then we loop over each item in the data_list, which has the keys already! No need to generate them. Should save a lot of time
Use values instead of the whole object, much faster
Args:
data_list:
Returns:
'''
pass
| 8 | 2 | 9 | 1 | 6 | 2 | 3 | 0.47 | 1 | 3 | 0 | 0 | 5 | 4 | 6 | 6 | 68 | 9 | 40 | 19 | 32 | 19 | 39 | 18 | 32 | 5 | 1 | 3 | 16 |
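A hedged usage sketch for DataRetriever.bulk_get_or_create above; Author is a hypothetical Django model with name and country fields, and the record keys follow get_record_key (string concatenation of the key parts):

from django_datatools.data_retriever import DataRetriever
from myapp.models import Author  # hypothetical model with 'name' and 'country' fields

key_parts = ("name", "country")
retriever = DataRetriever(Author, Author.objects.all(), key_parts)

# Keys must be the concatenated string values of the key parts, as get_record_key builds them.
data_list = {
    "Ada LovelaceUK": {"name": "Ada Lovelace", "country": "UK"},
    "Alan TuringUK": {"name": "Alan Turing", "country": "UK"},
}
record_lookup = retriever.bulk_get_or_create(data_list)  # {record_key: id, ...} after creating missing rows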
147,665 |
MDAnalysis/GridDataFormats
|
gridData/gOpenMol.py
|
gridData.gOpenMol.Plt
|
class Plt(object):
"""A class to represent a gOpenMol_ plt file.
Only reading is implemented; either supply a filename to the constructor
>>> G = Plt(filename)
or load the file with the read method
>>> G = Plt()
>>> G.read(filename)
The data is held in :attr:`GOpenMol.array` and all header information is in
the dict :attr:`GOpenMol.header`.
:attr:`Plt.shape`
D-tuplet describing size in each dimension
:attr:`Plt.origin`
coordinates of the centre of the grid cell with index 0,0,...,0
:attr:`Plt.delta`
DxD array describing the deltas
"""
_header_struct = (Record('rank', 'I', {3:'dimension'}),
Record('surface','I', {1: 'VSS surface',
2: 'Orbital/density surface',
3: 'Probe surface',
42: 'gridcount',
100: 'OpenMol',
200: 'Gaussian 94/98',
201: 'Jaguar',
202: 'Gamess',
203: 'AutoDock',
204: 'Delphi/Insight',
205: 'Grid',
}), # update in init with all user defined values
Record('nz', 'I'),
Record('ny', 'I'),
Record('nx', 'I'),
Record('zmin', 'f'),
Record('zmax', 'f'),
Record('ymin', 'f'),
Record('ymax', 'f'),
Record('xmin', 'f'),
Record('xmax', 'f'))
_data_bintype = 'f' # write(&value,sizeof(float),1L,output);
def __init__(self, filename=None):
self.filename = str(filename)
# fix header_struct because I cannot do {...}.update()
rec_surf = [r for r in self._header_struct if r.key == 'surface'][0]
rec_surf.values.update(dict((k,'user-defined') for k in range(4,51) if k != 42))
# assemble format
self._headerfmt = "".join([r.bintype for r in self._header_struct])
if not filename is None:
self.read(filename)
def read(self, filename):
"""Populate the instance from the plt file *filename*."""
from struct import calcsize, unpack
if not filename is None:
self.filename = str(filename)
with open(self.filename, 'rb') as plt:
h = self.header = self._read_header(plt)
nentries = h['nx'] * h['ny'] * h['nz']
# quick and dirty... slurp it all in one go
datafmt = h['bsaflag']+str(nentries)+self._data_bintype
a = numpy.array(unpack(datafmt, plt.read(calcsize(datafmt))))
self.header['filename'] = self.filename
self.array = a.reshape(h['nz'], h['ny'], h['nx']).transpose() # unpack plt in reverse!!
self.delta = self._delta()
self.origin = numpy.array([h['xmin'], h['ymin'], h['zmin']]) + 0.5*numpy.diagonal(self.delta)
self.rank = h['rank']
@property
def shape(self):
return self.array.shape
@property
def edges(self):
"""Edges of the grid cells, origin at centre of 0,0,..,0 grid cell.
Only works for regular, orthonormal grids.
"""
return [self.delta[d,d] * numpy.arange(self.shape[d]+1) + self.origin[d]\
- 0.5*self.delta[d,d] for d in range(self.rank)]
def _delta(self):
h = self.header
qmin = numpy.array([h['xmin'],h['ymin'],h['zmin']])
qmax = numpy.array([h['xmax'],h['ymax'],h['zmax']])
delta = numpy.abs(qmax - qmin) / self.shape
return numpy.diag(delta)
def _read_header(self, pltfile):
"""Read header bytes, try all possibilities for byte order/size/alignment."""
nheader = struct.calcsize(self._headerfmt)
names = [r.key for r in self._header_struct]
binheader = pltfile.read(nheader)
def decode_header(bsaflag='@'):
h = dict(zip(names, struct.unpack(bsaflag+self._headerfmt, binheader)))
h['bsaflag'] = bsaflag
return h
for flag in '@=<>':
# try all endianness and alignment options until we find something that looks sensible
header = decode_header(flag)
if header['rank'] == 3:
break # only legal value according to spec
header = None
if header is None:
raise TypeError("Cannot decode header --- corrupted or wrong format?")
for rec in self._header_struct:
if not rec.is_legal_dict(header):
warnings.warn("Key %s: Illegal value %r" % (rec.key, header[rec.key]))
return header
def histogramdd(self):
"""Return array data as (edges,grid), i.e. a numpy nD histogram."""
return (self.array, self.edges)
|
class Plt(object):
'''A class to represent a gOpenMol_ plt file.
Only reading is implemented; either supply a filename to the constructor
>>> G = Plt(filename)
or load the file with the read method
>>> G = Plt()
>>> G.read(filename)
The data is held in :attr:`GOpenMol.array` and all header information is in
the dict :attr:`GOpenMol.header`.
:attr:`Plt.shape`
D-tuplet describing size in each dimension
:attr:`Plt.origin`
coordinates of the centre of the grid cell with index 0,0,...,0
:attr:`Plt.delta`
DxD array describing the deltas
'''
def __init__(self, filename=None):
pass
def read(self, filename):
'''Populate the instance from the plt file *filename*.'''
pass
@property
def shape(self):
pass
@property
def edges(self):
'''Edges of the grid cells, origin at centre of 0,0,..,0 grid cell.
Only works for regular, orthonormal grids.
'''
pass
def _delta(self):
pass
def _read_header(self, pltfile):
'''Read header bytes, try all possibilities for byte order/size/alignment.'''
pass
def decode_header(bsaflag='@'):
pass
def histogramdd(self):
'''Return array data as (edges,grid), i.e. a numpy nD histogram.'''
pass
| 11 | 5 | 9 | 0 | 7 | 2 | 2 | 0.37 | 1 | 5 | 0 | 0 | 7 | 7 | 7 | 7 | 118 | 14 | 79 | 37 | 67 | 29 | 55 | 34 | 45 | 6 | 1 | 2 | 15 |
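A short usage sketch for the Plt reader above; the filename is hypothetical:

from gridData.gOpenMol import Plt

p = Plt("density.plt")         # hypothetical gOpenMol plt file
print(p.shape, p.origin)       # grid dimensions and centre of cell (0, 0, 0)
data, edges = p.histogramdd()  # numpy nD-histogram convention: (array, edges)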
147,666 |
MDAnalysis/GridDataFormats
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MDAnalysis_GridDataFormats/gridData/tests/test_grid.py
|
gridData.tests.test_grid.test_inheritance.DerivedGrid
|
class DerivedGrid(Grid):
pass
|
class DerivedGrid(Grid):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
147,667 |
MDAnalysis/GridDataFormats
|
gridData/mrc.py
|
gridData.mrc.MRC
|
class MRC(object):
"""Represent a MRC/CCP4 file.
Load `MRC/CCP4 2014 <MRC2014 file format>`_ 3D volumetric data with
the mrcfile_ library.
Parameters
----------
filename : str (optional)
input file (or stream), can be compressed
Raises
------
ValueError
If the unit cell is not orthorhombic or if the data
are not volumetric.
Attributes
----------
header : numpy.recarray
Header data from the MRC file as a numpy record array.
array : numpy.ndarray
Data as a 3-dimensional array where axis 0 corresponds to X,
axis 1 to Y, and axis 2 to Z. This order is always enforced,
regardless of the order in the mrc file.
delta : numpy.ndarray
Diagonal matrix with the voxel size in X, Y, and Z direction
(taken from the :attr:`mrcfile.mrcfile.voxel_size` attribute)
origin : numpy.ndarray
numpy array with coordinates of the coordinate system origin
(computed from :attr:`header.origin`, the offsets
:attr:`header.origin.nxstart`, :attr:`header.origin.nystart`,
:attr:`header.origin.nzstart` and the spacing :attr:`delta`)
rank : int
The integer 3, denoting that only 3D maps are read.
Notes
-----
* Only volumetric (3D) densities are read.
* Only orthorhombic unitcells supported (other raise :exc:`ValueError`)
* Only reading is currently supported.
.. versionadded:: 0.7.0
"""
def __init__(self, filename=None):
self.filename = filename
if filename is not None:
self.read(filename)
def read(self, filename):
"""Populate the instance from the MRC/CCP4 file *filename*."""
if filename is not None:
self.filename = filename
with mrcfile.open(filename) as mrc:
if not mrc.is_volume(): #pragma: no cover
raise ValueError(
"MRC file {} is not a volumetric density.".format(filename))
self.header = h = mrc.header.copy()
# check for being orthorhombic
if not np.allclose([h.cellb.alpha, h.cellb.beta, h.cellb.gamma],
[90, 90, 90]):
raise ValueError("Only orthorhombic unitcells are currently "
"supported, not "
"alpha={0}, beta={1}, gamma={2}".format(
h.cellb.alpha, h.cellb.beta, h.cellb.gamma))
# mrc.data[z, y, x] indexed: convert to x,y,z as used in GridDataFormats
# together with the axes orientation information in mapc/mapr/maps.
# mapc, mapr, maps = 1, 2, 3 for Fortran-ordering and 3, 2, 1 for C-ordering.
# Other combinations are possible. We reorder the data for the general case
# by sorting mapc, mapr, maps in ascending order, i.e., to obtain x,y,z.
# mrcfile provides the data in zyx shape (without regard to map*) so we first
# transpose it to xyz and then reorient with axes_c_order.
#
# All other "xyz" quantitities are also reordered.
axes_order = np.hstack([h.mapc, h.mapr, h.maps])
axes_c_order = np.argsort(axes_order)
transpose_order = np.argsort(axes_order[::-1])
self.array = np.transpose(mrc.data, axes=transpose_order)
self.delta = np.diag(np.array([mrc.voxel_size.x, mrc.voxel_size.y, mrc.voxel_size.z]))
# the grid is shifted to the MRC origin by offset
# (assume orthorhombic)
offsets = np.hstack([h.nxstart, h.nystart, h.nzstart])[axes_c_order] * np.diag(self.delta)
# GridData origin is centre of cell at x=col=0, y=row=0 z=seg=0
self.origin = np.hstack([h.origin.x, h.origin.y, h.origin.z]) + offsets
self.rank = 3
@property
def shape(self):
"""Shape of the :attr:`array`"""
return self.array.shape
@property
def edges(self):
"""Edges of the grid cells, origin at centre of 0,0,0 grid cell.
Only works for regular, orthonormal grids.
"""
# TODO: Add triclinic cell support.
return [self.delta[d, d] * np.arange(self.shape[d] + 1) +
self.origin[d] - 0.5 * self.delta[d, d]
for d in range(self.rank)]
def histogramdd(self):
"""Return array data as (edges,grid), i.e. a numpy nD histogram."""
return (self.array, self.edges)
|
class MRC(object):
'''Represent a MRC/CCP4 file.
Load `MRC/CCP4 2014 <MRC2014 file format>`_ 3D volumetric data with
the mrcfile_ library.
Parameters
----------
filename : str (optional)
input file (or stream), can be compressed
Raises
------
ValueError
If the unit cell is not orthorhombic or if the data
are not volumetric.
Attributes
----------
header : numpy.recarray
Header data from the MRC file as a numpy record array.
array : numpy.ndarray
Data as a 3-dimensional array where axis 0 corresponds to X,
axis 1 to Y, and axis 2 to Z. This order is always enforced,
regardless of the order in the mrc file.
delta : numpy.ndarray
Diagonal matrix with the voxel size in X, Y, and Z direction
(taken from the :attr:`mrcfile.mrcfile.voxel_size` attribute)
origin : numpy.ndarray
numpy array with coordinates of the coordinate system origin
(computed from :attr:`header.origin`, the offsets
:attr:`header.origin.nxstart`, :attr:`header.origin.nystart`,
:attr:`header.origin.nzstart` and the spacing :attr:`delta`)
rank : int
The integer 3, denoting that only 3D maps are read.
Notes
-----
* Only volumetric (3D) densities are read.
* Only orthorhombic unitcells supported (other raise :exc:`ValueError`)
* Only reading is currently supported.
.. versionadded:: 0.7.0
'''
def __init__(self, filename=None):
pass
def read(self, filename):
'''Populate the instance from the MRC/CCP4 file *filename*.'''
pass
@property
def shape(self):
'''Shape of the :attr:`array`'''
pass
@property
def edges(self):
'''Edges of the grid cells, origin at centre of 0,0,0 grid cell.
Only works for regular, orthonormal grids.
'''
pass
def histogramdd(self):
'''Return array data as (edges,grid), i.e. a numpy nD histogram.'''
pass
| 8 | 5 | 11 | 0 | 7 | 4 | 2 | 1.57 | 1 | 2 | 0 | 0 | 5 | 6 | 5 | 5 | 114 | 20 | 37 | 19 | 29 | 58 | 28 | 16 | 22 | 4 | 1 | 2 | 9 |
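A similar sketch for the MRC reader above; the filename is hypothetical, and only 3D orthorhombic maps are accepted, as the docstring states:

from gridData.mrc import MRC

m = MRC("map.ccp4")            # hypothetical MRC2014/CCP4 file
print(m.shape)                 # (nx, ny, nz) after reordering axes to x, y, z
print(m.delta.diagonal())      # voxel size along each axis
hist, edges = m.histogramdd()  # same (array, edges) convention as the other readers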
147,668 |
MDAnalysis/GridDataFormats
|
gridData/OpenDX.py
|
gridData.OpenDX.gridconnections
|
class gridconnections(DXclass):
"""OpenDX gridconnections class"""
def __init__(self,classid,shape=None,**kwargs):
if shape is None:
raise ValueError('all keyword arguments are required')
self.id = classid
self.name = 'gridconnections'
self.component = 'connections'
self.shape = numpy.asarray(shape) # D dimensional shape
def write(self, stream):
super(gridconnections, self).write(
stream, ('counts '+self.ndformat(' %d')) % tuple(self.shape))
|
class gridconnections(DXclass):
'''OpenDX gridconnections class'''
def __init__(self,classid,shape=None,**kwargs):
pass
def write(self, stream):
pass
| 3 | 1 | 5 | 0 | 5 | 1 | 2 | 0.18 | 1 | 3 | 0 | 0 | 2 | 4 | 2 | 8 | 13 | 1 | 11 | 7 | 8 | 2 | 10 | 7 | 7 | 2 | 2 | 1 | 3 |
147,669 |
MDAnalysis/GridDataFormats
|
gridData/OpenDX.py
|
gridData.OpenDX.field
|
class field(DXclass):
"""OpenDX container class
The *field* is the top-level object and represents the whole
OpenDX file. It contains a number of other objects.
Instantiate a DX object from this class and add subclasses with
:meth:`add`.
"""
# perhaps this should not derive from DXclass as those are
# objects in field but a field cannot contain itself
def __init__(self,classid='0',components=None,comments=None):
"""OpenDX object, which is build from a list of components.
Parameters
----------
id : str
arbitrary string
components : dict
dictionary of DXclass instances (no sanity check on the
individual ids!) which correspond to
* positions
* connections
* data
comments : list
list of strings; each string becomes a comment line
prefixed with '#'. Avoid newlines.
A field must have at least the components 'positions',
'connections', and 'data'. Those components are associated
with objects belonging to the field. When writing a dx file
from the field, only the required objects are dumped to the file.
(For a more general class that can use field:
Because there could be more objects than components, we keep a
separate object list. When dumping the dx file, first all
objects are written and then the field object describes its
components. Objects are referenced by their unique id.)
.. Note:: uniqueness of the *id* is not checked.
Example
-------
Create a new dx object::
dx = OpenDX.field('density',[gridpoints,gridconnections,array])
"""
if components is None:
components = dict(positions=None,connections=None,data=None)
if comments is None:
comments = ['OpenDX written by gridData.OpenDX',
'from https://github.com/MDAnalysis/GridDataFormats']
elif type(comments) is not list:
comments = [str(comments)]
self.id = classid # can be an arbitrary string
self.name = 'field'
self.component = None # cannot be a component of a field
self.components = components
self.comments= comments
def _openfile_writing(self, filename):
"""Returns a regular or gz file stream for writing"""
if filename.endswith('.gz'):
return gzip.open(filename, 'wb')
else:
return open(filename, 'w')
def write(self, filename):
"""Write the complete dx object to the file.
This is the simple OpenDX format which includes the data into
the header via the 'object array ... data follows' statement.
Only simple regular arrays are supported.
The format should be compatible with VMD's dx reader plugin.
"""
# comments (VMD chokes on lines of len > 80, so truncate)
maxcol = 80
with self._openfile_writing(str(filename)) as outfile:
for line in self.comments:
comment = '# '+str(line)
self._write_line(outfile, comment[:maxcol]+'\n')
# each individual object
for component, object in self.sorted_components():
object.write(outfile)
# the field object itself
super(field, self).write(outfile, quote=True)
for component, object in self.sorted_components():
self._write_line(outfile, 'component "%s" value %s\n' % (
component, str(object.id)))
def read(self, stream):
"""Read DX field from file.
dx = OpenDX.field.read(dxfile)
The classid is discarded and replaced with the one from the file.
"""
DXfield = self
p = DXParser(stream)
p.parse(DXfield)
def add(self,component,DXobj):
"""add a component to the field"""
self[component] = DXobj
def add_comment(self,comment):
"""add comments"""
self.comments.append(comment)
def sorted_components(self):
"""iterator that returns (component,object) in id order"""
for component, object in \
sorted(self.components.items(),
key=lambda comp_obj: comp_obj[1].id):
yield component, object
def histogramdd(self):
"""Return array data as (edges,grid), i.e. a numpy nD histogram."""
shape = self.components['positions'].shape
edges = self.components['positions'].edges()
hist = self.components['data'].array.reshape(shape)
return (hist,edges)
def __getitem__(self,key):
return self.components[key]
def __setitem__(self,key,value):
self.components[key] = value
def __repr__(self):
return '<OpenDX.field object, id='+str(self.id)+', with '+\
str(len(self.components))+' components and '+\
str(len(self.components))+' objects>'
|
class field(DXclass):
'''OpenDX container class
The *field* is the top-level object and represents the whole
OpenDX file. It contains a number of other objects.
Instantiate a DX object from this class and add subclasses with
:meth:`add`.
'''
def __init__(self,classid='0',components=None,comments=None):
'''OpenDX object, which is built from a list of components.
Parameters
----------
id : str
arbitrary string
components : dict
dictionary of DXclass instances (no sanity check on the
individual ids!) which correspond to
* positions
* connections
* data
comments : list
list of strings; each string becomes a comment line
prefixed with '#'. Avoid newlines.
A field must have at least the components 'positions',
'connections', and 'data'. Those components are associated
with objects belonging to the field. When writing a dx file
from the field, only the required objects are dumped to the file.
(For a more general class that can use field:
Because there could be more objects than components, we keep a
separate object list. When dumping the dx file, first all
objects are written and then the field object describes its
components. Objects are referenced by their unique id.)
.. Note:: uniqueness of the *id* is not checked.
Example
-------
Create a new dx object::
dx = OpenDX.field('density',[gridpoints,gridconnections,array])
'''
pass
def _openfile_writing(self, filename):
'''Returns a regular or gz file stream for writing'''
pass
def write(self, filename):
'''Write the complete dx object to the file.
This is the simple OpenDX format which includes the data into
the header via the 'object array ... data follows' statement.
Only simple regular arrays are supported.
The format should be compatible with VMD's dx reader plugin.
'''
pass
def read(self, stream):
'''Read DX field from file.
dx = OpenDX.field.read(dxfile)
The classid is discarded and replaced with the one from the file.
'''
pass
def add(self,component,DXobj):
'''add a component to the field'''
pass
def add_comment(self,comment):
'''add comments'''
pass
def sorted_components(self):
'''iterator that returns (component,object) in id order'''
pass
def histogramdd(self):
'''Return array data as (edges,grid), i.e. a numpy nD histogram.'''
pass
def __getitem__(self,key):
pass
def __setitem__(self,key,value):
pass
def __repr__(self):
pass
| 12 | 9 | 11 | 2 | 5 | 5 | 2 | 1.02 | 1 | 6 | 1 | 0 | 11 | 5 | 11 | 17 | 142 | 30 | 57 | 28 | 45 | 58 | 49 | 27 | 37 | 4 | 2 | 2 | 19 |
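A sketch of round-tripping an OpenDX file through the field class above, assuming (as the read docstring suggests) that a filename can be passed straight to read; the file names are hypothetical:

from gridData import OpenDX

dx = OpenDX.field("density")    # classid is an arbitrary string
dx.read("input.dx")             # fills the positions/connections/data components
dx.add_comment("re-exported by gridData.OpenDX")
hist, edges = dx.histogramdd()  # data reshaped onto the grid, plus the cell edges
dx.write("output.dx")           # a *.dx.gz name would be written gzip-compressed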
147,670 |
MDAnalysis/GridDataFormats
|
gridData/OpenDX.py
|
gridData.OpenDX.array
|
class array(DXclass):
"""OpenDX array class.
See `Array Objects`_ for details.
.. _Array Objects:
https://web.archive.org/web/20080808140524/http://opendx.sdsc.edu/docs/html/pages/usrgu068.htm#Header_440
"""
#: conversion from :attr:`numpy.dtype.name` to closest OpenDX array type
#: (round-tripping is not guaranteed to produce identical types); not all
#: types are supported (e.g., strings are missing)
np_types = {
"uint8": "byte", # DX "unsigned byte" equivalent
"int8": "signed byte",
"uint16": "unsigned short",
"int16": "short", # DX "signed short" equivalent
"uint32": "unsigned int",
"int32": "int", # DX "signed int" equivalent
"uint64": "unsigned int", # not explicit in DX, for compatibility
"int64": "int", # not explicit in DX, for compatibility
# "hyper", # ?
"float32": "float", # default
"float64": "double",
"float16": "float", # float16 not available in DX, use float
# numpy "float128 not available, raise error
# "string" not automatically supported
}
#: conversion from OpenDX type to closest :class:`numpy.dtype`
#: (round-tripping is not guaranteed to produce identical types); not all
#: types are supported (e.g., strings and conversion to int64 are missing)
dx_types = {
"byte": "uint8",
"unsigned byte": "uint8",
"signed byte": "int8",
"unsigned short": "uint16",
"short": "int16",
"signed short": "int16",
"unsigned int": "uint32",
"int": "int32",
"signed int": "int32",
# "hyper", # ?
"float": "float32", # default
"double": "float64",
# "string" not automatically supported
}
def __init__(self, classid, array=None, type=None, typequote='"',
**kwargs):
"""
Parameters
----------
classid : int
array : array_like
type : str (optional)
Set the DX type in the output file and cast `array` to
the closest numpy dtype. `type` must be one of the
allowed types in DX files as defined under `Array
Objects`_. The default ``None`` tries to set the type
from the :class:`numpy.dtype` of `array`.
.. versionadded:: 0.4.0
Raises
------
ValueError
if `array` is not provided; or if `type` is not of the correct
DX type
"""
if array is None:
raise ValueError('array keyword argument is required')
self.id = classid
self.name = 'array'
self.component = 'data'
# detect type https://github.com/MDAnalysis/GridDataFormats/issues/35
if type is None:
self.array = numpy.asarray(array)
try:
self.type = self.np_types[self.array.dtype.name]
except KeyError:
warnings.warn(("array dtype.name = {0} can not be automatically "
"converted to a DX array type. Use the 'type' keyword "
"to manually specify the correct type.").format(
self.array.dtype.name))
self.type = self.array.dtype.name # will raise ValueError on writing
else:
try:
self.array = numpy.asarray(array, dtype=self.dx_types[type])
except KeyError:
raise ValueError(("DX type {0} cannot be converted to an "
"appropriate numpy dtype. Available "
"types are: {1}".format(type,
list(self.dx_types.values()))))
self.type = type
self.typequote = typequote
def write(self, stream):
"""Write the *class array* section.
Parameters
----------
stream : stream
Raises
------
ValueError
If the `dxtype` is not a valid type, :exc:`ValueError` is
raised.
"""
if self.type not in self.dx_types:
raise ValueError(("DX type {} is not supported in the DX format. \n"
"Supported valus are: {}\n"
"Use the type=<type> keyword argument.").format(
self.type, list(self.dx_types.keys())))
typelabel = (self.typequote+self.type+self.typequote)
super(array, self).write(stream, 'type {0} rank 0 items {1} data follows'.format(
typelabel, self.array.size))
# grid data, serialized as a C array (z fastest varying)
# (flat iterator is equivalent to: for x: for y: for z: grid[x,y,z])
# VMD's DX reader requires exactly 3 values per line
fmt_string = "{:d}"
if (self.array.dtype.kind == 'f' or self.array.dtype.kind == 'c'):
precision = numpy.finfo(self.array.dtype).precision
fmt_string = "{:."+"{:d}".format(precision)+"f}"
values_per_line = 3
values = self.array.flat
while 1:
try:
for i in range(values_per_line):
self._write_line(stream, fmt_string.format(next(values)) + "\t")
self._write_line(stream, '\n')
except StopIteration:
self._write_line(stream, '\n')
break
self._write_line(stream, 'attribute "dep" string "positions"\n')
|
class array(DXclass):
'''OpenDX array class.
See `Array Objects`_ for details.
.. _Array Objects:
https://web.archive.org/web/20080808140524/http://opendx.sdsc.edu/docs/html/pages/usrgu068.htm#Header_440
'''
def __init__(self, classid, array=None, type=None, typequote='"',
**kwargs):
'''
Parameters
----------
classid : int
array : array_like
type : str (optional)
Set the DX type in the output file and cast `array` to
the closest numpy dtype. `type` must be one of the
allowed types in DX files as defined under `Array
Objects`_. The default ``None`` tries to set the type
from the :class:`numpy.dtype` of `array`.
.. versionadded:: 0.4.0
Raises
------
ValueError
if `array` is not provided; or if `type` is not of the correct
DX type
'''
pass
def write(self, stream):
'''Write the *class array* section.
Parameters
----------
stream : stream
Raises
------
ValueError
If the `dxtype` is not a valid type, :exc:`ValueError` is
raised.
'''
pass
| 3 | 3 | 45 | 3 | 26 | 17 | 6 | 0.73 | 1 | 7 | 0 | 0 | 2 | 6 | 2 | 8 | 136 | 10 | 78 | 18 | 74 | 57 | 42 | 17 | 39 | 6 | 2 | 3 | 11 |
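A small sketch of the dtype handling in the array class above: the DX type is inferred from the numpy dtype unless type= is passed explicitly.

import numpy as np
from gridData.OpenDX import array as dxarray

data = np.arange(27, dtype=np.float64)

a = dxarray(classid=3, array=data)                 # inferred via np_types: float64 -> "double"
print(a.type)                                      # "double"

b = dxarray(classid=3, array=data, type="float")   # explicit DX type casts via dx_types to float32
print(b.array.dtype)                               # float32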
147,671 |
MDAnalysis/GridDataFormats
|
gridData/OpenDX.py
|
gridData.OpenDX.Token
|
class Token:
# token categories (values of dx_regex must match up with these categories)
category = {'COMMENT': ['COMMENT'],
'WORD': ['WORD'],
'STRING': ['QUOTEDSTRING','BARESTRING','STRING'],
'WHITESPACE': ['WHITESPACE'],
'INTEGER': ['INTEGER'],
'REAL': ['REAL'],
'NUMBER': ['INTEGER','REAL']}
# cast functions
cast = {'COMMENT': lambda s:re.sub(r'#\s*','',s),
'WORD': str,
'STRING': str, 'QUOTEDSTRING': str, 'BARESTRING': str,
'WHITESPACE': None,
'NUMBER': float, 'INTEGER': int, 'REAL': float}
def __init__(self,code,text):
self.code = code # store raw code
self.text = text
def equals(self,v):
return self.text == v
def iscode(self,code):
return self.code in self.category[code] # use many -> 1 mappings
def value(self,ascode=None):
"""Return text cast to the correct type or the selected type"""
if ascode is None:
ascode = self.code
return self.cast[ascode](self.text)
def __repr__(self):
return '<token '+str(self.code)+','+str(self.value())+'>'
|
class Token:
def __init__(self,code,text):
pass
def equals(self,v):
pass
def iscode(self,code):
pass
def value(self,ascode=None):
'''Return text cast to the correct type or the selected type'''
pass
def __repr__(self):
pass
| 6 | 1 | 3 | 0 | 3 | 1 | 1 | 0.23 | 0 | 1 | 0 | 0 | 5 | 2 | 5 | 5 | 30 | 1 | 26 | 10 | 20 | 6 | 16 | 10 | 10 | 2 | 0 | 1 | 6 |
147,672 |
MDAnalysis/GridDataFormats
|
gridData/OpenDX.py
|
gridData.OpenDX.DXParserNoTokens
|
class DXParserNoTokens(DXParseError):
"""raised when the token buffer is exhausted"""
pass
|
class DXParserNoTokens(DXParseError):
'''raised when the token buffer is exhausted'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 3 | 0 | 2 | 1 | 1 | 1 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
147,673 |
MDAnalysis/GridDataFormats
|
gridData/OpenDX.py
|
gridData.OpenDX.DXParseError
|
class DXParseError(Exception):
"""general exception for parsing errors in DX files"""
pass
|
class DXParseError(Exception):
'''general exception for parsing errors in DX files'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 10 | 3 | 0 | 2 | 1 | 1 | 1 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
147,674 |
MDAnalysis/GridDataFormats
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MDAnalysis_GridDataFormats/gridData/tests/test_grid.py
|
gridData.tests.test_grid.TestGrid
|
class TestGrid(object):
@pytest.fixture
def pklfile(self, data, tmpdir):
g = data['grid']
fn = tmpdir.mkdir('grid').join('grid.dat')
g.save(fn) # always saves as pkl
return fn
def test_init(self, data):
g = Grid(data['griddata'], origin=data['origin'],
delta=1)
assert_array_equal(g.delta, data['delta'])
def test_init_wrong_origin(self, data):
with pytest.raises(TypeError):
Grid(data['griddata'], origin=np.ones(4), delta=data['delta'])
def test_init_wrong_delta(self, data):
with pytest.raises(TypeError):
Grid(data['griddata'], origin=data['origin'], delta=np.ones(4))
def test_empty_Grid(self):
g = Grid()
assert isinstance(g, Grid)
def test_init_missing_delta_ValueError(self, data):
with pytest.raises(ValueError):
Grid(data['griddata'], origin=data['origin'])
def test_init_missing_origin_ValueError(self, data):
with pytest.raises(ValueError):
Grid(data['griddata'], delta=data['delta'])
def test_init_wrong_data_exception(self):
with pytest.raises(IOError):
Grid("__does_not_exist__")
def test_load_wrong_fileformat_ValueError(self):
with pytest.raises(ValueError):
Grid(grid=True, file_format="xxx")
def test_equality(self, data):
assert data['grid'] == data['grid']
assert data['grid'] != 'foo'
g = Grid(data['griddata'], origin=data['origin'] +
1, delta=data['delta'])
assert data['grid'] != g
def test_addition(self, data):
g = data['grid'] + data['grid']
assert_array_equal(g.grid.flat, (2 * data['griddata']).flat)
g = 2 + data['grid']
assert_array_equal(g.grid.flat, (2 + data['griddata']).flat)
g = g + data['grid']
assert_array_equal(g.grid.flat, (2 + (2 * data['griddata'])).flat)
def test_subtraction(self, data):
g = data['grid'] - data['grid']
assert_array_equal(g.grid.flat, np.zeros(27))
g = 2 - data['grid']
assert_array_equal(g.grid.flat, (2 - data['griddata']).flat)
def test_multiplication(self, data):
g = data['grid'] * data['grid']
assert_array_equal(g.grid.flat, (data['griddata'] ** 2).flat)
g = 2 * data['grid']
assert_array_equal(g.grid.flat, (2 * data['griddata']).flat)
def test_division(self, data):
g = data['grid'] / data['grid']
assert_array_equal(g.grid.flat, np.ones(27))
g = 2 / data['grid']
assert_array_equal(g.grid.flat, (2 / data['griddata']).flat)
def test_floordivision(self, data):
g = data['grid'].__floordiv__(data['grid'])
assert_array_equal(g.grid.flat, np.ones(27, dtype=np.int64))
g = 2 // data['grid']
assert_array_equal(g.grid.flat, (2 // data['griddata']).flat)
def test_power(self, data):
g = data['grid'] ** 2
assert_array_equal(g.grid.flat, (data['griddata'] ** 2).flat)
g = 2 ** data['grid']
assert_array_equal(g.grid.flat, (2 ** data['griddata']).flat)
def test_compatibility_type(self, data):
assert data['grid'].check_compatible(data['grid'])
assert data['grid'].check_compatible(3)
g = Grid(data['griddata'], origin=data['origin'] -
1, delta=data['delta'])
assert data['grid'].check_compatible(g)
def test_wrong_compatibile_type(self, data):
with pytest.raises(TypeError):
data['grid'].check_compatible("foo")
def test_non_orthonormal_boxes(self, data):
delta = np.eye(3)
with pytest.raises(NotImplementedError):
Grid(data['griddata'], origin=data['origin'], delta=delta)
def test_centers(self, data):
# this only checks the edges. If you know an alternative
# algorithm that isn't an exact duplicate of the one in
# g.centers to test this please implement it.
g = Grid(data['griddata'], origin=np.ones(3), delta=data['delta'])
centers = np.array(list(g.centers()))
assert_array_equal(centers[0], g.origin)
assert_array_equal(centers[-1] - g.origin,
(np.array(g.grid.shape) - 1) * data['delta'])
def test_resample_factor_failure(self, data):
pytest.importorskip('scipy')
with pytest.raises(ValueError):
g = data['grid'].resample_factor(0)
def test_resample_factor(self, data):
pytest.importorskip('scipy')
g = data['grid'].resample_factor(2)
assert_array_equal(g.delta, np.ones(3) * .5)
# zooming in by a factor of 2. Each subinterval is
# split in half, so 3 gridpoints (2 subintervals)
# becomes 5 gridpoints (4 subintervals)
assert_array_equal(g.grid.shape, np.ones(3) * 5)
# check that the values are identical with the
# correct stride.
assert_array_almost_equal(g.grid[::2, ::2, ::2],
data['grid'].grid)
def test_load_pickle(self, data, tmpdir):
g = data['grid']
fn = str(tmpdir.mkdir('grid').join('grid.pkl'))
g.save(fn)
h = Grid()
h.load(fn)
assert h == g
def test_init_pickle_pathobjects(self, data, tmpdir):
g = data['grid']
fn = tmpdir.mkdir('grid').join('grid.pickle')
g.save(fn)
h = Grid(fn)
assert h == g
@pytest.mark.parametrize("fileformat", ("pkl", "PKL", "pickle", "python"))
def test_load_fileformat(self, data, pklfile, fileformat):
h = Grid(pklfile, file_format="pkl")
assert h == data['grid']
# At the moment, reading the file with the wrong parser does not give
# good error messages.
@pytest.mark.xfail
@pytest.mark.parametrize("fileformat", ("ccp4", "plt", "dx"))
def test_load_wrong_fileformat(self, data, pklfile, fileformat):
with pytest.raises(ValueError):
Grid(pklfile, file_format=fileformat)
# just check that we can export without stupid failures; detailed
# format checks in separate tests
@pytest.mark.parametrize("fileformat", ("dx", "pkl"))
def test_export(self, data, fileformat, tmpdir):
g = data['grid']
fn = tmpdir.mkdir('grid_export').join("grid.{}".format(fileformat))
g.export(fn) # check that path objects work
h = Grid(fn) # use format autodetection
assert g == h
@pytest.mark.parametrize("fileformat", ("ccp4", "plt"))
def test_export_not_supported(self, data, fileformat, tmpdir):
g = data['grid']
fn = tmpdir.mkdir('grid_export').join("grid.{}".format(fileformat))
with pytest.raises(ValueError):
g.export(fn)
|
class TestGrid(object):
@pytest.fixture
def pklfile(self, data, tmpdir):
pass
def test_init(self, data):
pass
def test_init_wrong_origin(self, data):
pass
def test_init_wrong_delta(self, data):
pass
def test_empty_Grid(self):
pass
def test_init_missing_delta_ValueError(self, data):
pass
def test_init_missing_origin_ValueError(self, data):
pass
def test_init_wrong_data_exception(self):
pass
def test_load_wrong_fileformat_ValueError(self):
pass
def test_equality(self, data):
pass
def test_addition(self, data):
pass
def test_subtraction(self, data):
pass
def test_multiplication(self, data):
pass
def test_division(self, data):
pass
def test_floordivision(self, data):
pass
def test_power(self, data):
pass
def test_compatibility_type(self, data):
pass
def test_wrong_compatibile_type(self, data):
pass
def test_non_orthonormal_boxes(self, data):
pass
def test_centers(self, data):
pass
def test_resample_factor_failure(self, data):
pass
def test_resample_factor(self, data):
pass
def test_load_pickle(self, data, tmpdir):
pass
def test_init_pickle_pathobjects(self, data, tmpdir):
pass
@pytest.mark.parametrize("fileformat", ("pkl", "PKL", "pickle", "python"))
def test_load_fileformat(self, data, pklfile, fileformat):
pass
@pytest.mark.xfail
@pytest.mark.parametrize("fileformat", ("ccp4", "plt", "dx"))
def test_load_wrong_fileformat(self, data, pklfile, fileformat):
pass
@pytest.mark.parametrize("fileformat", ("dx", "pkl"))
def test_export(self, data, fileformat, tmpdir):
pass
@pytest.mark.parametrize("fileformat", ("ccp4", "plt"))
def test_export_not_supported(self, data, fileformat, tmpdir):
pass
| 35 | 0 | 5 | 0 | 5 | 0 | 1 | 0.11 | 1 | 6 | 1 | 0 | 28 | 0 | 28 | 28 | 178 | 33 | 133 | 63 | 98 | 15 | 124 | 58 | 95 | 1 | 1 | 1 | 28 |
147,675 |
MDAnalysis/GridDataFormats
|
gridData/OpenDX.py
|
gridData.OpenDX.DXInitObject
|
class DXInitObject(object):
"""Storage class that holds data to initialize one of the 'real'
classes such as OpenDX.array, OpenDX.gridconnections, ...
All variables are stored in args which will be turned into the
arguments for the DX class.
"""
DXclasses = {'gridpositions':gridpositions,
'gridconnections':gridconnections,
'array':array, 'field':field,
}
def __init__(self,classtype,classid):
self.type = classtype
self.id = classid
self.args = dict()
def initialize(self):
"""Initialize the corresponding DXclass from the data.
class = DXInitObject.initialize()
"""
return self.DXclasses[self.type](self.id,**self.args)
def __getitem__(self,k):
return self.args[k]
def __setitem__(self,k,v):
self.args[k] = v
def __repr__(self):
return '<DXInitObject instance type='+str(self.type)+', id='+str(self.id)+'>'
|
class DXInitObject(object):
'''Storage class that holds data to initialize one of the 'real'
classes such as OpenDX.array, OpenDX.gridconnections, ...
All variables are stored in args which will be turned into the
arguments for the DX class.
'''
def __init__(self,classtype,classid):
pass
def initialize(self):
'''Initialize the corresponding DXclass from the data.
class = DXInitObject.initialize()
'''
pass
def __getitem__(self,k):
pass
def __setitem__(self,k,v):
pass
def __repr__(self):
pass
| 6 | 2 | 3 | 0 | 2 | 1 | 1 | 0.47 | 1 | 2 | 0 | 0 | 5 | 3 | 5 | 5 | 28 | 3 | 17 | 10 | 11 | 8 | 14 | 10 | 8 | 1 | 1 | 0 | 5 |
147,676 |
MDAnalysis/GridDataFormats
|
gridData/tests/test_mrc.py
|
gridData.tests.test_mrc.TestGridMRC
|
class TestGridMRC():
@pytest.fixture(scope="class")
def grid(self):
return Grid(datafiles.CCP4_1JZV)
def test_shape(self, grid, ccp4data):
assert_equal(grid.grid.shape, ccp4data.shape)
def test_mrc_header(self, grid, ccp4data):
# undocumented MRC header in Grid
assert grid._mrc_header == ccp4data.header
def test_delta(self, grid, ccp4data):
assert_allclose(grid.delta, np.diag(ccp4data.delta))
def test_origin(self, grid, ccp4data):
assert_allclose(grid.origin, ccp4data.origin)
def test_data(self, grid, ccp4data):
assert_allclose(grid.grid, ccp4data.array)
|
class TestGridMRC():
@pytest.fixture(scope="class")
def grid(self):
pass
def test_shape(self, grid, ccp4data):
pass
def test_mrc_header(self, grid, ccp4data):
pass
def test_delta(self, grid, ccp4data):
pass
def test_origin(self, grid, ccp4data):
pass
def test_data(self, grid, ccp4data):
pass
| 8 | 0 | 2 | 0 | 2 | 0 | 1 | 0.07 | 0 | 1 | 1 | 0 | 6 | 0 | 6 | 6 | 20 | 5 | 14 | 8 | 6 | 1 | 13 | 7 | 6 | 1 | 0 | 0 | 6 |
147,677 |
MDAnalysis/GridDataFormats
|
gridData/gOpenMol.py
|
gridData.gOpenMol.Record
|
class Record(object):
def __init__(self, key, bintype, values=None):
self.key = key
self.bintype = bintype
self.values = values # dict(value='comment', ...)
def is_legal(self, value):
if self.values is None:
return True
return value in self.values
def is_legal_dict(self, d):
return self.is_legal(d[self.key])
def __repr__(self):
return "Record(%(key)r,%(bintype)r,...)" % vars(self)
|
class Record(object):
def __init__(self, key, bintype, values=None):
pass
def is_legal(self, value):
pass
def is_legal_dict(self, d):
pass
def __repr__(self):
pass
| 5 | 0 | 3 | 0 | 3 | 0 | 1 | 0.08 | 1 | 0 | 0 | 0 | 4 | 3 | 4 | 4 | 13 | 0 | 13 | 8 | 8 | 1 | 13 | 8 | 8 | 2 | 1 | 1 | 5 |
147,678 |
MFreidank/ARSpy
|
MFreidank_ARSpy/arspy/hull.py
|
arspy.hull.HullNode
|
class HullNode(object):
def __init__(self, m, b, left, right, pr=0.0):
self.m, self.b = m, b
self.left, self.right = left, right
self.pr = pr
def __eq__(self, other):
from math import isclose
def close(a, b):
if a is b is None:
return True
if (a is None and b is not None) or (b is None and a is not None):
return False
return isclose(a, b, abs_tol=1e-02)
return all((
close(self.m, other.m), close(self.left, other.left),
close(self.right, other.right),
close(self.pr, other.pr)
))
def __repr__(self):
return "HullNode({m}, {b}, {left}, {right}, {pr})".format(
m=self.m, b=self.b, left=self.left, right=self.right, pr=self.pr
)
def __hash__(self):
return hash(str(self))
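A sketch of the tolerance-based equality defined above: nodes whose m, left, right and pr each differ by less than abs_tol=1e-02 compare equal, while the intercept b is deliberately not part of __eq__ and __hash__ is built from the exact repr().
n1 = HullNode(m=1.000, b=0.5, left=-1.0, right=1.0, pr=0.25)
n2 = HullNode(m=1.005, b=9.9, left=-1.0, right=1.0, pr=0.25)
assert n1 == n2        # close() succeeds on every compared field
repr(n1)               # 'HullNode(1.0, 0.5, -1.0, 1.0, 0.25)'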
|
class HullNode(object):
def __init__(self, m, b, left, right, pr=0.0):
pass
def __eq__(self, other):
pass
def close(a, b):
pass
def __repr__(self):
pass
def __hash__(self):
pass
| 6 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 4 | 5 | 4 | 4 | 29 | 5 | 24 | 10 | 17 | 0 | 18 | 10 | 11 | 3 | 1 | 1 | 7 |
147,679 |
MIR-MU/ntcir-math-density
|
MIR-MU_ntcir-math-density/ntcir_math_density/__main__.py
|
ntcir_math_density.__main__.LabelledPath
|
class LabelledPath(object):
"""This class represents a path labelled with a unique single-letter label.
Parameters
----------
label : str
A single-letter label.
path : Path
The labelled-path.
Attributes
----------
labels : dict of (str, Path)
A mapping between labels, and paths.
label : str
A single-letter label.
path : Path
The labelled-path.
"""
labels = dict()
def __init__(self, label, path):
assert isinstance(label, str) and len(label) == 1
assert label not in LabelledPath.labels
assert isinstance(path, Path)
self.label = label
self.path = path
LabelledPath.labels[self.label] = self.path
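A usage sketch of the single-letter labelling above; the paths are made up for illustration. Labels are registered in the class-level dict, so reusing a letter trips the assertions in __init__.
from pathlib import Path

judged = LabelledPath('j', Path('/tmp/judged-results'))
estimates = LabelledPath('e', Path('/tmp/estimated-results'))
LabelledPath.labels['j']                  # PosixPath('/tmp/judged-results')
# LabelledPath('j', Path('/tmp/other'))   # would fail: label 'j' already taken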
|
class LabelledPath(object):
'''This class represents a path labelled with a unique single-letter label.
Parameters
----------
label : str
A single-letter label.
path : Path
The labelled-path.
Attributes
----------
labels : dict of (str, Path)
A mapping between labels, and paths.
label : str
A single-letter label.
path : Path
The labelled-path.
'''
def __init__(self, label, path):
pass
| 2 | 1 | 7 | 0 | 7 | 0 | 1 | 1.78 | 1 | 2 | 0 | 0 | 1 | 2 | 1 | 1 | 28 | 3 | 9 | 5 | 7 | 16 | 9 | 5 | 7 | 1 | 1 | 0 | 1 |
147,680 |
MIT-LCP/wfdb-python
|
tests/test_record.py
|
tests.test_record.TestRecord
|
class TestRecord(unittest.TestCase):
"""
Test read and write of single segment WFDB records, including
PhysioNet streaming.
Target files created using the original WFDB Software Package
version 10.5.24
"""
wrsamp_params = [
"record_name",
"fs",
"units",
"sig_name",
"p_signal",
"d_signal",
"e_p_signal",
"e_d_signal",
"samps_per_frame",
"fmt",
"adc_gain",
"baseline",
"comments",
"base_time",
"base_date",
"base_datetime",
]
# ----------------------- 1. Basic Tests -----------------------#
def test_1a(self):
"""
Format 16, entire signal, digital.
Target file created with:
rdsamp -r sample-data/test01_00s | cut -f 2- > record-1a
"""
record = wfdb.rdrecord(
"sample-data/test01_00s", physical=False, return_res=16
)
sig = record.d_signal
sig_target = np.genfromtxt("tests/target-output/record-1a")
# Compare data streaming from Physionet
record_pn = wfdb.rdrecord(
"test01_00s", pn_dir="macecgdb", physical=False, return_res=16
)
# Test file writing
record_2 = wfdb.rdrecord(
"sample-data/test01_00s", physical=False, return_res=16
)
record_2.sig_name = ["ECG_1", "ECG_2", "ECG_3", "ECG_4"]
record_2.wrsamp(write_dir=self.temp_path)
record_write = wfdb.rdrecord(
os.path.join(self.temp_path, "test01_00s"),
physical=False,
return_res=16,
)
assert np.array_equal(sig, sig_target)
assert record.__eq__(record_pn)
assert record_2.__eq__(record_write)
def test_1b(self):
"""
Format 16, byte offset, selected duration, selected channels,
physical.
Target file created with:
rdsamp -r sample-data/a103l -f 50 -t 160 -s 2 0 -P | cut -f 2- > record-1b
"""
sig, fields = wfdb.rdsamp(
"sample-data/a103l", sampfrom=12500, sampto=40000, channels=[2, 0]
)
sig_round = np.round(sig, decimals=8)
sig_target = np.genfromtxt("tests/target-output/record-1b")
# Compare data streaming from Physionet
sig_pn, fields_pn = wfdb.rdsamp(
"a103l",
pn_dir="challenge-2015/training",
sampfrom=12500,
sampto=40000,
channels=[2, 0],
)
# Option of selecting channels by name
sig_named, fields_named = wfdb.rdsamp(
"sample-data/a103l",
sampfrom=12500,
sampto=40000,
channel_names=["PLETH", "II"],
)
assert np.array_equal(sig_round, sig_target)
assert np.array_equal(sig, sig_pn) and fields == fields_pn
assert np.array_equal(sig, sig_named) and fields == fields_named
def test_1c(self):
"""
Format 16, byte offset, selected duration, selected channels,
digital, expanded format.
Target file created with:
rdsamp -r sample-data/a103l -f 80 -s 0 1 | cut -f 2- > record-1c
"""
record = wfdb.rdrecord(
"sample-data/a103l",
sampfrom=20000,
channels=[0, 1],
physical=False,
smooth_frames=False,
)
# convert expanded to uniform array
sig = np.zeros((record.sig_len, record.n_sig))
for i in range(record.n_sig):
sig[:, i] = record.e_d_signal[i]
sig_target = np.genfromtxt("tests/target-output/record-1c")
# Compare data streaming from Physionet
record_pn = wfdb.rdrecord(
"a103l",
pn_dir="challenge-2015/training",
sampfrom=20000,
channels=[0, 1],
physical=False,
smooth_frames=False,
)
# Test file writing
record.wrsamp(write_dir=self.temp_path, expanded=True)
record_write = wfdb.rdrecord(
os.path.join(self.temp_path, "a103l"),
physical=False,
smooth_frames=False,
)
assert np.array_equal(sig, sig_target)
assert record.__eq__(record_pn)
assert record.__eq__(record_write)
def test_1d(self):
"""
Format 80, selected duration, selected channels, physical
Target file created with:
rdsamp -r sample-data/3000003_0003 -f 1 -t 8 -s 1 -P | cut -f 2- > record-1d
"""
sig, fields = wfdb.rdsamp(
"sample-data/3000003_0003", sampfrom=125, sampto=1000, channels=[1]
)
sig_round = np.round(sig, decimals=8)
sig_target = np.genfromtxt("tests/target-output/record-1d")
sig_target = sig_target.reshape(len(sig_target), 1)
# Compare data streaming from Physionet
sig_pn, fields_pn = wfdb.rdsamp(
"3000003_0003",
pn_dir="mimic3wdb/30/3000003/",
sampfrom=125,
sampto=1000,
channels=[1],
)
assert np.array_equal(sig_round, sig_target)
assert np.array_equal(sig, sig_pn) and fields == fields_pn
def test_1e(self):
"""
Format 24, entire signal, digital.
Target file created with:
rdsamp -r sample-data/n8_evoked_raw_95_F1_R9 | cut -f 2- |
gzip -9 -n > record-1e.gz
"""
record = wfdb.rdrecord(
"sample-data/n8_evoked_raw_95_F1_R9", physical=False
)
sig = record.d_signal
sig_target = np.genfromtxt("tests/target-output/record-1e.gz")
sig_target[sig_target == -32768] = -(2**23)
# Compare data streaming from Physionet
record_pn = wfdb.rdrecord(
"n8_evoked_raw_95_F1_R9", physical=False, pn_dir="earndb/raw/N8"
)
# Test file writing
record_2 = wfdb.rdrecord(
"sample-data/n8_evoked_raw_95_F1_R9", physical=False
)
record_2.wrsamp(write_dir=self.temp_path)
record_write = wfdb.rdrecord(
os.path.join(self.temp_path, "n8_evoked_raw_95_F1_R9"),
physical=False,
)
assert np.array_equal(sig, sig_target)
assert record.__eq__(record_pn)
assert record_2.__eq__(record_write)
def test_1f(self):
"""
All binary formats, multiple signal files in one record.
Target file created with:
rdsamp -r sample-data/binformats | cut -f 2- |
gzip -9 -n > record-1f.gz
"""
record = wfdb.rdrecord("sample-data/binformats", physical=False)
sig_target = np.genfromtxt("tests/target-output/record-1f.gz")
for n, name in enumerate(record.sig_name):
np.testing.assert_array_equal(
record.d_signal[:, n], sig_target[:, n], "Mismatch in %s" % name
)
for sampfrom in range(0, 3):
for sampto in range(record.sig_len - 3, record.sig_len):
record_2 = wfdb.rdrecord(
"sample-data/binformats",
physical=False,
sampfrom=sampfrom,
sampto=sampto,
)
for n, name in enumerate(record.sig_name):
if record.fmt[n] != "8":
np.testing.assert_array_equal(
record_2.d_signal[:, n],
sig_target[sampfrom:sampto, n],
"Mismatch in %s" % name,
)
# Test writing all supported formats. (Currently not all signal
# formats are supported for output; keep this list in sync with
# 'wr_dat_file' in wfdb/io/_signal.py.)
OUTPUT_FMTS = ["80", "212", "16", "24", "32"]
channels = []
for i, fmt in enumerate(record.fmt):
if fmt in OUTPUT_FMTS:
channels.append(i)
partial_record = wfdb.rdrecord(
"sample-data/binformats",
physical=False,
channels=channels,
)
partial_record.wrsamp(write_dir=self.temp_path)
converted_record = wfdb.rdrecord(
os.path.join(self.temp_path, "binformats"),
physical=False,
)
assert partial_record == converted_record
def test_read_write_flac(self):
"""
All FLAC formats, multiple signal files in one record.
Target file created with:
rdsamp -r sample-data/flacformats | cut -f 2- |
gzip -9 -n > record-flac.gz
"""
record = wfdb.rdrecord("sample-data/flacformats", physical=False)
sig_target = np.genfromtxt("tests/target-output/record-flac.gz")
for n, name in enumerate(record.sig_name):
np.testing.assert_array_equal(
record.d_signal[:, n], sig_target[:, n], f"Mismatch in {name}"
)
for sampfrom in range(0, 3):
for sampto in range(record.sig_len - 3, record.sig_len):
record_2 = wfdb.rdrecord(
"sample-data/flacformats",
physical=False,
sampfrom=sampfrom,
sampto=sampto,
)
for n, name in enumerate(record.sig_name):
np.testing.assert_array_equal(
record_2.d_signal[:, n],
sig_target[sampfrom:sampto, n],
f"Mismatch in {name}",
)
# Test file writing
record.wrsamp(write_dir=self.temp_path)
record_write = wfdb.rdrecord(
os.path.join(self.temp_path, "flacformats"),
physical=False,
)
assert record == record_write
def test_read_write_flac_multifrequency(self):
"""
Format 516 with multiple signal files and variable samples per frame.
"""
# Check that we can read a record and write it out again
record = wfdb.rdrecord(
"sample-data/mixedsignals",
physical=False,
smooth_frames=False,
)
record.wrsamp(write_dir=self.temp_path, expanded=True)
# Check that result matches the original
record = wfdb.rdrecord("sample-data/mixedsignals", smooth_frames=False)
record_write = wfdb.rdrecord(
os.path.join(self.temp_path, "mixedsignals"),
smooth_frames=False,
)
assert record == record_write
def test_unique_samps_per_frame_e_p_signal(self):
"""
Test writing an e_p_signal with wfdb.io.wrsamp where the signals have different samples per frame. All other
parameters which overlap between a Record object and wfdb.io.wrsamp are also checked.
"""
# Read in a record with different samples per frame
record = wfdb.rdrecord(
"sample-data/mixedsignals",
smooth_frames=False,
)
# Write the signals
wfdb.io.wrsamp(
"mixedsignals",
fs=record.fs,
units=record.units,
sig_name=record.sig_name,
base_date=record.base_date,
base_time=record.base_time,
comments=record.comments,
p_signal=record.p_signal,
d_signal=record.d_signal,
e_p_signal=record.e_p_signal,
e_d_signal=record.e_d_signal,
samps_per_frame=record.samps_per_frame,
baseline=record.baseline,
adc_gain=record.adc_gain,
fmt=record.fmt,
write_dir=self.temp_path,
)
# Check that the written record matches the original
# Read in the original and written records
record = wfdb.rdrecord("sample-data/mixedsignals", smooth_frames=False)
record_write = wfdb.rdrecord(
os.path.join(self.temp_path, "mixedsignals"),
smooth_frames=False,
)
# Check that the signals match
for n, name in enumerate(record.sig_name):
np.testing.assert_array_equal(
record.e_p_signal[n],
record_write.e_p_signal[n],
f"Mismatch in {name}",
)
# Filter out the signal
record_filtered = {
k: getattr(record, k)
for k in self.wrsamp_params
if not (
isinstance(getattr(record, k), np.ndarray)
or (
isinstance(getattr(record, k), list)
and all(
isinstance(item, np.ndarray)
for item in getattr(record, k)
)
)
)
}
record_write_filtered = {
k: getattr(record_write, k)
for k in self.wrsamp_params
if not (
isinstance(getattr(record_write, k), np.ndarray)
or (
isinstance(getattr(record_write, k), list)
and all(
isinstance(item, np.ndarray)
for item in getattr(record_write, k)
)
)
)
}
# Check that the arguments beyond the signals also match
assert record_filtered == record_write_filtered
def test_unique_samps_per_frame_e_d_signal(self):
"""
Test writing an e_d_signal with wfdb.io.wrsamp where the signals have different samples per frame. All other
parameters which overlap between a Record object and wfdb.io.wrsamp are also checked.
"""
# Read in a record with different samples per frame
record = wfdb.rdrecord(
"sample-data/mixedsignals",
physical=False,
smooth_frames=False,
)
# Write the signals
wfdb.io.wrsamp(
"mixedsignals",
fs=record.fs,
units=record.units,
sig_name=record.sig_name,
base_date=record.base_date,
base_time=record.base_time,
comments=record.comments,
p_signal=record.p_signal,
d_signal=record.d_signal,
e_p_signal=record.e_p_signal,
e_d_signal=record.e_d_signal,
samps_per_frame=record.samps_per_frame,
baseline=record.baseline,
adc_gain=record.adc_gain,
fmt=record.fmt,
write_dir=self.temp_path,
)
# Check that the written record matches the original
# Read in the original and written records
record = wfdb.rdrecord(
"sample-data/mixedsignals", physical=False, smooth_frames=False
)
record_write = wfdb.rdrecord(
os.path.join(self.temp_path, "mixedsignals"),
physical=False,
smooth_frames=False,
)
# Check that the signals match
for n, name in enumerate(record.sig_name):
np.testing.assert_array_equal(
record.e_d_signal[n],
record_write.e_d_signal[n],
f"Mismatch in {name}",
)
# Filter out the signal
record_filtered = {
k: getattr(record, k)
for k in self.wrsamp_params
if not (
isinstance(getattr(record, k), np.ndarray)
or (
isinstance(getattr(record, k), list)
and all(
isinstance(item, np.ndarray)
for item in getattr(record, k)
)
)
)
}
record_write_filtered = {
k: getattr(record_write, k)
for k in self.wrsamp_params
if not (
isinstance(getattr(record_write, k), np.ndarray)
or (
isinstance(getattr(record_write, k), list)
and all(
isinstance(item, np.ndarray)
for item in getattr(record_write, k)
)
)
)
}
# Check that the arguments beyond the signals also match
assert record_filtered == record_write_filtered
def test_read_write_flac_many_channels(self):
"""
Check we can read and write to format 516 with more than 8 channels.
"""
# Read in a signal with 12 channels in format 16
record = wfdb.rdrecord("sample-data/s0010_re", physical=False)
# Test that we can write out the signal in format 516
wfdb.wrsamp(
record_name="s0010_re_fmt516",
fs=record.fs,
units=record.units,
sig_name=record.sig_name,
fmt=["516"] * record.n_sig,
d_signal=record.d_signal,
adc_gain=record.adc_gain,
baseline=record.baseline,
write_dir=self.temp_path,
)
# Check that signal matches the original
record_fmt516 = wfdb.rdrecord(
os.path.join(self.temp_path, "s0010_re_fmt516"),
physical=False,
)
assert (record.d_signal == record_fmt516.d_signal).all()
def test_read_flac_longduration(self):
"""
Three signals multiplexed in a FLAC file, over 2**24 samples.
Input file created with:
yes 25 50 75 | head -5600000 |
wrsamp -O 508 -o flac_3_constant 0 1 2
Note that the total number of samples (across the three
channels) exceeds 2**24. There is a bug in libsndfile that
causes it to break if we try to read more than 2**24 total
samples at a time, when the number of channels is not a power
of two.
"""
record = wfdb.rdrecord("sample-data/flac_3_constant")
sig_target = np.repeat(
np.array([[0.125, 0.25, 0.375]], dtype="float64"),
5600000,
axis=0,
)
np.testing.assert_array_equal(record.p_signal, sig_target)
# ------------------ 2. Special format records ------------------ #
def test_2a(self):
"""
Format 212, entire signal, physical.
Target file created with:
rdsamp -r sample-data/100 -P | cut -f 2- > record-2a
"""
sig, fields = wfdb.rdsamp("sample-data/100")
sig_round = np.round(sig, decimals=8)
sig_target = np.genfromtxt("tests/target-output/record-2a")
# Compare data streaming from Physionet
sig_pn, fields_pn = wfdb.rdsamp("100", pn_dir="mitdb")
# This comment line was manually added and is not present in the
# original PhysioNet record
del fields["comments"][0]
assert np.array_equal(sig_round, sig_target)
assert np.array_equal(sig, sig_pn) and fields == fields_pn
def test_2b(self):
"""
Format 212, selected duration, selected channel, digital.
Target file created with:
rdsamp -r sample-data/100 -f 0.002 -t 30 -s 1 | cut -f 2- > record-2b
"""
record = wfdb.rdrecord(
"sample-data/100",
sampfrom=1,
sampto=10800,
channels=[1],
physical=False,
)
sig = record.d_signal
sig_target = np.genfromtxt("tests/target-output/record-2b")
sig_target = sig_target.reshape(len(sig_target), 1)
# Compare data streaming from Physionet
record_pn = wfdb.rdrecord(
"100",
sampfrom=1,
sampto=10800,
channels=[1],
physical=False,
pn_dir="mitdb",
)
# This comment line was manually added and is not present in the
# original PhysioNet record
del record.comments[0]
# Option of selecting channels by name
record_named = wfdb.rdrecord(
"sample-data/100",
sampfrom=1,
sampto=10800,
channel_names=["V5"],
physical=False,
)
del record_named.comments[0]
# Test file writing
record.wrsamp(write_dir=self.temp_path)
record_write = wfdb.rdrecord(
os.path.join(self.temp_path, "100"),
physical=False,
)
assert np.array_equal(sig, sig_target)
assert record.__eq__(record_pn)
assert record.__eq__(record_named)
assert record.__eq__(record_write)
def test_2c(self):
"""
Format 212, entire signal, physical, odd sampled record.
Target file created with:
rdsamp -r sample-data/100_3chan -P | cut -f 2- > record-2c
"""
record = wfdb.rdrecord("sample-data/100_3chan")
sig_round = np.round(record.p_signal, decimals=8)
sig_target = np.genfromtxt("tests/target-output/record-2c")
# Test file writing
record.d_signal = record.adc()
record.wrsamp(write_dir=self.temp_path)
record_write = wfdb.rdrecord(os.path.join(self.temp_path, "100_3chan"))
record.d_signal = None
assert np.array_equal(sig_round, sig_target)
assert record.__eq__(record_write)
def test_2d(self):
"""
Format 310, selected duration, digital
Target file created with:
rdsamp -r sample-data/3000003_0003 -f 0 -t 8.21 | cut -f 2- | wrsamp -o 310derive -O 310
rdsamp -r 310derive -f 0.007 | cut -f 2- > record-2d
"""
record = wfdb.rdrecord(
"sample-data/310derive", sampfrom=2, physical=False
)
sig = record.d_signal
sig_target = np.genfromtxt("tests/target-output/record-2d")
assert np.array_equal(sig, sig_target)
def test_2e(self):
"""
Format 311, selected duration, physical.
Target file created with:
rdsamp -r sample-data/3000003_0003 -f 0 -t 8.21 -s 1 | cut -f 2- | wrsamp -o 311derive -O 311
rdsamp -r 311derive -f 0.005 -t 3.91 -P | cut -f 2- > record-2e
"""
sig, fields = wfdb.rdsamp(
"sample-data/311derive", sampfrom=1, sampto=978
)
sig = np.round(sig, decimals=8)
sig_target = np.genfromtxt("tests/target-output/record-2e")
sig_target = sig_target.reshape([977, 1])
assert np.array_equal(sig, sig_target)
# --------------------- 3. Multi-dat records --------------------- #
def test_3a(self):
"""
Multi-dat, entire signal, digital
Target file created with:
rdsamp -r sample-data/s0010_re | cut -f 2- > record-3a
"""
record = wfdb.rdrecord("sample-data/s0010_re", physical=False)
sig = record.d_signal
sig_target = np.genfromtxt("tests/target-output/record-3a")
# Compare data streaming from Physionet
record_pn = wfdb.rdrecord(
"s0010_re", physical=False, pn_dir="ptbdb/patient001"
)
# Test file writing
record.wrsamp(write_dir=self.temp_path)
record_write = wfdb.rdrecord(
os.path.join(self.temp_path, "s0010_re"),
physical=False,
)
assert np.array_equal(sig, sig_target)
assert record.__eq__(record_pn)
assert record.__eq__(record_write)
def test_3b(self):
"""
Multi-dat, selected duration, selected channels, physical.
Target file created with:
rdsamp -r sample-data/s0010_re -f 5 -t 38 -P -s 13 0 4 8 3 | cut -f 2- > record-3b
"""
sig, fields = wfdb.rdsamp(
"sample-data/s0010_re",
sampfrom=5000,
sampto=38000,
channels=[13, 0, 4, 8, 3],
)
sig_round = np.round(sig, decimals=8)
sig_target = np.genfromtxt("tests/target-output/record-3b")
# Compare data streaming from Physionet
sig_pn, fields_pn = wfdb.rdsamp(
"s0010_re",
sampfrom=5000,
pn_dir="ptbdb/patient001",
sampto=38000,
channels=[13, 0, 4, 8, 3],
)
assert np.array_equal(sig_round, sig_target)
assert np.array_equal(sig, sig_pn) and fields == fields_pn
# -------------- 4. Skew and multiple samples/frame -------------- #
def test_4a(self):
"""
Format 16, multi-samples per frame, skew, digital.
Target file created with:
rdsamp -r sample-data/test01_00s_skewframe | cut -f 2- > record-4a
"""
record = wfdb.rdrecord(
"sample-data/test01_00s_skewframe", physical=False
)
sig = record.d_signal
# The WFDB library rdsamp does not return the final N samples for all
# channels due to the skew. The WFDB python rdsamp does return the final
# N samples, filling in NANs for end of skewed channels only.
sig = sig[:-3, :]
sig_target = np.genfromtxt("tests/target-output/record-4a")
# Test file writing. Multiple samples per frame and skew.
# Have to read all the samples in the record, ignoring skew
record_no_skew = wfdb.rdrecord(
"sample-data/test01_00s_skewframe",
physical=False,
smooth_frames=False,
ignore_skew=True,
)
record_no_skew.wrsamp(write_dir=self.temp_path, expanded=True)
# Read the written record
record_write = wfdb.rdrecord(
os.path.join(self.temp_path, "test01_00s_skewframe"),
physical=False,
)
assert np.array_equal(sig, sig_target)
assert record.__eq__(record_write)
def test_4b(self):
"""
Format 12, multi-samples per frame, skew, entire signal, digital.
Target file created with:
rdsamp -r sample-data/03700181 | cut -f 2- > record-4b
"""
record = wfdb.rdrecord("sample-data/03700181", physical=False)
sig = record.d_signal
# The WFDB library rdsamp does not return the final N samples for all
# channels due to the skew.
sig = sig[:-4, :]
# The WFDB python rdsamp does return the final N samples, filling in
# NANs for end of skewed channels only.
sig_target = np.genfromtxt("tests/target-output/record-4b")
# Compare data streaming from Physionet
record_pn = wfdb.rdrecord(
"03700181", physical=False, pn_dir="mimicdb/037"
)
# Test file writing. Multiple samples per frame and skew.
# Have to read all the samples in the record, ignoring skew
record_no_skew = wfdb.rdrecord(
"sample-data/03700181",
physical=False,
smooth_frames=False,
ignore_skew=True,
)
record_no_skew.wrsamp(write_dir=self.temp_path, expanded=True)
# Read the written record
record_write = wfdb.rdrecord(
os.path.join(self.temp_path, "03700181"),
physical=False,
)
assert np.array_equal(sig, sig_target)
assert record.__eq__(record_pn)
assert record.__eq__(record_write)
def test_4c(self):
"""
        Format 12, multi-samples per frame, skew, selected duration,
selected channels, physical.
Target file created with:
rdsamp -r sample-data/03700181 -f 8 -t 128 -s 0 2 -P | cut -f 2- > record-4c
"""
sig, fields = wfdb.rdsamp(
"sample-data/03700181", channels=[0, 2], sampfrom=1000, sampto=16000
)
sig_round = np.round(sig, decimals=8)
sig_target = np.genfromtxt("tests/target-output/record-4c")
# Compare data streaming from Physionet
sig_pn, fields_pn = wfdb.rdsamp(
"03700181",
pn_dir="mimicdb/037",
channels=[0, 2],
sampfrom=1000,
sampto=16000,
)
# Test file writing. Multiple samples per frame and skew.
# Have to read all the samples in the record, ignoring skew
record_no_skew = wfdb.rdrecord(
"sample-data/03700181",
physical=False,
smooth_frames=False,
ignore_skew=True,
)
record_no_skew.wrsamp(write_dir=self.temp_path, expanded=True)
# Read the written record
writesig, writefields = wfdb.rdsamp(
os.path.join(self.temp_path, "03700181"),
channels=[0, 2],
sampfrom=1000,
sampto=16000,
)
assert np.array_equal(sig_round, sig_target)
assert np.array_equal(sig, sig_pn) and fields == fields_pn
assert np.array_equal(sig, writesig) and fields == writefields
def test_4d(self):
"""
Format 16, multi-samples per frame, skew, read expanded signals
Target file created with:
rdsamp -r sample-data/test01_00s_skewframe -P -H | cut -f 2- > record-4d
"""
record = wfdb.rdrecord(
"sample-data/test01_00s_skewframe", smooth_frames=False
)
# Upsample the channels with lower samples/frame
expandsig = np.zeros((7994, 3))
expandsig[:, 0] = np.repeat(record.e_p_signal[0][:-3], 2)
expandsig[:, 1] = record.e_p_signal[1][:-6]
expandsig[:, 2] = np.repeat(record.e_p_signal[2][:-3], 2)
sig_round = np.round(expandsig, decimals=8)
sig_target = np.genfromtxt("tests/target-output/record-4d")
assert np.array_equal(sig_round, sig_target)
def test_write_smoothed(self):
"""
Test writing a record after reading with smooth_frames
"""
record = wfdb.rdrecord(
"sample-data/drive02",
physical=False,
smooth_frames=True,
)
record.wrsamp(write_dir=self.temp_path)
record2 = wfdb.rdrecord(
os.path.join(self.temp_path, "drive02"),
physical=False,
)
np.testing.assert_array_equal(record.d_signal, record2.d_signal)
def test_to_dataframe(self):
record = wfdb.rdrecord("sample-data/test01_00s")
df = record.to_dataframe()
self.assertEqual(record.sig_name, list(df.columns))
self.assertEqual(len(df), record.sig_len)
self.assertEqual(df.index[0], pd.Timedelta(0))
self.assertEqual(
df.index[-1],
pd.Timedelta(seconds=1 / record.fs * (record.sig_len - 1)),
)
assert np.array_equal(record.p_signal, df.values)
def test_header_with_non_utf8(self):
"""
Ignores non-utf8 characters in the header part.
"""
record = wfdb.rdrecord("sample-data/test_generator_2")
sig_units_target = [
"uV",
"uV",
"uV",
"uV",
"uV",
"uV",
"uV",
"uV",
"mV",
"mV",
"uV",
"mV",
]
assert record.units.__eq__(sig_units_target)
@classmethod
def setUpClass(cls):
cls.temp_directory = tempfile.TemporaryDirectory()
cls.temp_path = cls.temp_directory.name
@classmethod
def tearDownClass(cls):
cls.temp_directory.cleanup()
|
class TestRecord(unittest.TestCase):
'''
Test read and write of single segment WFDB records, including
PhysioNet streaming.
Target files created using the original WFDB Software Package
version 10.5.24
'''
def test_1a(self):
'''
Format 16, entire signal, digital.
Target file created with:
rdsamp -r sample-data/test01_00s | cut -f 2- > record-1a
'''
pass
def test_1b(self):
'''
Format 16, byte offset, selected duration, selected channels,
physical.
Target file created with:
rdsamp -r sample-data/a103l -f 50 -t 160 -s 2 0 -P | cut -f 2- > record-1b
'''
pass
def test_1c(self):
'''
Format 16, byte offset, selected duration, selected channels,
digital, expanded format.
Target file created with:
rdsamp -r sample-data/a103l -f 80 -s 0 1 | cut -f 2- > record-1c
'''
pass
def test_1d(self):
'''
Format 80, selected duration, selected channels, physical
Target file created with:
rdsamp -r sample-data/3000003_0003 -f 1 -t 8 -s 1 -P | cut -f 2- > record-1d
'''
pass
def test_1e(self):
'''
Format 24, entire signal, digital.
Target file created with:
rdsamp -r sample-data/n8_evoked_raw_95_F1_R9 | cut -f 2- |
gzip -9 -n > record-1e.gz
'''
pass
def test_1f(self):
'''
All binary formats, multiple signal files in one record.
Target file created with:
rdsamp -r sample-data/binformats | cut -f 2- |
gzip -9 -n > record-1f.gz
'''
pass
def test_read_write_flac(self):
'''
All FLAC formats, multiple signal files in one record.
Target file created with:
rdsamp -r sample-data/flacformats | cut -f 2- |
gzip -9 -n > record-flac.gz
'''
pass
def test_read_write_flac_multifrequency(self):
'''
Format 516 with multiple signal files and variable samples per frame.
'''
pass
def test_unique_samps_per_frame_e_p_signal(self):
'''
Test writing an e_p_signal with wfdb.io.wrsamp where the signals have different samples per frame. All other
parameters which overlap between a Record object and wfdb.io.wrsamp are also checked.
'''
pass
def test_unique_samps_per_frame_e_d_signal(self):
'''
Test writing an e_d_signal with wfdb.io.wrsamp where the signals have different samples per frame. All other
parameters which overlap between a Record object and wfdb.io.wrsamp are also checked.
'''
pass
def test_read_write_flac_many_channels(self):
'''
Check we can read and write to format 516 with more than 8 channels.
'''
pass
def test_read_flac_longduration(self):
'''
Three signals multiplexed in a FLAC file, over 2**24 samples.
Input file created with:
yes 25 50 75 | head -5600000 |
wrsamp -O 508 -o flac_3_constant 0 1 2
Note that the total number of samples (across the three
channels) exceeds 2**24. There is a bug in libsndfile that
causes it to break if we try to read more than 2**24 total
samples at a time, when the number of channels is not a power
of two.
'''
pass
def test_2a(self):
'''
Format 212, entire signal, physical.
Target file created with:
rdsamp -r sample-data/100 -P | cut -f 2- > record-2a
'''
pass
def test_2b(self):
'''
Format 212, selected duration, selected channel, digital.
Target file created with:
rdsamp -r sample-data/100 -f 0.002 -t 30 -s 1 | cut -f 2- > record-2b
'''
pass
def test_2c(self):
'''
Format 212, entire signal, physical, odd sampled record.
Target file created with:
rdsamp -r sample-data/100_3chan -P | cut -f 2- > record-2c
'''
pass
def test_2d(self):
'''
Format 310, selected duration, digital
Target file created with:
rdsamp -r sample-data/3000003_0003 -f 0 -t 8.21 | cut -f 2- | wrsamp -o 310derive -O 310
rdsamp -r 310derive -f 0.007 | cut -f 2- > record-2d
'''
pass
def test_2e(self):
'''
Format 311, selected duration, physical.
Target file created with:
rdsamp -r sample-data/3000003_0003 -f 0 -t 8.21 -s 1 | cut -f 2- | wrsamp -o 311derive -O 311
rdsamp -r 311derive -f 0.005 -t 3.91 -P | cut -f 2- > record-2e
'''
pass
def test_3a(self):
'''
Multi-dat, entire signal, digital
Target file created with:
rdsamp -r sample-data/s0010_re | cut -f 2- > record-3a
'''
pass
def test_3b(self):
'''
Multi-dat, selected duration, selected channels, physical.
Target file created with:
rdsamp -r sample-data/s0010_re -f 5 -t 38 -P -s 13 0 4 8 3 | cut -f 2- > record-3b
'''
pass
def test_4a(self):
'''
Format 16, multi-samples per frame, skew, digital.
Target file created with:
rdsamp -r sample-data/test01_00s_skewframe | cut -f 2- > record-4a
'''
pass
def test_4b(self):
'''
Format 12, multi-samples per frame, skew, entire signal, digital.
Target file created with:
rdsamp -r sample-data/03700181 | cut -f 2- > record-4b
'''
pass
def test_4c(self):
'''
        Format 12, multi-samples per frame, skew, selected duration,
selected channels, physical.
Target file created with:
rdsamp -r sample-data/03700181 -f 8 -t 128 -s 0 2 -P | cut -f 2- > record-4c
'''
pass
def test_4d(self):
'''
Format 16, multi-samples per frame, skew, read expanded signals
Target file created with:
rdsamp -r sample-data/test01_00s_skewframe -P -H | cut -f 2- > record-4d
'''
pass
def test_write_smoothed(self):
'''
Test writing a record after reading with smooth_frames
'''
pass
def test_to_dataframe(self):
pass
def test_header_with_non_utf8(self):
'''
Ignores non-utf8 characters in the header part.
'''
pass
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
| 31 | 26 | 30 | 3 | 21 | 7 | 2 | 0.34 | 1 | 4 | 0 | 0 | 26 | 1 | 28 | 100 | 914 | 116 | 595 | 149 | 564 | 203 | 248 | 146 | 219 | 8 | 2 | 4 | 42 |
147,681 |
MIT-LCP/wfdb-python
|
tests/test_record.py
|
tests.test_record.TestMultiRecord
|
class TestMultiRecord(unittest.TestCase):
"""
Test read and write of multi segment WFDB records, including
PhysioNet streaming.
Target files created using the original WFDB Software Package
version 10.5.24
"""
def test_multi_fixed_a(self):
"""
Multi-segment, fixed layout, read entire signal.
Target file created with:
rdsamp -r sample-data/multi-segment/fixed1/v102s -P | cut -f 2- > record-multi-fixed-a
"""
record = wfdb.rdrecord("sample-data/multi-segment/fixed1/v102s")
sig_round = np.round(record.p_signal, decimals=8)
sig_target = np.genfromtxt("tests/target-output/record-multi-fixed-a")
np.testing.assert_equal(sig_round, sig_target)
def test_multi_fixed_b(self):
"""
Multi-segment, fixed layout, selected duration, samples read
from one segment.
Target file created with:
rdsamp -r sample-data/multi-segment/fixed1/v102s -t s75000 -P | cut -f 2- > record-multi-fixed-b
"""
record = wfdb.rdrecord(
"sample-data/multi-segment/fixed1/v102s", sampto=75000
)
sig_round = np.round(record.p_signal, decimals=8)
sig_target = np.genfromtxt("tests/target-output/record-multi-fixed-b")
np.testing.assert_equal(sig_round, sig_target)
def test_multi_fixed_c(self):
"""
Multi-segment, fixed layout, selected duration and channels,
samples read from multiple segments
Target file created with:
rdsamp -r sample-data/multi-segment/fixed1/v102s -f s70000 -t s80000 -s 1 0 3 -P | cut -f 2- > record-multi-fixed-c
"""
record = wfdb.rdrecord(
"sample-data/multi-segment/fixed1/v102s",
sampfrom=70000,
sampto=80000,
channels=[1, 0, 3],
)
sig_round = np.round(record.p_signal, decimals=8)
sig_target = np.genfromtxt("tests/target-output/record-multi-fixed-c")
# Option of selecting channels by name
record_named = wfdb.rdrecord(
"sample-data/multi-segment/fixed1/v102s",
sampfrom=70000,
sampto=80000,
channel_names=["V", "II", "RESP"],
)
np.testing.assert_equal(sig_round, sig_target)
assert record.__eq__(record_named)
def test_multi_fixed_d(self):
"""
Multi-segment, fixed layout, multi-frequency, selected channels
Target file created with:
rdsamp -r sample-data/multi-segment/041s/ -s 3 2 1 -H |
cut -f 2- | sed s/-32768/-2048/ |
gzip -9 -n > tests/target-output/record-multi-fixed-d.gz
"""
record = wfdb.rdrecord(
"sample-data/multi-segment/041s/041s",
channels=[3, 2, 1],
physical=False,
smooth_frames=False,
)
# Convert expanded to uniform array (high-resolution)
sig = np.zeros((record.sig_len * 4, record.n_sig), dtype=int)
for i, s in enumerate(record.e_d_signal):
sig[:, i] = np.repeat(s, len(sig[:, i]) // len(s))
sig_target = np.genfromtxt(
"tests/target-output/record-multi-fixed-d.gz"
)
record_named = wfdb.rdrecord(
"sample-data/multi-segment/041s/041s",
channel_names=["ABP", "V", "I"],
physical=False,
smooth_frames=False,
)
# Sample values should match the output of rdsamp -H
np.testing.assert_array_equal(sig, sig_target)
# channel_names=[...] should give the same result as channels=[...]
self.assertEqual(record, record_named)
def test_multi_variable_a(self):
"""
Multi-segment, variable layout, selected duration, samples read
from one segment only.
Target file created with:
rdsamp -r sample-data/multi-segment/s00001/s00001-2896-10-10-00-31 -f s14428365 -t s14428375 -P | cut -f 2- > record-multi-variable-a
"""
record = wfdb.rdrecord(
"sample-data/multi-segment/s00001/s00001-2896-10-10-00-31",
sampfrom=14428365,
sampto=14428375,
)
sig_round = np.round(record.p_signal, decimals=8)
sig_target = np.genfromtxt(
"tests/target-output/record-multi-variable-a"
)
np.testing.assert_equal(sig_round, sig_target)
def test_multi_variable_b(self):
"""
Multi-segment, variable layout, selected duration, samples read
from several segments.
Target file created with:
rdsamp -r sample-data/multi-segment/s00001/s00001-2896-10-10-00-31 -f s14428364 -t s14428375 -P | cut -f 2- > record-multi-variable-b
"""
record = wfdb.rdrecord(
"sample-data/multi-segment/s00001/s00001-2896-10-10-00-31",
sampfrom=14428364,
sampto=14428375,
)
sig_round = np.round(record.p_signal, decimals=8)
sig_target = np.genfromtxt(
"tests/target-output/record-multi-variable-b"
)
np.testing.assert_equal(sig_round, sig_target)
def test_multi_variable_c(self):
"""
Multi-segment, variable layout, entire signal, physical, expanded
The reference signal creation cannot be made with rdsamp
directly because the WFDB c package (10.5.24) applies the single
adcgain and baseline values from the layout specification
header, which is undesired in multi-segment signals with
different adcgain/baseline values across segments.
Target file created with:
```
for i in {01..18}
do
rdsamp -r sample-data/multi-segment/s25047/3234460_00$i -P | cut -f 2- >> record-multi-variable-c
done
```
Entire signal has 543240 samples.
- 25740 length empty segment.
- First 16 segments have same 2 channels, length 420000
- Last 2 segments have same 3 channels, length 97500
"""
record = wfdb.rdrecord(
"sample-data/multi-segment/s25047/s25047-2704-05-04-10-44",
smooth_frames=False,
)
# convert expanded to uniform array and round to 8 digits
sig_round = np.zeros((record.sig_len, record.n_sig))
for i in range(record.n_sig):
sig_round[:, i] = np.round(record.e_p_signal[i], decimals=8)
sig_target_a = np.full((25740, 3), np.nan)
sig_target_b = np.concatenate(
(
np.genfromtxt(
"tests/target-output/record-multi-variable-c",
skip_footer=97500,
),
np.full((420000, 1), np.nan),
),
axis=1,
)
sig_target_c = np.genfromtxt(
"tests/target-output/record-multi-variable-c", skip_header=420000
)
sig_target = np.concatenate((sig_target_a, sig_target_b, sig_target_c))
np.testing.assert_equal(sig_round, sig_target)
def test_multi_variable_d(self):
"""
Multi-segment, variable layout, selected duration, selected
channels, digital. There are two channels: PLETH, and II. Their
fmt, adc_gain, and baseline do not change between the segments.
Target file created with:
rdsamp -r sample-data/multi-segment/p000878/p000878-2137-10-26-16-57 -f s3550 -t s7500 -s 0 1 | cut -f 2- | perl -p -e 's/-32768/ -128/g;' > record-multi-variable-d
"""
record = wfdb.rdrecord(
"sample-data/multi-segment/p000878/p000878-2137-10-26-16-57",
sampfrom=3550,
sampto=7500,
channels=[0, 1],
physical=False,
)
sig = record.d_signal
# Compare data streaming from Physionet
record_pn = wfdb.rdrecord(
"p000878-2137-10-26-16-57",
pn_dir="mimic3wdb/matched/p00/p000878/",
sampfrom=3550,
sampto=7500,
channels=[0, 1],
physical=False,
)
sig_target = np.genfromtxt(
"tests/target-output/record-multi-variable-d"
)
# Option of selecting channels by name
record_named = wfdb.rdrecord(
"sample-data/multi-segment/p000878/p000878-2137-10-26-16-57",
sampfrom=3550,
sampto=7500,
physical=False,
channel_names=["PLETH", "II"],
)
np.testing.assert_equal(sig, sig_target)
assert record.__eq__(record_pn)
assert record.__eq__(record_named)
|
class TestMultiRecord(unittest.TestCase):
'''
Test read and write of multi segment WFDB records, including
PhysioNet streaming.
Target files created using the original WFDB Software Package
version 10.5.24
'''
def test_multi_fixed_a(self):
'''
Multi-segment, fixed layout, read entire signal.
Target file created with:
rdsamp -r sample-data/multi-segment/fixed1/v102s -P | cut -f 2- > record-multi-fixed-a
'''
pass
def test_multi_fixed_b(self):
'''
Multi-segment, fixed layout, selected duration, samples read
from one segment.
Target file created with:
rdsamp -r sample-data/multi-segment/fixed1/v102s -t s75000 -P | cut -f 2- > record-multi-fixed-b
'''
pass
def test_multi_fixed_c(self):
'''
Multi-segment, fixed layout, selected duration and channels,
samples read from multiple segments
Target file created with:
rdsamp -r sample-data/multi-segment/fixed1/v102s -f s70000 -t s80000 -s 1 0 3 -P | cut -f 2- > record-multi-fixed-c
'''
pass
def test_multi_fixed_d(self):
'''
Multi-segment, fixed layout, multi-frequency, selected channels
Target file created with:
rdsamp -r sample-data/multi-segment/041s/ -s 3 2 1 -H |
cut -f 2- | sed s/-32768/-2048/ |
gzip -9 -n > tests/target-output/record-multi-fixed-d.gz
'''
pass
def test_multi_variable_a(self):
'''
Multi-segment, variable layout, selected duration, samples read
from one segment only.
Target file created with:
rdsamp -r sample-data/multi-segment/s00001/s00001-2896-10-10-00-31 -f s14428365 -t s14428375 -P | cut -f 2- > record-multi-variable-a
'''
pass
def test_multi_variable_b(self):
'''
Multi-segment, variable layout, selected duration, samples read
from several segments.
Target file created with:
rdsamp -r sample-data/multi-segment/s00001/s00001-2896-10-10-00-31 -f s14428364 -t s14428375 -P | cut -f 2- > record-multi-variable-b
'''
pass
def test_multi_variable_c(self):
'''
Multi-segment, variable layout, entire signal, physical, expanded
The reference signal creation cannot be made with rdsamp
directly because the WFDB c package (10.5.24) applies the single
adcgain and baseline values from the layout specification
header, which is undesired in multi-segment signals with
different adcgain/baseline values across segments.
Target file created with:
```
for i in {01..18}
do
rdsamp -r sample-data/multi-segment/s25047/3234460_00$i -P | cut -f 2- >> record-multi-variable-c
done
```
Entire signal has 543240 samples.
- 25740 length empty segment.
- First 16 segments have same 2 channels, length 420000
- Last 2 segments have same 3 channels, length 97500
'''
pass
def test_multi_variable_d(self):
'''
Multi-segment, variable layout, selected duration, selected
channels, digital. There are two channels: PLETH, and II. Their
fmt, adc_gain, and baseline do not change between the segments.
Target file created with:
rdsamp -r sample-data/multi-segment/p000878/p000878-2137-10-26-16-57 -f s3550 -t s7500 -s 0 1 | cut -f 2- | perl -p -e 's/-32768/ -128/g;' > record-multi-variable-d
'''
pass
| 9 | 9 | 28 | 4 | 16 | 9 | 1 | 0.59 | 1 | 3 | 0 | 0 | 8 | 0 | 8 | 80 | 240 | 38 | 127 | 42 | 118 | 75 | 56 | 42 | 47 | 2 | 2 | 1 | 10 |
147,682 |
MIT-LCP/wfdb-python
|
tests/test_record.py
|
tests.test_record.TestDownload
|
class TestDownload(unittest.TestCase):
# Test that we can download records with no "dat" file
# Regression test for https://github.com/MIT-LCP/wfdb-python/issues/118
def test_dl_database_no_dat_file(self):
wfdb.dl_database("afdb", self.temp_path, ["00735"])
# Test that we can download records that *do* have a "dat" file.
def test_dl_database_with_dat_file(self):
wfdb.dl_database("afdb", self.temp_path, ["04015"])
@classmethod
def setUpClass(cls):
cls.temp_directory = tempfile.TemporaryDirectory()
cls.temp_path = cls.temp_directory.name
@classmethod
def tearDownClass(cls):
cls.temp_directory.cleanup()
|
class TestDownload(unittest.TestCase):
def test_dl_database_no_dat_file(self):
pass
def test_dl_database_with_dat_file(self):
pass
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
| 7 | 0 | 2 | 0 | 2 | 0 | 1 | 0.25 | 1 | 1 | 0 | 0 | 2 | 0 | 4 | 76 | 18 | 3 | 12 | 7 | 5 | 3 | 10 | 5 | 5 | 1 | 2 | 0 | 4 |
147,683 |
MIT-LCP/wfdb-python
|
tests/test_processing.py
|
tests.test_processing.test_qrs
|
class test_qrs:
"""
Testing QRS detectors
"""
def test_xqrs(self):
"""
Run XQRS detector on record 100 and compare to reference annotations
"""
sig, fields = wfdb.rdsamp("sample-data/100", channels=[0])
ann_ref = wfdb.rdann("sample-data/100", "atr")
xqrs = processing.XQRS(sig=sig[:, 0], fs=fields["fs"])
xqrs.detect()
comparitor = processing.compare_annotations(
ann_ref.sample[1:], xqrs.qrs_inds, int(0.1 * fields["fs"])
)
assert comparitor.sensitivity > 0.99
assert comparitor.positive_predictivity > 0.99
|
class test_qrs:
'''
Testing QRS detectors
'''
def test_xqrs(self):
'''
Run XQRS detector on record 100 and compare to reference annotations
'''
pass
| 2 | 2 | 16 | 3 | 10 | 3 | 1 | 0.55 | 0 | 2 | 1 | 0 | 1 | 0 | 1 | 1 | 21 | 4 | 11 | 6 | 9 | 6 | 9 | 6 | 7 | 1 | 0 | 0 | 1 |
147,684 |
MIT-LCP/wfdb-python
|
tests/test_processing.py
|
tests.test_processing.test_processing
|
class test_processing:
"""
Test processing functions
"""
def test_resample_single(self):
sig, fields = wfdb.rdsamp("sample-data/100")
ann = wfdb.rdann("sample-data/100", "atr")
fs = fields["fs"]
fs_target = 50
new_sig, new_ann = processing.resample_singlechan(
sig[:, 0], ann, fs, fs_target
)
expected_length = int(sig.shape[0] * fs_target / fs)
assert new_sig.shape[0] == expected_length
def test_resample_multi(self):
sig, fields = wfdb.rdsamp("sample-data/100")
ann = wfdb.rdann("sample-data/100", "atr")
fs = fields["fs"]
fs_target = 50
new_sig, new_ann = processing.resample_multichan(
sig, ann, fs, fs_target
)
expected_length = int(sig.shape[0] * fs_target / fs)
assert new_sig.shape[0] == expected_length
assert new_sig.shape[1] == sig.shape[1]
def test_normalize_bound(self):
sig, _ = wfdb.rdsamp("sample-data/100")
lb = -5
ub = 15
x = processing.normalize_bound(sig[:, 0], lb, ub)
assert x.shape[0] == sig.shape[0]
assert np.min(x) >= lb
assert np.max(x) <= ub
def test_find_peaks(self):
x = [0, 2, 1, 0, -10, -15, -15, -15, 9, 8, 0, 0, 1, 2, 10]
hp, sp = processing.find_peaks(x)
assert np.array_equal(hp, [1, 8])
assert np.array_equal(sp, [6, 10])
def test_find_peaks_empty(self):
x = []
hp, sp = processing.find_peaks(x)
assert hp.shape == (0,)
assert sp.shape == (0,)
def test_gqrs(self):
record = wfdb.rdrecord(
"sample-data/100",
channels=[0],
sampfrom=9998,
sampto=19998,
physical=False,
)
expected_peaks = [
271,
580,
884,
1181,
1469,
1770,
2055,
2339,
2634,
2939,
3255,
3551,
3831,
4120,
4412,
4700,
5000,
5299,
5596,
5889,
6172,
6454,
6744,
7047,
7347,
7646,
7936,
8216,
8503,
8785,
9070,
9377,
9682,
]
peaks = processing.gqrs_detect(
d_sig=record.d_signal[:, 0],
fs=record.fs,
adc_gain=record.adc_gain[0],
adc_zero=record.adc_zero[0],
threshold=1.0,
)
assert np.array_equal(peaks, expected_peaks)
def test_correct_peaks(self):
sig, fields = wfdb.rdsamp("sample-data/100")
ann = wfdb.rdann("sample-data/100", "atr")
fs = fields["fs"]
min_bpm = 10
max_bpm = 350
min_gap = fs * 60 / min_bpm
max_gap = fs * 60 / max_bpm
y_idxs = processing.correct_peaks(
sig=sig[:, 0],
peak_inds=ann.sample,
search_radius=int(max_gap),
smooth_window_size=150,
)
yz = np.zeros(sig.shape[0])
yz[y_idxs] = 1
yz = np.where(yz[:10000] == 1)[0]
expected_peaks = [
77,
370,
663,
947,
1231,
1515,
1809,
2045,
2403,
2706,
2998,
3283,
3560,
3863,
4171,
4466,
4765,
5061,
5347,
5634,
5919,
6215,
6527,
6824,
7106,
7393,
7670,
7953,
8246,
8539,
8837,
9142,
9432,
9710,
9998,
]
assert np.array_equal(yz, expected_peaks)
|
class test_processing:
'''
Test processing functions
'''
def test_resample_single(self):
pass
def test_resample_multi(self):
pass
def test_normalize_bound(self):
pass
def test_find_peaks(self):
pass
def test_find_peaks_empty(self):
pass
def test_gqrs(self):
pass
def test_correct_peaks(self):
pass
| 8 | 1 | 23 | 2 | 21 | 0 | 1 | 0.02 | 0 | 1 | 0 | 0 | 7 | 0 | 7 | 7 | 172 | 23 | 146 | 41 | 138 | 3 | 55 | 41 | 47 | 1 | 0 | 0 | 7 |
147,685 |
MIT-LCP/wfdb-python
|
tests/test_plot.py
|
tests.test_plot.TestPlotWfdb
|
class TestPlotWfdb(unittest.TestCase):
"""
Tests for the wfdb.plot_wfdb function
"""
def assertAxesMatchSignal(self, axes, signal, t_divisor=1):
"""
Check that axis limits are reasonable for plotting a signal array.
Parameters
----------
axes : matplotlib.axes.Axes
An Axes object.
signal : numpy.ndarray
A one-dimensional array of sample values.
t_divisor : float, optional
The intended plotting resolution (number of samples of `signal`
per unit of the X axis.)
"""
xmin, xmax = axes.get_xlim()
tmin = 0
tmax = (len(signal) - 1) / t_divisor
# The range from tmin to tmax should fit within the plot.
self.assertLessEqual(
xmin,
tmin,
msg=f"X range is [{xmin}, {xmax}]; expected [{tmin}, {tmax}]",
)
self.assertGreaterEqual(
xmax,
tmax,
msg=f"X range is [{xmin}, {xmax}]; expected [{tmin}, {tmax}]",
)
# The padding on left and right sides should be approximately equal.
self.assertAlmostEqual(
xmin - tmin,
tmax - xmax,
delta=(tmax - tmin) / 10 + 1 / t_divisor,
msg=f"X range is [{xmin}, {xmax}]; expected [{tmin}, {tmax}]",
)
ymin, ymax = axes.get_ylim()
vmin = np.nanmin(signal)
vmax = np.nanmax(signal)
# The range from vmin to vmax should fit within the plot.
self.assertLessEqual(
ymin,
vmin,
msg=f"Y range is [{ymin}, {ymax}]; expected [{vmin}, {vmax}]",
)
self.assertGreaterEqual(
ymax,
vmax,
msg=f"Y range is [{ymin}, {ymax}]; expected [{vmin}, {vmax}]",
)
# The padding on top and bottom should be approximately equal.
self.assertAlmostEqual(
ymin - vmin,
vmax - ymax,
delta=(vmax - vmin) / 10,
msg=f"Y range is [{ymin}, {ymax}]; expected [{vmin}, {vmax}]",
)
def test_physical_smooth(self):
"""
Plot a record with physical, single-frequency data
"""
record = wfdb.rdrecord(
"sample-data/100",
sampto=1000,
physical=True,
smooth_frames=True,
)
self.assertIsNotNone(record.p_signal)
annotation = wfdb.rdann("sample-data/100", "atr", sampto=1000)
fig = wfdb.plot_wfdb(
record,
annotation,
time_units="samples",
ecg_grids="all",
return_fig=True,
)
plt.close(fig)
self.assertEqual(len(fig.axes), record.n_sig)
for ch in range(record.n_sig):
self.assertAxesMatchSignal(fig.axes[ch], record.p_signal[:, ch])
def test_digital_smooth(self):
"""
Plot a record with digital, single-frequency data
"""
record = wfdb.rdrecord(
"sample-data/drive02",
sampto=1000,
physical=False,
smooth_frames=True,
)
self.assertIsNotNone(record.d_signal)
fig = wfdb.plot_wfdb(record, time_units="seconds", return_fig=True)
plt.close(fig)
self.assertEqual(len(fig.axes), record.n_sig)
for ch in range(record.n_sig):
self.assertAxesMatchSignal(
fig.axes[ch], record.d_signal[:, ch], record.fs
)
def test_physical_multifrequency(self):
"""
Plot a record with physical, multi-frequency data
"""
record = wfdb.rdrecord(
"sample-data/wave_4",
sampto=10,
physical=True,
smooth_frames=False,
)
self.assertIsNotNone(record.e_p_signal)
fig = wfdb.plot_wfdb(record, time_units="seconds", return_fig=True)
plt.close(fig)
self.assertEqual(len(fig.axes), record.n_sig)
for ch in range(record.n_sig):
self.assertAxesMatchSignal(
fig.axes[ch],
record.e_p_signal[ch],
record.fs * record.samps_per_frame[ch],
)
def test_digital_multifrequency(self):
"""
Plot a record with digital, multi-frequency data
"""
record = wfdb.rdrecord(
"sample-data/multi-segment/041s/041s",
sampto=1000,
physical=False,
smooth_frames=False,
)
self.assertIsNotNone(record.e_d_signal)
fig = wfdb.plot_wfdb(record, time_units="seconds", return_fig=True)
plt.close(fig)
self.assertEqual(len(fig.axes), record.n_sig)
for ch in range(record.n_sig):
self.assertAxesMatchSignal(
fig.axes[ch],
record.e_d_signal[ch],
record.fs * record.samps_per_frame[ch],
)
|
class TestPlotWfdb(unittest.TestCase):
'''
Tests for the wfdb.plot_wfdb function
'''
def assertAxesMatchSignal(self, axes, signal, t_divisor=1):
'''
Check that axis limits are reasonable for plotting a signal array.
Parameters
----------
axes : matplotlib.axes.Axes
An Axes object.
signal : numpy.ndarray
A one-dimensional array of sample values.
t_divisor : float, optional
The intended plotting resolution (number of samples of `signal`
per unit of the X axis.)
'''
pass
def test_physical_smooth(self):
'''
Plot a record with physical, single-frequency data
'''
pass
def test_digital_smooth(self):
'''
Plot a record with digital, single-frequency data
'''
pass
def test_physical_multifrequency(self):
'''
Plot a record with physical, multi-frequency data
'''
pass
def test_digital_multifrequency(self):
'''
Plot a record with digital, multi-frequency data
'''
pass
| 6 | 6 | 30 | 2 | 22 | 6 | 2 | 0.28 | 1 | 1 | 0 | 0 | 5 | 0 | 5 | 77 | 157 | 17 | 109 | 25 | 103 | 31 | 47 | 25 | 41 | 2 | 2 | 1 | 9 |
147,686 |
MIT-LCP/wfdb-python
|
wfdb/io/datasource.py
|
wfdb.io.datasource.DataSource
|
class DataSource:
def __init__(self, name: str, ds_type: DataSourceType, uri: str):
self.name = name
self.ds_type = ds_type
self.uri = uri
def __str__(self):
return f"{self.name} : {self.ds_type} : {self.uri}"
@property
def uri(self):
return self._uri
@uri.setter
def uri(self, value: str):
if self.ds_type == DataSourceType.LOCAL:
path = Path(value)
if not path.is_absolute():
raise ValueError(
"uri field for a LOCAL DataSource must be a valid absolute path"
)
elif self.ds_type is DataSourceType.HTTP:
url = urlparse(value)
if not url.netloc:
raise ValueError(
"uri field for an HTTP DataSource must be a valid URL"
)
self._uri = value
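A brief sketch of the uri validation above; the names, paths and URLs are illustrative. DataSourceType.LOCAL and DataSourceType.HTTP are the enum members exercised by the accompanying tests.
local = DataSource("mydata", DataSourceType.LOCAL, "/data/wfdb")   # absolute path: accepted
remote = DataSource("pn", DataSourceType.HTTP, "https://physionet.org/files/")
# DataSource("bad", DataSourceType.LOCAL, "relative/path")   -> ValueError
# DataSource("bad", DataSourceType.HTTP, "physionet.org")    -> ValueError (no netloc)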
|
class DataSource:
def __init__(self, name: str, ds_type: DataSourceType, uri: str):
pass
def __str__(self):
pass
@property
def uri(self):
pass
@uri.setter
def uri(self):
pass
| 7 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 0 | 4 | 1 | 0 | 4 | 3 | 4 | 4 | 28 | 3 | 25 | 12 | 18 | 0 | 18 | 10 | 13 | 5 | 0 | 2 | 8 |
147,687 |
MIT-LCP/wfdb-python
|
tests/io/test_convert.py
|
test_convert.TestCsvToWfdb
|
class TestCsvToWfdb(unittest.TestCase):
"""
Tests for the io.convert.csv module.
"""
def setUp(self):
"""
Create a temporary directory containing data for testing.
Load 100.dat file for comparison to 100.csv file.
"""
self.test_dir = "test_output"
os.makedirs(self.test_dir, exist_ok=True)
self.record_100_csv = "sample-data/100.csv"
self.record_100_dat = rdrecord("sample-data/100", physical=True)
def tearDown(self):
"""
Remove the temporary directory after the test.
"""
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
def test_write_dir(self):
"""
Call the function with the write_dir argument.
"""
csv_to_wfdb(
file_name=self.record_100_csv,
fs=360,
units="mV",
write_dir=self.test_dir,
)
# Check if the output files are created in the specified directory
base_name = os.path.splitext(os.path.basename(self.record_100_csv))[0]
expected_dat_file = os.path.join(self.test_dir, f"{base_name}.dat")
expected_hea_file = os.path.join(self.test_dir, f"{base_name}.hea")
self.assertTrue(os.path.exists(expected_dat_file))
self.assertTrue(os.path.exists(expected_hea_file))
# Check that newly written file matches the 100.dat file
record_write = rdrecord(os.path.join(self.test_dir, base_name))
self.assertEqual(record_write.fs, 360)
self.assertEqual(record_write.fs, self.record_100_dat.fs)
self.assertEqual(record_write.units, ["mV", "mV"])
self.assertEqual(record_write.units, self.record_100_dat.units)
self.assertEqual(record_write.sig_name, ["MLII", "V5"])
self.assertEqual(record_write.sig_name, self.record_100_dat.sig_name)
self.assertEqual(record_write.p_signal.size, 1300000)
self.assertEqual(
record_write.p_signal.size, self.record_100_dat.p_signal.size
)
|
class TestCsvToWfdb(unittest.TestCase):
'''
Tests for the io.convert.csv module.
'''
def setUp(self):
'''
Create a temporary directory containing data for testing.
Load 100.dat file for comparison to 100.csv file.
'''
pass
def tearDown(self):
'''
Remove the temporary directory after the test.
'''
pass
def test_write_dir(self):
'''
Call the function with the write_dir argument.
'''
pass
| 4 | 4 | 16 | 2 | 10 | 4 | 1 | 0.47 | 1 | 0 | 0 | 0 | 3 | 3 | 3 | 75 | 56 | 9 | 32 | 11 | 28 | 15 | 25 | 11 | 21 | 2 | 2 | 1 | 4 |
147,688 |
MIT-LCP/wfdb-python
|
tests/test_multi_record.py
|
tests.test_multi_record.TestMultiRecordRanges
|
class TestMultiRecordRanges:
"""
Test logic that deduces relevant segments/ranges for given signals.
"""
def test_contained_ranges_simple_cases(self):
record = wfdb.MultiRecord(
segments=[
wfdb.Record(sig_name=["I", "II"], sig_len=5),
wfdb.Record(sig_name=["I", "III"], sig_len=10),
],
)
assert record.contained_ranges("I") == [(0, 15)]
assert record.contained_ranges("II") == [(0, 5)]
assert record.contained_ranges("III") == [(5, 15)]
def test_contained_ranges_variable_layout(self):
record = wfdb.rdheader(
"sample-data/multi-segment/s00001/s00001-2896-10-10-00-31",
rd_segments=True,
)
assert record.contained_ranges("II") == [
(3261, 10136),
(4610865, 10370865),
(10528365, 14518365),
]
assert record.contained_ranges("V") == [
(3261, 918261),
(920865, 4438365),
(4610865, 10370865),
(10528365, 14518365),
]
assert record.contained_ranges("MCL1") == [
(10136, 918261),
(920865, 4438365),
]
assert record.contained_ranges("ABP") == [
(14428365, 14450865),
(14458365, 14495865),
]
def test_contained_ranges_fixed_layout(self):
record = wfdb.rdheader(
"sample-data/multi-segment/041s/041s",
rd_segments=True,
)
for sig_name in record.sig_name:
assert record.contained_ranges(sig_name) == [(0, 2000)]
def test_contained_combined_ranges_simple_cases(self):
record = wfdb.MultiRecord(
segments=[
wfdb.Record(sig_name=["I", "II", "V"], sig_len=5),
wfdb.Record(sig_name=["I", "III", "V"], sig_len=10),
wfdb.Record(sig_name=["I", "II", "V"], sig_len=20),
],
)
assert record.contained_combined_ranges(["I", "II"]) == [
(0, 5),
(15, 35),
]
assert record.contained_combined_ranges(["II", "III"]) == []
assert record.contained_combined_ranges(["I", "III"]) == [(5, 15)]
assert record.contained_combined_ranges(["I", "II", "V"]) == [
(0, 5),
(15, 35),
]
def test_contained_combined_ranges_variable_layout(self):
record = wfdb.rdheader(
"sample-data/multi-segment/s00001/s00001-2896-10-10-00-31",
rd_segments=True,
)
assert record.contained_combined_ranges(["II", "V"]) == [
(3261, 10136),
(4610865, 10370865),
(10528365, 14518365),
]
assert record.contained_combined_ranges(["II", "MCL1"]) == []
assert record.contained_combined_ranges(["II", "ABP"]) == [
(14428365, 14450865),
(14458365, 14495865),
]
assert record.contained_combined_ranges(["II", "V", "ABP"]) == [
(14428365, 14450865),
(14458365, 14495865),
]
assert (
record.contained_combined_ranges(["II", "V", "MCL1", "ABP"]) == []
)
def test_contained_combined_ranges_fixed_layout(self):
record = wfdb.rdheader(
"sample-data/multi-segment/041s/041s",
rd_segments=True,
)
for sig_1 in record.sig_name:
for sig_2 in record.sig_name:
if sig_1 == sig_2:
continue
assert record.contained_combined_ranges([sig_1, sig_2]) == [
(0, 2000)
]
|
class TestMultiRecordRanges:
'''
Test logic that deduces relevant segments/ranges for given signals.
'''
def test_contained_ranges_simple_cases(self):
pass
def test_contained_ranges_variable_layout(self):
pass
def test_contained_ranges_fixed_layout(self):
pass
def test_contained_combined_ranges_simple_cases(self):
pass
def test_contained_combined_ranges_variable_layout(self):
pass
def test_contained_combined_ranges_fixed_layout(self):
pass
| 7 | 1 | 17 | 1 | 16 | 0 | 2 | 0.03 | 0 | 2 | 2 | 0 | 6 | 0 | 6 | 6 | 110 | 13 | 94 | 16 | 87 | 3 | 36 | 16 | 29 | 4 | 0 | 3 | 10 |
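A minimal sketch of the range-deduction API tested above. The in-memory MultiRecord mirrors the simple case from the test, so no sample files are needed; the expected outputs in the comments follow the same logic as the test assertions.

import wfdb

record = wfdb.MultiRecord(
    segments=[
        wfdb.Record(sig_name=["I", "II"], sig_len=5),
        wfdb.Record(sig_name=["I", "III"], sig_len=10),
    ],
)

# Frame ranges in which each signal is present.
print(record.contained_ranges("I"))    # expected [(0, 15)]
print(record.contained_ranges("III"))  # expected [(5, 15)]

# Frame ranges in which all of the listed signals are present together.
print(record.contained_combined_ranges(["I", "III"]))  # expected [(5, 15)]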
147,689 | MIT-LCP/wfdb-python | tests/test_datasource.py | tests.test_datasource.TestDataSource |
class TestDataSource:
def test_create_valid_local_ds(self):
ds = DataSource(
name="localds",
ds_type=DataSourceType.LOCAL,
uri=LOCAL_PATH,
)
assert ds
def test_create_invalid_local_ds(self):
with pytest.raises(ValueError):
DataSource(
name="localds",
ds_type=DataSourceType.LOCAL,
uri="notabsolute",
)
def test_create_valid_http_ds(self):
ds = DataSource(
name="httpds",
ds_type=DataSourceType.HTTP,
uri="http://bigdata.com",
)
assert ds.uri == "http://bigdata.com"
def test_create_invalid_http_ds(self):
with pytest.raises(ValueError):
DataSource(
name="httpds",
ds_type=DataSourceType.HTTP,
uri="www.bigdata.com",
)
def test_add_reset_ds(self):
ds = DataSource(
name="localds",
ds_type=DataSourceType.LOCAL,
uri=LOCAL_PATH,
)
add_data_source(ds)
assert len(_data_sources) == 2
assert _data_sources[ds.name] == ds
# We rely on reset_data_sources for test cleanup.
reset_data_sources(keep_pn=True)
assert len(_data_sources) == 1
def test_add_multiple_ds(self):
ds1 = DataSource(
name="localds",
ds_type=DataSourceType.LOCAL,
uri=LOCAL_PATH,
)
add_data_source(ds1)
ds2 = DataSource(
name="anotherlocalds",
ds_type=DataSourceType.LOCAL,
uri=LOCAL_PATH,
)
add_data_source(ds2)
assert len(_data_sources) == 3
assert _data_sources[ds1.name] == ds1
assert _data_sources[ds2.name] == ds2
reset_data_sources(keep_pn=True)
def test_remove_ds(self):
ds = DataSource(
name="localds",
ds_type=DataSourceType.LOCAL,
uri=LOCAL_PATH,
)
add_data_source(ds)
remove_data_source("localds")
assert len(_data_sources) == 1
def test_unique_ds_names(self):
ds = DataSource(
name="localds",
ds_type=DataSourceType.LOCAL,
uri=LOCAL_PATH,
)
add_data_source(ds)
# Cannot set multiple data sources with the same name
with pytest.raises(ValueError):
add_data_source(ds)
reset_data_sources(keep_pn=True)
|
class TestDataSource:
def test_create_valid_local_ds(self):
pass
def test_create_invalid_local_ds(self):
pass
def test_create_valid_http_ds(self):
pass
def test_create_invalid_http_ds(self):
pass
def test_add_reset_ds(self):
pass
def test_add_multiple_ds(self):
pass
def test_remove_ds(self):
pass
def test_unique_ds_names(self):
pass
| 9 | 0 | 10 | 0 | 9 | 0 | 1 | 0.03 | 0 | 2 | 1 | 0 | 8 | 0 | 8 | 8 | 86 | 8 | 76 | 16 | 67 | 2 | 40 | 16 | 31 | 1 | 0 | 1 | 8 |
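A minimal sketch of the registry helpers used by TestDataSource above. The function names come from the test; the import path and the absolute path standing in for LOCAL_PATH are assumptions.

from wfdb.io.datasource import (  # assumed import path
    DataSource,
    DataSourceType,
    add_data_source,
    remove_data_source,
    reset_data_sources,
)

ds = DataSource(name="localds", ds_type=DataSourceType.LOCAL, uri="/data/wfdb")
add_data_source(ds)               # adding a second source with the same name raises ValueError
remove_data_source("localds")     # drop it again by name
reset_data_sources(keep_pn=True)  # keep only the default PhysioNet source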
147,690 | MIT-LCP/wfdb-python | tests/test_annotation.py | tests.test_annotation.TestAnnotation |
class TestAnnotation(unittest.TestCase):
"""
Testing read and write of WFDB annotations, including Physionet
streaming.
Target files created using the original WFDB Software Package
version 10.5.24
"""
def test_1(self):
"""
Target file created with:
rdann -r sample-data/100 -a atr > ann-1
"""
annotation = wfdb.rdann("sample-data/100", "atr")
# This is not the fault of the script. The annotation file specifies a
# length-3 aux_note field with a null written after '(N' which the script
# correctly picks up. I am just getting rid of the null in this unit test to
# compare with the regexp output below which has no null to detect in the
# output text file of rdann.
annotation.aux_note[0] = "(N"
# Target data from WFDB software package
with open("tests/target-output/ann-1", "r") as f:
lines = tuple(f)
nannot = len(lines)
target_time = [None] * nannot
target_sample = np.empty(nannot, dtype="object")
target_symbol = [None] * nannot
target_subtype = np.empty(nannot, dtype="object")
target_chan = np.empty(nannot, dtype="object")
target_num = np.empty(nannot, dtype="object")
target_aux_note = [None] * nannot
RXannot = re.compile(
r"[ \t]*(?P<time>[\[\]\w\.:]+) +(?P<sample>\d+) +(?P<symbol>.) +(?P<subtype>\d+) +(?P<chan>\d+) +(?P<num>\d+)\t?(?P<aux_note>.*)"
)
for i in range(0, nannot):
(
target_time[i],
target_sample[i],
target_symbol[i],
target_subtype[i],
target_chan[i],
target_num[i],
target_aux_note[i],
) = RXannot.findall(lines[i])[0]
# Convert objects into integers
target_sample = target_sample.astype("int")
target_num = target_num.astype("int")
target_subtype = target_subtype.astype("int")
target_chan = target_chan.astype("int")
# Compare
comp = [
np.array_equal(annotation.sample, target_sample),
np.array_equal(annotation.symbol, target_symbol),
np.array_equal(annotation.subtype, target_subtype),
np.array_equal(annotation.chan, target_chan),
np.array_equal(annotation.num, target_num),
annotation.aux_note == target_aux_note,
]
# Test file streaming
pn_annotation = wfdb.rdann(
"100",
"atr",
pn_dir="mitdb",
return_label_elements=["label_store", "symbol"],
)
pn_annotation.aux_note[0] = "(N"
pn_annotation.create_label_map()
# Test file writing
annotation.wrann(write_fs=True, write_dir=self.temp_path)
write_annotation = wfdb.rdann(
os.path.join(self.temp_path, "100"),
"atr",
return_label_elements=["label_store", "symbol"],
)
write_annotation.create_label_map()
assert comp == [True] * 6
assert annotation.__eq__(pn_annotation)
assert annotation.__eq__(write_annotation)
def test_2(self):
"""
Annotation file with many aux_note strings.
Target file created with:
rdann -r sample-data/100 -a atr > ann-2
"""
annotation = wfdb.rdann("sample-data/12726", "anI")
# Target data from WFDB software package
with open("tests/target-output/ann-2", "r") as f:
lines = tuple(f)
nannot = len(lines)
target_time = [None] * nannot
target_sample = np.empty(nannot, dtype="object")
target_symbol = [None] * nannot
target_subtype = np.empty(nannot, dtype="object")
target_chan = np.empty(nannot, dtype="object")
target_num = np.empty(nannot, dtype="object")
target_aux_note = [None] * nannot
RXannot = re.compile(
r"[ \t]*(?P<time>[\[\]\w\.:]+) +(?P<sample>\d+) +(?P<symbol>.) +(?P<subtype>\d+) +(?P<chan>\d+) +(?P<num>\d+)\t?(?P<aux_note>.*)"
)
for i in range(0, nannot):
(
target_time[i],
target_sample[i],
target_symbol[i],
target_subtype[i],
target_chan[i],
target_num[i],
target_aux_note[i],
) = RXannot.findall(lines[i])[0]
# Convert objects into integers
target_sample = target_sample.astype("int")
target_num = target_num.astype("int")
target_subtype = target_subtype.astype("int")
target_chan = target_chan.astype("int")
# Compare
comp = [
np.array_equal(annotation.sample, target_sample),
np.array_equal(annotation.symbol, target_symbol),
np.array_equal(annotation.subtype, target_subtype),
np.array_equal(annotation.chan, target_chan),
np.array_equal(annotation.num, target_num),
annotation.aux_note == target_aux_note,
]
# Test file streaming
pn_annotation = wfdb.rdann(
"12726",
"anI",
pn_dir="prcp",
return_label_elements=["label_store", "symbol"],
)
pn_annotation.create_label_map()
# Test file writing
annotation.wrann(write_fs=True, write_dir=self.temp_path)
write_annotation = wfdb.rdann(
os.path.join(self.temp_path, "12726"),
"anI",
return_label_elements=["label_store", "symbol"],
)
write_annotation.create_label_map()
assert comp == [True] * 6
assert annotation.__eq__(pn_annotation)
assert annotation.__eq__(write_annotation)
def test_3(self):
"""
Annotation file with custom annotation types
Target file created with:
rdann -r sample-data/1003 -a atr > ann-3
"""
annotation = wfdb.rdann("sample-data/1003", "atr")
# Target data from WFDB software package
with open("tests/target-output/ann-3", "r") as f:
lines = tuple(f)
nannot = len(lines)
target_time = [None] * nannot
target_sample = np.empty(nannot, dtype="object")
target_symbol = [None] * nannot
target_subtype = np.empty(nannot, dtype="object")
target_chan = np.empty(nannot, dtype="object")
target_num = np.empty(nannot, dtype="object")
target_aux_note = [None] * nannot
RXannot = re.compile(
r"[ \t]*(?P<time>[\[\]\w\.:]+) +(?P<sample>\d+) +(?P<symbol>.) +(?P<subtype>\d+) +(?P<chan>\d+) +(?P<num>\d+)\t?(?P<aux_note>.*)"
)
for i in range(0, nannot):
(
target_time[i],
target_sample[i],
target_symbol[i],
target_subtype[i],
target_chan[i],
target_num[i],
target_aux_note[i],
) = RXannot.findall(lines[i])[0]
# Convert objects into integers
target_sample = target_sample.astype("int")
target_num = target_num.astype("int")
target_subtype = target_subtype.astype("int")
target_chan = target_chan.astype("int")
# Compare
comp = [
np.array_equal(annotation.sample, target_sample),
np.array_equal(annotation.symbol, target_symbol),
np.array_equal(annotation.subtype, target_subtype),
np.array_equal(annotation.chan, target_chan),
np.array_equal(annotation.num, target_num),
annotation.aux_note == target_aux_note,
]
# Test file streaming
pn_annotation = wfdb.rdann(
"1003",
"atr",
pn_dir="challenge-2014/set-p2",
return_label_elements=["label_store", "symbol"],
)
pn_annotation.create_label_map()
# Test file writing
annotation.wrann(write_fs=True, write_dir=self.temp_path)
write_annotation = wfdb.rdann(
os.path.join(self.temp_path, "1003"),
"atr",
return_label_elements=["label_store", "symbol"],
)
write_annotation.create_label_map()
assert comp == [True] * 6
assert annotation.__eq__(pn_annotation)
assert annotation.__eq__(write_annotation)
def test_4(self):
"""
Read and write annotations with large time skips
Annotation file created by:
echo "xxxxxxxxx 10000000000 N 0 0 0" | wrann -r huge -a qrs
"""
annotation = wfdb.rdann("sample-data/huge", "qrs")
self.assertEqual(annotation.sample[0], 10000000000)
annotation.wrann(write_dir=self.temp_path)
annotation1 = wfdb.rdann("sample-data/huge", "qrs")
annotation2 = wfdb.rdann(os.path.join(self.temp_path, "huge"), "qrs")
self.assertEqual(annotation1, annotation2)
def test_5(self):
"""
Write and read annotations with custom labels.
"""
ann_idx = np.array([1, 1000, 2000, 3000])
ann_chan = np.array([3, 1, 2, 3])
# write custom labels
ann_label_store = np.array([4, 2, 1, 3])
ann_custom_labels = {
"label_store": [1, 2, 3, 4],
"symbol": ["v", "l", "r", "z"],
"description": ["pvc", "lbbb", "rbbb", "pac"],
}
ann_custom_labels = pd.DataFrame(data=ann_custom_labels)
wfdb.wrann(
"CustomLabel",
"atr",
ann_idx,
chan=ann_chan,
custom_labels=ann_custom_labels,
label_store=ann_label_store,
write_dir=self.temp_path,
)
ann = wfdb.rdann(os.path.join(self.temp_path, "CustomLabel"), "atr")
self.assertEqual(ann.symbol, ["z", "l", "v", "r"])
@classmethod
def setUpClass(cls):
cls.temp_directory = tempfile.TemporaryDirectory()
cls.temp_path = cls.temp_directory.name
@classmethod
def tearDownClass(cls):
cls.temp_directory.cleanup()
|
class TestAnnotation(unittest.TestCase):
'''
Testing read and write of WFDB annotations, including Physionet
streaming.
Target files created using the original WFDB Software Package
version 10.5.24
'''
def test_1(self):
'''
Target file created with:
rdann -r sample-data/100 -a atr > ann-1
'''
pass
def test_2(self):
'''
Annotation file with many aux_note strings.
Target file created with:
rdann -r sample-data/100 -a atr > ann-2
'''
pass
def test_3(self):
'''
Annotation file with custom annotation types
Target file created with:
rdann -r sample-data/1003 -a atr > ann-3
'''
pass
def test_4(self):
'''
Read and write annotations with large time skips
Annotation file created by:
echo "xxxxxxxxx 10000000000 N 0 0 0" | wrann -r huge -a qrs
'''
pass
def test_5(self):
'''
Write and read annotations with custom labels.
'''
pass
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
| 10 | 6 | 39 | 4 | 28 | 6 | 1 | 0.24 | 1 | 3 | 0 | 0 | 5 | 1 | 7 | 79 | 288 | 39 | 200 | 67 | 190 | 49 | 108 | 61 | 100 | 2 | 2 | 1 | 10 |
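A minimal write/read sketch for the annotation round trip covered by TestAnnotation above, assuming wfdb is installed; the record name, sample locations, and symbols are illustrative only.

import os
import tempfile

import numpy as np
import wfdb

with tempfile.TemporaryDirectory() as tmp:
    # Write three beat annotations at samples 10, 20 and 400.
    wfdb.wrann(
        "rec1",
        "atr",
        sample=np.array([10, 20, 400]),
        symbol=["N", "N", "V"],
        write_dir=tmp,
    )
    # Read them back and show the stored samples and symbols.
    ann = wfdb.rdann(os.path.join(tmp, "rec1"), "atr")
    print(ann.sample, ann.symbol)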
147,691 | MIT-LCP/wfdb-python | wfdb/io/annotation.py | wfdb.io.annotation.AnnotationLabel |
class AnnotationLabel(object):
"""
Describes the individual annotation labels.
Attributes
----------
label_store : int
The value used to store the labels.
symbol : str
The shortened version of the annotation label abbreviation.
short_description : str
The shortened version of the description provided with the annotation.
description : str
The description provided with the annotation.
"""
def __init__(self, label_store, symbol, short_description, description):
self.label_store = label_store
self.symbol = symbol
self.short_description = short_description
self.description = description
def __str__(self):
return (
str(self.label_store)
+ ", "
+ str(self.symbol)
+ ", "
+ str(self.short_description)
+ ", "
+ str(self.description)
)
|
class AnnotationLabel(object):
'''
Describes the individual annotation labels.
Attributes
----------
label_store : int
The value used to store the labels.
symbol : str
The shortened version of the annotation label abbreviation.
short_description : str
The shortened version of the description provided with the annotation.
description : str
The description provided with the annotation.
'''
def __init__(self, label_store, symbol, short_description, description):
pass
def __str__(self):
pass
| 3 | 1 | 8 | 0 | 8 | 0 | 1 | 0.81 | 1 | 1 | 0 | 0 | 2 | 4 | 2 | 2 | 33 | 4 | 16 | 7 | 13 | 13 | 8 | 7 | 5 | 1 | 1 | 0 | 2 |
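A minimal sketch constructing the label container defined above; the import path follows this record's file_path, and the field values copy one row of the standard label table quoted later in this section.

from wfdb.io.annotation import AnnotationLabel

label = AnnotationLabel(5, "V", "PVC", "Premature ventricular contraction")
print(label)  # -> "5, V, PVC, Premature ventricular contraction"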
147,692 | MIT-LCP/wfdb-python | tests/test_record.py | tests.test_record.TestSignal |
class TestSignal(unittest.TestCase):
"""
For lower level signal tests
"""
def test_infer_sig_len(self):
"""
Infer the signal length of a record without the sig_len header
Read two headers. The records should be the same.
"""
record = wfdb.rdrecord("sample-data/drive02")
record_2 = wfdb.rdrecord("sample-data/drive02-no-len")
record_2.record_name = record.record_name
assert record_2.__eq__(record)
record = wfdb.rdrecord("sample-data/a103l")
record_2 = wfdb.rdrecord("sample-data/a103l-no-len")
record_2.record_name = record.record_name
assert record_2.__eq__(record)
def test_physical_conversion(self):
n_sig = 3
adc_gain = [1.0, 1234.567, 765.4321]
baseline = [10, 20, -30]
d_signal = np.repeat(np.arange(-100, 100), 3).reshape(-1, 3)
d_signal[5:10, :] = [-32768, -2048, -128]
e_d_signal = list(d_signal.transpose())
fmt = ["16", "212", "80"]
# Test adding or subtracting a small offset (0.01 ADU) to check
# that we correctly round to the nearest integer
for offset in (0, -0.01, 0.01):
p_signal = (d_signal + offset - baseline) / adc_gain
p_signal[5:10, :] = np.nan
e_p_signal = list(p_signal.transpose())
# Test converting p_signal to d_signal
record = wfdb.Record(
p_signal=p_signal.copy(),
adc_gain=adc_gain,
baseline=baseline,
fmt=fmt,
)
d_signal_converted = record.adc(expanded=False, inplace=False)
np.testing.assert_array_equal(d_signal_converted, d_signal)
record.adc(expanded=False, inplace=True)
np.testing.assert_array_equal(record.d_signal, d_signal)
# Test converting e_p_signal to e_d_signal
record = wfdb.Record(
e_p_signal=[s.copy() for s in e_p_signal],
adc_gain=adc_gain,
baseline=baseline,
fmt=fmt,
)
e_d_signal_converted = record.adc(expanded=True, inplace=False)
self.assertEqual(len(e_d_signal_converted), n_sig)
for x, y in zip(e_d_signal_converted, e_d_signal):
np.testing.assert_array_equal(x, y)
record.adc(expanded=True, inplace=True)
self.assertEqual(len(record.e_d_signal), n_sig)
for x, y in zip(record.e_d_signal, e_d_signal):
np.testing.assert_array_equal(x, y)
# Test automatic conversion using wfdb.wrsamp()
wfdb.wrsamp(
"test_physical_conversion",
fs=1000,
sig_name=["X", "Y", "Z"],
units=["mV", "mV", "mV"],
p_signal=p_signal,
adc_gain=adc_gain,
baseline=baseline,
fmt=fmt,
write_dir=self.temp_path,
)
record = wfdb.rdrecord(
os.path.join(self.temp_path, "test_physical_conversion"),
physical=False,
)
np.testing.assert_array_equal(record.d_signal, d_signal)
record = wfdb.rdrecord(
os.path.join(self.temp_path, "test_physical_conversion"),
physical=True,
)
for ch, gain in enumerate(adc_gain):
np.testing.assert_allclose(
record.p_signal[:, ch],
p_signal[:, ch],
rtol=0.0000001,
atol=(0.05 / gain),
)
@classmethod
def setUpClass(cls):
cls.temp_directory = tempfile.TemporaryDirectory()
cls.temp_path = cls.temp_directory.name
@classmethod
def tearDownClass(cls):
cls.temp_directory.cleanup()
|
class TestSignal(unittest.TestCase):
'''
For lower level signal tests
'''
def test_infer_sig_len(self):
'''
Infer the signal length of a record without the sig_len header
Read two headers. The records should be the same.
'''
pass
def test_physical_conversion(self):
pass
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
| 7 | 2 | 26 | 4 | 19 | 2 | 2 | 0.15 | 1 | 5 | 1 | 0 | 2 | 1 | 4 | 76 | 113 | 21 | 80 | 24 | 73 | 12 | 47 | 21 | 42 | 5 | 2 | 2 | 8 |
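A minimal sketch of the physical-to-digital conversion that test_physical_conversion above checks; the gain, baseline, and sample values are illustrative.

import numpy as np
import wfdb

# One channel of physical samples; the NaN marks an invalid sample.
p_signal = np.array([[0.0], [0.5], [np.nan]])
record = wfdb.Record(p_signal=p_signal, adc_gain=[200.0], baseline=[0], fmt=["16"])

# adc() applies d = round(p * adc_gain) + baseline and maps NaN to the
# format's invalid-sample value (-32768 for format 16).
d_signal = record.adc(expanded=False, inplace=False)
print(d_signal)  # expected [[0], [100], [-32768]]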
147,693 | MIT-LCP/wfdb-python | tests/test_plot.py | tests.test_plot.TestPlotInternal |
class TestPlotInternal(unittest.TestCase):
"""
Unit tests for internal wfdb.plot.plot functions
"""
def test_get_plot_dims(self):
sampfrom = 0
sampto = 3000
record = wfdb.rdrecord(
"sample-data/100", physical=True, sampfrom=sampfrom, sampto=sampto
)
ann = wfdb.rdann(
"sample-data/100", "atr", sampfrom=sampfrom, sampto=sampto
)
sig_len, n_sig, n_annot, n_subplots = plot._get_plot_dims(
signal=record.p_signal, ann_samp=[ann.sample]
)
assert sig_len == sampto - sampfrom
assert n_sig == record.n_sig
assert n_annot == 1
assert n_subplots == record.n_sig
def test_create_figure_single_subplots(self):
n_subplots = 1
fig, axes = plot._create_figure(
n_subplots, sharex=True, sharey=True, figsize=None
)
assert fig is not None
assert axes is not None
assert len(axes) == n_subplots
def test_create_figure_multiple_subplots(self):
n_subplots = 5
fig, axes = plot._create_figure(
n_subplots, sharex=True, sharey=True, figsize=None
)
assert fig is not None
assert axes is not None
assert len(axes) == n_subplots
|
class TestPlotInternal(unittest.TestCase):
'''
Unit tests for internal wfdb.plot.plot functions
'''
def test_get_plot_dims(self):
pass
def test_create_figure_single_subplots(self):
pass
def test_create_figure_multiple_subplots(self):
pass
| 4 | 1 | 11 | 0 | 11 | 0 | 1 | 0.09 | 1 | 0 | 0 | 0 | 3 | 0 | 3 | 75 | 40 | 4 | 33 | 13 | 29 | 3 | 23 | 13 | 19 | 1 | 2 | 0 | 3 |
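A minimal sketch of the private plotting helpers exercised above; these underscore-prefixed functions are internal and may change between versions, and the import path is assumed from the test module.

import numpy as np
from wfdb.plot import plot  # assumed import, matching the test module

signal = np.zeros((3000, 2))             # 3000 frames, 2 channels
ann_samp = [np.array([100, 900, 2500])]  # one channel of annotation locations

sig_len, n_sig, n_annot, n_subplots = plot._get_plot_dims(
    signal=signal, ann_samp=ann_samp
)
fig, axes = plot._create_figure(n_subplots, sharex=True, sharey=True, figsize=None)
print(sig_len, n_sig, n_annot, len(axes))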
147,694 | MIT-LCP/wfdb-python | tests/test_record.py | tests.test_record.TestTimeConversion |
class TestTimeConversion(unittest.TestCase):
"""
Test cases for time conversion
"""
def test_single(self):
"""
Time conversion for a single-segment record
This checks the get_frame_number, get_elapsed_time, and
get_absolute_time methods for a Record object. The example record
has no base date defined, so attempting to convert to/from absolute
time should raise an exception.
"""
header = wfdb.rdheader("sample-data/test01_00s")
# these time values should be equivalent
n = 123 * header.fs
t = datetime.timedelta(seconds=123)
self.assertEqual(header.get_frame_number(n), n)
self.assertEqual(header.get_frame_number(t), n)
self.assertEqual(header.get_elapsed_time(n), t)
self.assertEqual(header.get_elapsed_time(t), t)
# record test01_00s has no base date, so absolute time conversions
# should fail
self.assertIsNone(header.base_date)
d = datetime.datetime(2001, 1, 1, 12, 0, 0)
self.assertRaises(ValueError, header.get_frame_number, d)
self.assertRaises(ValueError, header.get_absolute_time, n)
self.assertRaises(ValueError, header.get_absolute_time, t)
def test_multisegment_with_date(self):
"""
Time conversion for a multi-segment record with base date
This checks the get_frame_number, get_elapsed_time, and
get_absolute_time methods for a MultiRecord object. The example
record has a base date, so we can convert timestamps between all
three of the supported representations.
"""
header = wfdb.rdheader(
"sample-data/multi-segment/p000878/p000878-2137-10-26-16-57"
)
# these time values should be equivalent
n = 123 * header.fs
t = datetime.timedelta(seconds=123)
d = t + header.base_datetime
self.assertEqual(header.get_frame_number(n), n)
self.assertEqual(header.get_frame_number(t), n)
self.assertEqual(header.get_frame_number(d), n)
self.assertEqual(header.get_elapsed_time(n), t)
self.assertEqual(header.get_elapsed_time(t), t)
self.assertEqual(header.get_elapsed_time(d), t)
self.assertEqual(header.get_absolute_time(n), d)
self.assertEqual(header.get_absolute_time(t), d)
self.assertEqual(header.get_absolute_time(d), d)
|
class TestTimeConversion(unittest.TestCase):
'''
Test cases for time conversion
'''
def test_single(self):
'''
Time conversion for a single-segment record
This checks the get_frame_number, get_elapsed_time, and
get_absolute_time methods for a Record object. The example record
has no base date defined, so attempting to convert to/from absolute
time should raise an exception.
'''
pass
def test_multisegment_with_date(self):
'''
Time conversion for a multi-segment record with base date
This checks the get_frame_number, get_elapsed_time, and
get_absolute_time methods for a MultiRecord object. The example
record has a base date, so we can convert timestamps between all
three of the supported representations.
'''
pass
| 3 | 3 | 27 | 4 | 15 | 9 | 1 | 0.7 | 1 | 3 | 0 | 0 | 2 | 0 | 2 | 74 | 60 | 9 | 30 | 11 | 27 | 21 | 28 | 11 | 25 | 1 | 2 | 0 | 2 |
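A minimal sketch of the time-conversion helpers checked by TestTimeConversion above, assuming the repository's sample-data/test01_00s header is available locally.

import datetime

import wfdb

header = wfdb.rdheader("sample-data/test01_00s")

n = 123 * header.fs                  # frame number
t = datetime.timedelta(seconds=123)  # elapsed time

print(header.get_frame_number(t) == n)  # True
print(header.get_elapsed_time(n) == t)  # True
# header.get_absolute_time(n) raises ValueError here: this record has no base date.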
147,695 | MIT-LCP/wfdb-python | tests/io/test_convert.py | test_convert.TestEdfToWfdb |
class TestEdfToWfdb:
"""
Tests for the io.convert.edf module.
"""
def test_edf_uniform(self):
"""
EDF format conversion to MIT for uniform sample rates.
"""
# Uniform sample rates
record_MIT = rdrecord("sample-data/n16").__dict__
record_EDF = read_edf("sample-data/n16.edf").__dict__
fields = list(record_MIT.keys())
# Original MIT format method of checksum is outdated, sometimes
# the same value though
fields.remove("checksum")
# Original MIT format units are less comprehensive since they
# default to mV if unknown, so more default labels were added
fields.remove("units")
test_results = []
for field in fields:
# Signal value will be slightly off due to C to Python type conversion
if field == "p_signal":
true_array = np.array(record_MIT[field]).flatten()
pred_array = np.array(record_EDF[field]).flatten()
# Prevent divide by zero warning
for i, v in enumerate(true_array):
if v == 0:
true_array[i] = 1
pred_array[i] = 1
sig_diff = np.abs((pred_array - true_array) / true_array)
sig_diff[sig_diff == -np.inf] = 0
sig_diff[sig_diff == np.inf] = 0
sig_diff = np.nanmean(sig_diff, 0)
# 5% tolerance
if np.max(sig_diff) <= 5:
test_results.append(True)
else:
test_results.append(False)
elif field == "init_value":
signal_diff = [
abs(record_MIT[field][i] - record_EDF[field][i])
for i in range(len(record_MIT[field]))
]
if abs(max(min(signal_diff), max(signal_diff), key=abs)) <= 2:
test_results.append(True)
else:
test_results.append(False)
else:
test_results.append(record_MIT[field] == record_EDF[field])
target_results = len(fields) * [True]
assert np.array_equal(test_results, target_results)
def test_edf_non_uniform(self):
"""
EDF format conversion to MIT for non-uniform sample rates.
"""
# Non-uniform sample rates
record_MIT = rdrecord("sample-data/wave_4").__dict__
record_EDF = read_edf("sample-data/wave_4.edf").__dict__
fields = list(record_MIT.keys())
# Original MIT format method of checksum is outdated, sometimes
# the same value though
fields.remove("checksum")
# Original MIT format units are less comprehensive since they
# default to mV if unknown, so more default labels were added
fields.remove("units")
test_results = []
for field in fields:
# Signal value will be slightly off due to C to Python type conversion
if field == "p_signal":
true_array = np.array(record_MIT[field]).flatten()
pred_array = np.array(record_EDF[field]).flatten()
# Prevent divide by zero warning
for i, v in enumerate(true_array):
if v == 0:
true_array[i] = 1
pred_array[i] = 1
sig_diff = np.abs((pred_array - true_array) / true_array)
sig_diff[sig_diff == -np.inf] = 0
sig_diff[sig_diff == np.inf] = 0
sig_diff = np.nanmean(sig_diff, 0)
# 5% tolerance
if np.max(sig_diff) <= 5:
test_results.append(True)
else:
test_results.append(False)
elif field == "init_value":
signal_diff = [
abs(record_MIT[field][i] - record_EDF[field][i])
for i in range(len(record_MIT[field]))
]
if abs(max(min(signal_diff), max(signal_diff), key=abs)) <= 2:
test_results.append(True)
else:
test_results.append(False)
else:
test_results.append(record_MIT[field] == record_EDF[field])
target_results = len(fields) * [True]
assert np.array_equal(test_results, target_results)
|
class TestEdfToWfdb:
'''
Tests for the io.convert.edf module.
'''
def test_edf_uniform(self):
'''
EDF format conversion to MIT for uniform sample rates.
'''
pass
def test_edf_non_uniform(self):
'''
EDF format conversion to MIT for non-uniform sample rates.
'''
pass
| 3 | 3 | 50 | 3 | 36 | 11 | 8 | 0.34 | 0 | 3 | 0 | 0 | 2 | 0 | 2 | 2 | 106 | 8 | 73 | 25 | 70 | 25 | 59 | 25 | 56 | 8 | 0 | 4 | 16 |
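A minimal sketch of the EDF-to-WFDB comparison performed by TestEdfToWfdb above, assuming read_edf is importable from wfdb.io.convert.edf and the sample files are available; small numerical differences are expected from the type conversion the test tolerates.

import numpy as np

from wfdb import rdrecord
from wfdb.io.convert.edf import read_edf  # assumed import path

record_mit = rdrecord("sample-data/n16")
record_edf = read_edf("sample-data/n16.edf")

# Compare the physical signals; report the largest absolute difference.
diff = np.nanmax(np.abs(record_mit.p_signal - record_edf.p_signal))
print(record_mit.sig_name, diff)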
147,696 | MIT-LCP/wfdb-python | tests/test_url.py | tests.test_url.DummyHTTPServer |
class DummyHTTPServer(http.server.HTTPServer):
"""
HTTPServer used to simulate a web server for testing.
The server may be used as a context manager (using "with"); during
execution of the "with" block, a background thread runs that
listens for and handles client requests.
Attributes
----------
file_content : dict
Dictionary containing the content of each file on the server.
The keys are absolute paths (such as "/foo.txt"); the values
are the corresponding content (bytes).
allow_gzip : bool, optional
True if the server should return compressed responses (using
"Content-Encoding: gzip") when the client requests them (using
"Accept-Encoding: gzip").
allow_range : bool, optional
True if the server should return partial responses (using 206
Partial Content and "Content-Range") when the client requests
them (using "Range").
server_address : tuple (str, int), optional
A tuple specifying the address and port number where the
server should listen for connections. If the port is 0, an
arbitrary unused port is selected. The default address is
"127.0.0.1" and the default port is 0.
"""
def __init__(
self,
file_content,
allow_gzip=True,
allow_range=True,
server_address=("127.0.0.1", 0),
):
super().__init__(server_address, DummyHTTPRequestHandler)
self.file_content = file_content
self.allow_gzip = allow_gzip
self.allow_range = allow_range
def url(self, path="/"):
"""
Generate a URL that points to a file on this server.
Parameters
----------
path : str, optional
Path of the file on the server.
Returns
-------
url : str
Absolute URL for the specified file.
"""
return "http://127.0.0.1:%d/%s" % (
self.server_address[1],
path.lstrip("/"),
)
def __enter__(self):
super().__enter__()
self.thread = threading.Thread(target=self.serve_forever)
self.thread.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
self.thread.join()
self.thread = None
return super().__exit__(exc_type, exc_val, exc_tb)
|
class DummyHTTPServer(http.server.HTTPServer):
'''
HTTPServer used to simulate a web server for testing.
The server may be used as a context manager (using "with"); during
execution of the "with" block, a background thread runs that
listens for and handles client requests.
Attributes
----------
file_content : dict
Dictionary containing the content of each file on the server.
The keys are absolute paths (such as "/foo.txt"); the values
are the corresponding content (bytes).
allow_gzip : bool, optional
True if the server should return compressed responses (using
"Content-Encoding: gzip") when the client requests them (using
"Accept-Encoding: gzip").
allow_range : bool, optional
True if the server should return partial responses (using 206
Partial Content and "Content-Range") when the client requests
them (using "Range").
server_address : tuple (str, int), optional
A tuple specifying the address and port number where the
server should listen for connections. If the port is 0, an
arbitrary unused port is selected. The default address is
"127.0.0.1" and the default port is 0.
'''
def __init__(
self,
file_content,
allow_gzip=True,
allow_range=True,
server_address=("127.0.0.1", 0),
):
pass
def url(self, path="/"):
'''
Generate a URL that points to a file on this server.
Parameters
----------
path : str, optional
Path of the file on the server.
Returns
-------
url : str
Absolute URL for the specified file.
'''
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
| 5 | 2 | 10 | 1 | 7 | 3 | 1 | 1.33 | 1 | 3 | 1 | 0 | 4 | 4 | 4 | 30 | 73 | 10 | 27 | 15 | 16 | 36 | 18 | 9 | 13 | 1 | 3 | 0 | 4 |
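A minimal sketch of using the test server above as a context manager. It assumes DummyHTTPServer (and the DummyHTTPRequestHandler it depends on, which is not shown in this record) is importable from tests/test_url.py when run from the repository root.

import urllib.request

# Assumption: the test module is importable when running from the repository root.
from tests.test_url import DummyHTTPServer

files = {"/foo.txt": b"hello world\n"}
with DummyHTTPServer(file_content=files) as server:
    url = server.url("/foo.txt")
    with urllib.request.urlopen(url) as resp:
        print(resp.read())  # b'hello world\n'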
147,697 | MIT-LCP/wfdb-python | wfdb/io/annotation.py | wfdb.io.annotation.Annotation |
class Annotation(object):
"""
The class representing WFDB annotations.
Annotation objects can be created using the initializer, or by reading a
WFDB annotation file with `rdann`.
The attributes of the Annotation object give information about the
annotation as specified by:
https://www.physionet.org/physiotools/wag/annot-5.htm
Call `show_ann_labels()` to see the list of standard annotation codes. Any
text used to label annotations that are not one of these codes should go in
the 'aux_note' field rather than the 'sym' field.
The current annotation values are organized as such:
AnnotationLabel(label_store (or subtype), symbol (or aux_note), short_description, description)
where the associated values are:
ann_labels = [
AnnotationLabel(0, " ", 'NOTANN', 'Not an actual annotation'),
AnnotationLabel(1, "N", 'NORMAL', 'Normal beat'),
AnnotationLabel(2, "L", 'LBBB', 'Left bundle branch block beat'),
AnnotationLabel(3, "R", 'RBBB', 'Right bundle branch block beat'),
AnnotationLabel(4, "a", 'ABERR', 'Aberrated atrial premature beat'),
AnnotationLabel(5, "V", 'PVC', 'Premature ventricular contraction'),
AnnotationLabel(6, "F", 'FUSION', 'Fusion of ventricular and normal beat'),
AnnotationLabel(7, "J", 'NPC', 'Nodal (junctional) premature beat'),
AnnotationLabel(8, "A", 'APC', 'Atrial premature contraction'),
AnnotationLabel(9, "S", 'SVPB', 'Premature or ectopic supraventricular beat'),
AnnotationLabel(10, "E", 'VESC', 'Ventricular escape beat'),
AnnotationLabel(11, "j", 'NESC', 'Nodal (junctional) escape beat'),
AnnotationLabel(12, "/", 'PACE', 'Paced beat'),
AnnotationLabel(13, "Q", 'UNKNOWN', 'Unclassifiable beat'),
AnnotationLabel(14, "~", 'NOISE', 'Signal quality change'),
AnnotationLabel(16, "|", 'ARFCT', 'Isolated QRS-like artifact'),
AnnotationLabel(18, "s", 'STCH', 'ST change'),
AnnotationLabel(19, "T", 'TCH', 'T-wave change'),
AnnotationLabel(20, "*", 'SYSTOLE', 'Systole'),
AnnotationLabel(21, "D", 'DIASTOLE', 'Diastole'),
AnnotationLabel(22, '"', 'NOTE', 'Comment annotation'),
AnnotationLabel(23, "=", 'MEASURE', 'Measurement annotation'),
AnnotationLabel(24, "p", 'PWAVE', 'P-wave peak'),
AnnotationLabel(25, "B", 'BBB', 'Left or right bundle branch block'),
AnnotationLabel(26, "^", 'PACESP', 'Non-conducted pacer spike'),
AnnotationLabel(27, "t", 'TWAVE', 'T-wave peak'),
AnnotationLabel(28, "+", 'RHYTHM', 'Rhythm change'),
AnnotationLabel(29, "u", 'UWAVE', 'U-wave peak'),
AnnotationLabel(30, "?", 'LEARN', 'Learning'),
AnnotationLabel(31, "!", 'FLWAV', 'Ventricular flutter wave'),
AnnotationLabel(32, "[", 'VFON', 'Start of ventricular flutter/fibrillation'),
AnnotationLabel(33, "]", 'VFOFF', 'End of ventricular flutter/fibrillation'),
AnnotationLabel(34, "e", 'AESC', 'Atrial escape beat'),
AnnotationLabel(35, "n", 'SVESC', 'Supraventricular escape beat'),
AnnotationLabel(36, "@", 'LINK', 'Link to external data (aux_note contains URL)'),
AnnotationLabel(37, "x", 'NAPC', 'Non-conducted P-wave (blocked APB)'),
AnnotationLabel(38, "f", 'PFUS', 'Fusion of paced and normal beat'),
AnnotationLabel(39, "(", 'WFON', 'Waveform onset'),
AnnotationLabel(40, ")", 'WFOFF', 'Waveform end'),
AnnotationLabel(41, "r", 'RONT', 'R-on-T premature ventricular contraction')
]
The current annotation classes are organized as such:
AnnotationClass(extension, description, human_reviewed)
where the associated values are:
ann_classes = [
AnnotationClass('atr', 'Reference ECG annotations', True),
AnnotationClass('blh', 'Human reviewed beat labels', True),
AnnotationClass('blm', 'Machine beat labels', False),
AnnotationClass('alh', 'Human reviewed alarms', True),
AnnotationClass('alm', 'Machine alarms', False),
AnnotationClass('qrsc', 'Human reviewed QRS detections', True),
AnnotationClass('qrs', 'Machine QRS detections', False),
AnnotationClass('bph', 'Human reviewed BP beat detections', True),
AnnotationClass('bpm', 'Machine BP beat detections', False)
]
Attributes
----------
record_name : str
The base file name (without extension) of the record that the
annotation is associated with.
extension : str
The file extension of the file the annotation is stored in.
sample : ndarray
A numpy array containing the annotation locations in samples relative to
the beginning of the record.
symbol : list, numpy array, optional
The symbols used to display the annotation labels. List or numpy array.
If this field is present, `label_store` must not be present.
subtype : ndarray, optional
A numpy array containing the marked class/category of each annotation.
chan : ndarray, optional
A numpy array containing the signal channel associated with each
annotation.
num : ndarray, optional
A numpy array containing the labelled annotation number for each
annotation.
aux_note : list, optional
A list containing the auxiliary information string (or None for
annotations without notes) for each annotation.
fs : int, float, optional
The sampling frequency of the record.
label_store : ndarray, optional
The integer value used to store/encode each annotation label
description : list, optional
A list containing the descriptive string of each annotation label.
custom_labels : pandas dataframe, optional
The custom annotation labels defined in the annotation file. Maps
the relationship between the three label fields. The data type is a
pandas DataFrame with three columns:
['label_store', 'symbol', 'description'].
contained_labels : pandas dataframe, optional
The unique labels contained in this annotation. Same structure as
`custom_labels`.
Examples
--------
>>> ann1 = wfdb.Annotation(record_name='rec1', extension='atr',
sample=[10,20,400], symbol=['N','N','['],
aux_note=[None, None, 'Serious Vfib'])
"""
def __init__(
self,
record_name,
extension,
sample,
symbol=None,
subtype=None,
chan=None,
num=None,
aux_note=None,
fs=None,
label_store=None,
description=None,
custom_labels=None,
contained_labels=None,
):
self.record_name = record_name
self.extension = extension
self.sample = sample
self.symbol = symbol
self.subtype = subtype
self.chan = chan
self.num = num
self.aux_note = aux_note
self.fs = fs
self.label_store = label_store
self.description = description
self.custom_labels = custom_labels
self.contained_labels = contained_labels
self.ann_len = len(self.sample)
# __label_map__: (storevalue, symbol, description) hidden attribute
def __eq__(self, other):
"""
Equal comparison operator for objects of this type.
Parameters
----------
other : object
The object that is being compared to self.
Returns
-------
bool
Determines if the objects are equal (True) or not equal (False).
"""
att1 = self.__dict__
att2 = other.__dict__
if set(att1.keys()) != set(att2.keys()):
print("keyset")
return False
for k in att1.keys():
v1 = att1[k]
v2 = att2[k]
if type(v1) != type(v2):
print(k)
return False
if isinstance(v1, np.ndarray):
if not np.array_equal(v1, v2):
print(k)
return False
elif isinstance(v1, pd.DataFrame):
if not v1.equals(v2):
print(k)
return False
else:
if v1 != v2:
print(k)
return False
return True
def apply_range(self, sampfrom=0, sampto=None):
"""
Filter the annotation attributes to keep only items between the
desired sample values.
Parameters
----------
sampfrom : int, optional
The minimum sample number for annotations to be returned.
sampto : int, optional
The maximum sample number for annotations to be returned.
"""
sampto = sampto or self.sample[-1]
kept_inds = np.intersect1d(
np.where(self.sample >= sampfrom), np.where(self.sample <= sampto)
)
for field in ["sample", "label_store", "subtype", "chan", "num"]:
setattr(self, field, getattr(self, field)[kept_inds])
self.aux_note = [self.aux_note[i] for i in kept_inds]
self.ann_len = len(self.sample)
def wrann(self, write_fs=False, write_dir=""):
"""
Write a WFDB annotation file from this object.
Parameters
----------
write_fs : bool, optional
Whether to write the `fs` attribute to the file.
write_dir : str, optional
The output directory in which the header is written.
Returns
-------
N/A
"""
for field in ["record_name", "extension"]:
if getattr(self, field) is None:
raise Exception(
"Missing required field for writing annotation file: ",
field,
)
present_label_fields = self.get_label_fields()
if not present_label_fields:
raise Exception(
"At least one annotation label field is required to write the annotation: ",
ann_label_fields,
)
# Check the validity of individual fields
self.check_fields()
# Standardize the format of the custom_labels field
self.standardize_custom_labels()
# Create the label map used in this annotation
self.create_label_map()
# Check the cohesion of fields
self.check_field_cohesion(present_label_fields)
# Calculate the label_store field if necessary
if "label_store" not in present_label_fields:
self.convert_label_attribute(
source_field=present_label_fields[0], target_field="label_store"
)
# Calculate the symbol field if necessary
if "symbol" not in present_label_fields:
self.convert_label_attribute(
source_field=present_label_fields[0], target_field="symbol"
)
# Write the header file using the specified fields
self.wr_ann_file(write_fs=write_fs, write_dir=write_dir)
return
def get_label_fields(self):
"""
Get the present label fields in the object.
Parameters
----------
N/A
Returns
-------
present_label_fields : list
All of the present label fields in the object.
"""
present_label_fields = []
for field in ann_label_fields:
if getattr(self, field) is not None:
present_label_fields.append(field)
return present_label_fields
def check_fields(self):
"""
Check the set fields of the annotation object.
Parameters
----------
N/A
Returns
-------
N/A
"""
# Check all set fields
for field in ALLOWED_TYPES:
if getattr(self, field) is not None:
# Check the type of the field's elements
self.check_field(field)
return
def check_field(self, field):
"""
Check a particular annotation field.
Parameters
----------
field : str
The annotation field to be checked.
Returns
-------
N/A
"""
item = getattr(self, field)
if not isinstance(item, ALLOWED_TYPES[field]):
raise TypeError(
"The " + field + " field must be one of the following types:",
ALLOWED_TYPES[field],
)
# Numerical integer annotation fields: sample, label_store, sub,
# chan, num
if ALLOWED_TYPES[field] == (np.ndarray):
record.check_np_array(
item=item,
field_name=field,
ndim=1,
parent_class=np.integer,
channel_num=None,
)
# Field specific checks
if field == "record_name":
if bool(re.search(r"[^-\w]", self.record_name)):
raise ValueError(
"record_name must only comprise of letters, digits, hyphens, and underscores."
)
elif field == "extension":
if bool(re.search("[^a-zA-Z]", self.extension)):
raise ValueError("extension must only comprise of letters.")
elif field == "fs":
if self.fs <= 0:
raise ValueError("The fs field must be a non-negative number")
elif field == "custom_labels":
# The role of this section is just to check the
# elements of this item, without utilizing
# any other fields. No format conversion
# or free value looksups etc are done.
# Check the structure of the subelements
if isinstance(item, pd.DataFrame):
column_names = list(item)
if "symbol" in column_names and "description" in column_names:
if "label_store" in column_names:
label_store = list(item["label_store"].values)
else:
label_store = None
symbol = item["symbol"].values
description = item["description"].values
else:
raise ValueError(
"".join(
[
"If the "
+ field
+ " field is pandas dataframe, its columns",
" must be one of the following:\n-[label_store, symbol, description]",
"\n-[symbol, description]",
]
)
)
else:
if set([len(i) for i in item]) == {2}:
label_store = None
symbol = [i[0] for i in item]
description = [i[1] for i in item]
elif set([len(i) for i in item]) == {3}:
label_store = [i[0] for i in item]
symbol = [i[1] for i in item]
description = [i[2] for i in item]
else:
raise ValueError(
"".join(
[
"If the "
+ field
+ " field is an array-like object, its subelements",
" must be one of the following:\n- tuple triplets storing: ",
"(label_store, symbol, description)\n- tuple pairs storing: ",
"(symbol, description)",
]
)
)
# Check the values of the subelements
if label_store:
if len(item) != len(set(label_store)):
raise ValueError(
"The label_store values of the "
+ field
+ " field must be unique"
)
if min(label_store) < 1 or max(label_store) > 49:
raise ValueError(
"The label_store values of the custom_labels field must be between 1 and 49"
)
if len(item) != len(set(symbol)):
raise ValueError(
"The symbol values of the "
+ field
+ " field must be unique"
)
for i in range(len(item)):
if label_store:
if not hasattr(label_store[i], "__index__"):
raise TypeError(
"The label_store values of the "
+ field
+ " field must be integer-like"
)
if not isinstance(symbol[i], str_types) or len(
symbol[i]
) not in [
1,
2,
3,
]:
raise ValueError(
"The symbol values of the "
+ field
+ " field must be strings of length 1 to 3"
)
if bool(re.search("[ \t\n\r\f\v]", symbol[i])):
raise ValueError(
"The symbol values of the "
+ field
+ " field must not contain whitespace characters"
)
if not isinstance(description[i], str_types):
raise TypeError(
"The description values of the "
+ field
+ " field must be strings"
)
# Would be good to enforce this but existing garbage annotations have tabs and newlines...
# if bool(re.search('[\t\n\r\f\v]', description[i])):
# raise ValueError('The description values of the '+field+' field must not contain tabs or newlines')
# The string fields
elif field in ["symbol", "description", "aux_note"]:
uniq_elements = set(item)
for e in uniq_elements:
if not isinstance(e, str_types):
raise TypeError(
"Subelements of the " + field + " field must be strings"
)
if field == "symbol":
for e in uniq_elements:
if len(e) not in [1, 2, 3]:
raise ValueError(
"Subelements of the "
+ field
+ " field must be strings of length 1 to 3"
)
if bool(re.search("[ \t\n\r\f\v]", e)):
raise ValueError(
"Subelements of the "
+ field
+ " field may not contain whitespace characters"
)
else:
for e in uniq_elements:
if bool(re.search("[\t\n\r\f\v]", e)):
raise ValueError(
"Subelements of the "
+ field
+ " field must not contain tabs or newlines"
)
elif field == "sample":
if len(self.sample) == 1:
sampdiffs = np.array([self.sample[0]])
elif len(self.sample) > 1:
sampdiffs = np.concatenate(
([self.sample[0]], np.diff(self.sample))
)
else:
raise ValueError(
"The 'sample' field must be a numpy array with length greater than 0"
)
if min(self.sample) < 0:
raise ValueError(
"The 'sample' field must only contain non-negative integers"
)
if min(sampdiffs) < 0:
raise ValueError(
"The 'sample' field must contain monotonically increasing sample numbers"
)
elif field == "label_store":
if min(item) < 1 or max(item) > 49:
raise ValueError(
"The label_store values must be between 1 and 49"
)
# The C WFDB library stores num/sub/chan as chars.
elif field == "subtype":
# signed character
if min(self.subtype) < -128 or max(self.subtype) > 127:
raise ValueError(
"The 'subtype' field must only contain integers from -128 to 127"
)
elif field == "chan":
# unsigned character
if min(self.chan) < 0 or max(self.chan) > 255:
raise ValueError(
"The 'chan' field must only contain non-negative integers up to 255"
)
elif field == "num":
# signed character
if min(self.num) < 0 or max(self.num) > 127:
raise ValueError(
"The 'num' field must only contain non-negative integers up to 127"
)
return
def check_field_cohesion(self, present_label_fields):
"""
Check that the content and structure of different fields are consistent
with one another.
Parameters
----------
present_label_fields : list
All of the present label fields in the object.
Returns
-------
N/A
"""
# Ensure all written annotation fields have the same length
nannots = len(self.sample)
for field in [
"sample",
"num",
"subtype",
"chan",
"aux_note",
] + present_label_fields:
if getattr(self, field) is not None:
if len(getattr(self, field)) != nannots:
raise ValueError(
"The lengths of the 'sample' and '"
+ field
+ "' fields do not match"
)
# Ensure all label fields are defined by the label map. This has to be checked because
# it is possible the user defined (or lack of) custom_labels does not capture all the
# labels present.
for field in present_label_fields:
defined_values = self.__label_map__[field].values
if set(getattr(self, field)) - set(defined_values) != set():
raise ValueError(
"\n".join(
[
"\nThe "
+ field
+ " field contains elements not encoded in the stardard WFDB annotation labels, or this object's custom_labels field",
"- To see the standard WFDB annotation labels, call: show_ann_labels()",
"- To transfer non-encoded symbol items into the aux_note field, call: self.sym_to_aux()",
"- To define custom labels, set the custom_labels field as a list of tuple triplets with format: (label_store, symbol, description)",
]
)
)
return
def standardize_custom_labels(self):
"""
Set the custom_labels field of the object to a standardized format:
3 column pandas df with ann_label_fields as columns.
Does nothing if there are no custom labels defined.
Does nothing if custom_labels is already a df with all 3 columns.
If custom_labels is an iterable of pairs/triplets, this
function will convert it into a df.
If the label_store attribute is not already defined, this
function will automatically choose values by trying to use:
1. The undefined store values from the standard WFDB annotation
label map.
2. The unused label store values. This is extracted by finding the
set of all labels contained in this annotation object and seeing
which symbols/descriptions are not used.
If more custom labels are defined than there are available store values,
even under condition 2 above, this function will raise an error.
This function must work when called as a standalone.
Parameters
----------
N/A
Returns
-------
N/A
"""
custom_labels = self.custom_labels
if custom_labels is None:
return
self.check_field("custom_labels")
# Convert to dataframe if not already
if not isinstance(custom_labels, pd.DataFrame):
if len(self.custom_labels[0]) == 2:
symbol = self.get_custom_label_attribute("symbol")
description = self.get_custom_label_attribute("description")
custom_labels = pd.DataFrame(
{"symbol": symbol, "description": description}
)
else:
label_store = self.get_custom_label_attribute("label_store")
symbol = self.get_custom_label_attribute("symbol")
description = self.get_custom_label_attribute("description")
custom_labels = pd.DataFrame(
{
"label_store": label_store,
"symbol": symbol,
"description": description,
}
)
# Assign label_store values to the custom labels if not defined
if "label_store" not in list(custom_labels):
undefined_label_stores = self.get_undefined_label_stores()
if len(custom_labels) > len(undefined_label_stores):
available_label_stores = self.get_available_label_stores()
else:
available_label_stores = undefined_label_stores
n_custom_labels = custom_labels.shape[0]
if n_custom_labels > len(available_label_stores):
raise ValueError(
"There are more custom_label definitions than storage values available for them."
)
custom_labels["label_store"] = available_label_stores[
:n_custom_labels
]
custom_labels.set_index(
custom_labels["label_store"].values, inplace=True
)
custom_labels = custom_labels[list(ann_label_fields)]
self.custom_labels = custom_labels
return
def get_undefined_label_stores(self):
"""
Get the label_store values not defined in the
standard WFDB annotation labels.
Parameters
----------
N/A
Returns
-------
list
The label_store values not found in WFDB annotation labels.
"""
return list(set(range(50)) - set(ann_label_table["label_store"]))
def get_available_label_stores(self, usefield="tryall"):
"""
Get the label store values that may be used
for writing this annotation.
Available store values include:
- the undefined values in the standard WFDB labels
- the store values not used in the current
annotation object.
- the store values whose standard WFDB symbols/descriptions
match those of the custom labels (if custom_labels exists)
Parameters
----------
usefield : str, optional
If 'usefield' is explicitly specified, the function will use that
field to figure out available label stores. If 'usefield'
is set to 'tryall', the function will choose one of the contained
attributes by checking availability in the order: label_store, symbol, description.
Returns
-------
available_label_stores : set
The available store values used for writing the annotation.
"""
# Figure out which field to use to get available labels stores.
if usefield == "tryall":
if self.label_store is not None:
usefield = "label_store"
elif self.symbol is not None:
usefield = "symbol"
elif self.description is not None:
usefield = "description"
else:
raise ValueError(
"No label fields are defined. At least one of the following is required: ",
ann_label_fields,
)
return self.get_available_label_stores(usefield=usefield)
# Use the explicitly stated field to get available stores.
else:
# If usefield == 'label_store', there are slightly fewer/different steps
# compared to if it were another option
contained_field = getattr(self, usefield)
# Get the unused label_store values
if usefield == "label_store":
unused_label_stores = (
set(ann_label_table["label_store"].values) - contained_field
)
else:
# the label_store values from the standard WFDB annotation labels
# whose symbols are not contained in this annotation
unused_field = (
set(ann_label_table[usefield].values) - contained_field
)
unused_label_stores = ann_label_table.loc[
ann_label_table[usefield] in unused_field, "label_store"
].values
# Get the standard WFDB label_store values overwritten by the
# custom_labels if any
if self.custom_symbols is not None:
custom_field = set(self.get_custom_label_attribute(usefield))
if usefield == "label_store":
overwritten_label_stores = set(custom_field).intersection(
set(ann_label_table["label_store"])
)
else:
overwritten_fields = set(custom_field).intersection(
set(ann_label_table[usefield])
)
overwritten_label_stores = ann_label_table.loc[
ann_label_table[usefield] in overwritten_fields,
"label_store",
].values
else:
overwritten_label_stores = set()
# The undefined values in the standard WFDB labels
undefined_label_stores = self.get_undefined_label_stores()
# Final available label stores = undefined + unused + overwritten
available_label_stores = (
set(undefined_label_stores)
.union(set(unused_label_stores))
.union(overwritten_label_stores)
)
return available_label_stores
def get_custom_label_attribute(self, attribute):
"""
Get a list of the custom_labels attribute i.e. label_store,
symbol, or description. The custom_labels variable could be in
a number of formats.
Parameters
----------
attribute : str
The selected attribute to generate the list.
Returns
-------
a : list
All of the custom_labels attributes.
"""
if attribute not in ann_label_fields:
raise ValueError("Invalid attribute specified")
if isinstance(self.custom_labels, pd.DataFrame):
if "label_store" not in list(self.custom_labels):
raise ValueError("label_store not defined in custom_labels")
a = list(self.custom_labels[attribute].values)
else:
if len(self.custom_labels[0]) == 2:
if attribute == "label_store":
raise ValueError("label_store not defined in custom_labels")
elif attribute == "symbol":
a = [l[0] for l in self.custom_labels]
elif attribute == "description":
a = [l[1] for l in self.custom_labels]
else:
if attribute == "label_store":
a = [l[0] for l in self.custom_labels]
elif attribute == "symbol":
a = [l[1] for l in self.custom_labels]
elif attribute == "description":
a = [l[2] for l in self.custom_labels]
return a
def create_label_map(self, inplace=True):
"""
Creates mapping df based on ann_label_table and self.custom_labels. Table
composed of entire WFDB standard annotation table, overwritten/appended
with custom_labels if any. Sets __label_map__ attribute, or returns value.
Parameters
----------
inplace : bool, optional
Determines whether to add the label map to the current
object (True) or as a return variable (False).
Returns
-------
label_map : pandas DataFrame
Mapping based on ann_label_table and self.custom_labels.
"""
label_map = ann_label_table.copy()
if self.custom_labels is not None:
self.standardize_custom_labels()
for i in self.custom_labels.index:
label_map.loc[i] = self.custom_labels.loc[i]
if inplace:
self.__label_map__ = label_map
else:
return label_map
def wr_ann_file(self, write_fs, write_dir=""):
"""
Calculate the bytes used to encode an annotation set and
write them to an annotation file.
Parameters
----------
write_fs : bool
Whether to write the `fs` attribute to the file.
write_dir : str, optional
The output directory in which the header is written.
Returns
-------
N/A
"""
# Calculate the fs bytes to write if present and desired to write
if write_fs:
fs_bytes = self.calc_fs_bytes()
else:
fs_bytes = []
# Calculate the custom_labels bytes to write if present
cl_bytes = self.calc_cl_bytes()
# Calculate the core field bytes to write
core_bytes = self.calc_core_bytes()
# Mark the end of the special annotation types if needed
if len(fs_bytes) or len(cl_bytes):
end_special_bytes = [0, 236, 255, 255, 255, 255, 1, 0]
else:
end_special_bytes = []
# Write the file
with open(
os.path.join(write_dir, self.record_name + "." + self.extension),
"wb",
) as f:
# Combine all bytes to write: fs (if any), custom annotations (if any), main content, file terminator
np.concatenate(
(
fs_bytes,
cl_bytes,
end_special_bytes,
core_bytes,
np.array([0, 0]),
)
).astype("u1").tofile(f)
return
def calc_fs_bytes(self):
"""
Calculate the bytes written to the annotation file for the fs field.
Parameters
----------
N/A
Returns
-------
list, ndarray
All of the bytes to be written to the annotation file.
"""
if self.fs is None:
return []
# Initial indicators of encoding fs
data_bytes = [
0,
88,
0,
252,
35,
35,
32,
116,
105,
109,
101,
32,
114,
101,
115,
111,
108,
117,
116,
105,
111,
110,
58,
32,
]
# Check if fs is close enough to int
if isinstance(self.fs, float):
if round(self.fs, 8) == float(int(self.fs)):
self.fs = int(self.fs)
fschars = str(self.fs)
ndigits = len(fschars)
for i in range(ndigits):
data_bytes.append(ord(fschars[i]))
# Fill in the aux_note length
data_bytes[2] = ndigits + 20
# odd number of digits
if ndigits % 2:
data_bytes.append(0)
return np.array(data_bytes).astype("u1")
def calc_cl_bytes(self):
"""
Calculate the bytes written to the annotation file for the
custom_labels field.
Parameters
----------
N/A
Returns
-------
list, ndarray
All of the bytes to be written to the annotation file.
"""
if self.custom_labels is None:
return []
# The start wrapper: '0 NOTE length aux_note ## annotation type definitions'
headbytes = [
0,
88,
30,
252,
35,
35,
32,
97,
110,
110,
111,
116,
97,
116,
105,
111,
110,
32,
116,
121,
112,
101,
32,
100,
101,
102,
105,
110,
105,
116,
105,
111,
110,
115,
]
# The end wrapper: '0 NOTE length aux_note ## end of definitions' followed by SKIP -1, +1
tailbytes = [
0,
88,
21,
252,
35,
35,
32,
101,
110,
100,
32,
111,
102,
32,
100,
101,
102,
105,
110,
105,
116,
105,
111,
110,
115,
0,
]
custom_bytes = []
for i in self.custom_labels.index:
custom_bytes += custom_triplet_bytes(
list(self.custom_labels.loc[i, list(ann_label_fields)])
)
# writecontent = []
# for i in range(len(self.custom_labels)):
# writecontent.append([freenumbers[i],list(custom_labels.keys())[i],list(custom_labels.values())[i]])
# custombytes = [customcode2bytes(triplet) for triplet in writecontent]
# custombytes = [item for sublist in custombytes for item in sublist]
return np.array(headbytes + custom_bytes + tailbytes).astype("u1")
def calc_core_bytes(self):
"""
Convert all used annotation fields into bytes to write.
Parameters
----------
N/A
Returns
-------
list, ndarray
All of the bytes to be written to the annotation file.
"""
# The difference sample to write
if len(self.sample) == 1:
sampdiff = np.array([self.sample[0]])
else:
sampdiff = np.concatenate(([self.sample[0]], np.diff(self.sample)))
# Create a copy of the annotation object with a
# compact version of fields to write
compact_annotation = copy.deepcopy(self)
compact_annotation.compact_fields()
# The optional fields to be written. Write if they are not None or all empty
extra_write_fields = []
for field in ["num", "subtype", "chan", "aux_note"]:
if not isblank(getattr(compact_annotation, field)):
extra_write_fields.append(field)
data_bytes = []
# Allow use of custom labels
label_table = ann_label_table
if self.custom_labels is not None:
label_table = pd.concat(
[label_table, self.custom_labels], ignore_index=True
)
# Generate typecodes from annotation label table
typecodes = {
label_table.iloc[i]["symbol"]: label_table.iloc[i]["label_store"]
for i in range(len(label_table))
}
# Iterate across all fields one index at a time
for i in range(len(sampdiff)):
# Process the samp (difference) and sym items
data_bytes.append(
field2bytes(
"samptype", [sampdiff[i], self.symbol[i]], typecodes
)
)
# Process the extra optional fields
for field in extra_write_fields:
value = getattr(compact_annotation, field)[i]
if value is not None:
data_bytes.append(field2bytes(field, value, typecodes))
# Flatten and convert to correct format
data_bytes = np.array(
[item for sublist in data_bytes for item in sublist]
).astype("u1")
return data_bytes
def compact_fields(self):
"""
Compact all of the object's fields so that the output
writing annotation file writes as few bytes as possible.
Parameters
----------
N/A
Returns
-------
N/A
"""
# Number of annotations
nannots = len(self.sample)
# Chan and num carry over previous fields. Get lists of as few
# elements to write as possible
self.chan = compact_carry_field(self.chan)
self.num = compact_carry_field(self.num)
# Elements of 0 (default) do not need to be written for subtype.
# num and sub are signed in original c package...
if self.subtype is not None:
if isinstance(self.subtype, list):
for i in range(nannots):
if self.subtype[i] == 0:
self.subtype[i] = None
if np.array_equal(self.subtype, [None] * nannots):
self.subtype = None
else:
zero_inds = np.where(self.subtype == 0)[0]
if len(zero_inds) == nannots:
self.subtype = None
else:
self.subtype = list(self.subtype)
for i in zero_inds:
self.subtype[i] = None
# Empty aux_note strings are not written
if self.aux_note is not None:
for i in range(nannots):
if self.aux_note[i] == "":
self.aux_note[i] = None
if np.array_equal(self.aux_note, [None] * nannots):
self.aux_note = None
def sym_to_aux(self):
"""
Move non-encoded symbol elements into the aux_note field.
Parameters
----------
N/A
Returns
-------
N/A
"""
self.check_field("symbol")
# Non-encoded symbols
label_table_map = self.create_label_map(inplace=False)
external_syms = set(self.symbol) - set(label_table_map["symbol"].values)
if external_syms == set():
return
if self.aux_note is None:
self.aux_note = [""] * len(self.sample)
for ext in external_syms:
for i in [i for i, x in enumerate(self.symbol) if x == ext]:
if not self.aux_note[i]:
self.aux_note[i] = self.symbol[i]
else:
self.aux_note[i] = self.symbol[i] + " " + self.aux_note[i]
self.symbol[i] = '"'
return
def get_contained_labels(self, inplace=True):
"""
Get the set of unique labels contained in this annotation.
Returns a pandas dataframe or sets the contained_labels
attribute of the object. Requires the label_store field to be set.
Function will try to use attributes contained in the order:
1. label_store
2. symbol
3. description
This function should also be called to summarize information about an
annotation after it has been read. Should not be a helper function
to others except rdann.
Parameters
----------
inplace : bool, optional
Determines whether to add the label map to the current
object (True) or as a return variable (False).
Returns
-------
contained_labels : pandas DataFrame
Mapping based on ann_label_table and self.custom_labels.
"""
if self.custom_labels is not None:
self.check_field("custom_labels")
# Create the label map
label_map = ann_label_table.copy()
# Convert the tuple triplets into a pandas dataframe if needed
if isinstance(self.custom_labels, (list, tuple)):
custom_labels = label_triplets_to_df(self.custom_labels)
elif isinstance(self.custom_labels, pd.DataFrame):
# Set the index just in case it doesn't already match the label_store
self.custom_labels.set_index(
self.custom_labels["label_store"].values, inplace=True
)
custom_labels = self.custom_labels
else:
custom_labels = None
# Merge the standard WFDB labels with the custom labels.
# custom labels values overwrite standard WFDB if overlap.
if custom_labels is not None:
for i in custom_labels.index:
label_map.loc[i] = custom_labels.loc[i]
# This doesn't work...
# label_map.loc[custom_labels.index] = custom_labels.loc[custom_labels.index]
# Get the labels using one of the features
if self.label_store is not None:
index_vals = set(self.label_store)
reset_index = False
counts = np.unique(self.label_store, return_counts=True)
elif self.symbol is not None:
index_vals = set(self.symbol)
label_map.set_index(label_map["symbol"].values, inplace=True)
reset_index = True
counts = np.unique(self.symbol, return_counts=True)
elif self.description is not None:
index_vals = set(self.description)
label_map.set_index(label_map["description"].values, inplace=True)
reset_index = True
counts = np.unique(self.description, return_counts=True)
else:
raise Exception("No annotation labels contained in object")
contained_labels = label_map.loc[list(index_vals), :]
# Add the counts
for i in range(len(counts[0])):
contained_labels.loc[counts[0][i], "n_occurrences"] = counts[1][i]
contained_labels["n_occurrences"] = pd.to_numeric(
contained_labels["n_occurrences"], downcast="integer"
)
if reset_index:
contained_labels.set_index(
contained_labels["label_store"].values, inplace=True
)
if inplace:
self.contained_labels = contained_labels
return
else:
return contained_labels
def set_label_elements(self, wanted_label_elements):
"""
Set one or more label elements based on at least one of the others.
Parameters
----------
wanted_label_elements : list
All of the desired label elements.
Returns
-------
N/A
"""
if isinstance(wanted_label_elements, str):
wanted_label_elements = [wanted_label_elements]
# Figure out which desired label elements are missing
missing_elements = [
e for e in wanted_label_elements if getattr(self, e) is None
]
contained_elements = [
e for e in ann_label_fields if getattr(self, e) is not None
]
if not contained_elements:
raise Exception("No annotation labels contained in object")
for e in missing_elements:
self.convert_label_attribute(contained_elements[0], e)
unwanted_label_elements = list(
set(ann_label_fields) - set(wanted_label_elements)
)
self.rm_attributes(unwanted_label_elements)
return
def rm_attributes(self, attributes):
"""
Remove attributes from object.
Parameters
----------
attributes : list
All of the desired attributes to remove.
Returns
-------
N/A
"""
if isinstance(attributes, str):
attributes = [attributes]
for a in attributes:
setattr(self, a, None)
return
def convert_label_attribute(
self, source_field, target_field, inplace=True, overwrite=True
):
"""
Convert one label attribute (label_store, symbol, or description) to
another. Creates mapping df on the fly based on ann_label_table and
self.custom_labels.
Parameters
----------
source_field : str
The label attribute to be converted.
target_field : str
The label attribute that will be converted to.
inplace : bool, optional
Determines whether to add the label map to the current
object (True) or as a return variable (False).
overwrite : bool, optional
If True, performs conversion and replaces target field attribute
even if the target attribute already has a value. If False, does
not perform conversion in the aforementioned case. Set to
True (do conversion) if inplace=False.
Returns
-------
target_item : list
All of the desired target fields.
"""
if inplace and not overwrite:
if getattr(self, target_field) is not None:
return
label_map = self.create_label_map(inplace=False)
label_map.set_index(label_map[source_field].values, inplace=True)
try:
target_item = label_map.loc[
getattr(self, source_field), target_field
].values
except KeyError:
target_item = label_map.reindex(
index=getattr(self, source_field), columns=[target_field]
).values.flatten()
if target_field != "label_store":
# Should already be int64 dtype if target is label_store
target_item = list(target_item)
if inplace:
setattr(self, target_field, target_item)
else:
return target_item
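# A minimal usage sketch for the class above (assumes the wfdb package is
# installed and importable; the record name and annotation values are
# illustrative, taken from the constructor example in the class docstring):
import numpy as np
import wfdb

ann = wfdb.Annotation(
    record_name="rec1",
    extension="atr",
    sample=np.array([10, 20, 400]),
    symbol=["N", "N", "["],
)
# Writes 'rec1.atr' in the current directory via wr_ann_file() internally.
ann.wrann(write_fs=False, write_dir=".")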
|
class Annotation(object):
'''
The class representing WFDB annotations.
Annotation objects can be created using the initializer, or by reading a
WFDB annotation file with `rdann`.
The attributes of the Annotation object give information about the
annotation as specified by:
https://www.physionet.org/physiotools/wag/annot-5.htm
Call `show_ann_labels()` to see the list of standard annotation codes. Any
text used to label annotations that are not one of these codes should go in
the 'aux_note' field rather than the 'sym' field.
The current annotation values organized as such:
AnnotationLabel(label_store (or subtype), symbol (or aux_note), short_description, description)
where the associated values are:
ann_labels = [
AnnotationLabel(0, " ", 'NOTANN', 'Not an actual annotation'),
AnnotationLabel(1, "N", 'NORMAL', 'Normal beat'),
AnnotationLabel(2, "L", 'LBBB', 'Left bundle branch block beat'),
AnnotationLabel(3, "R", 'RBBB', 'Right bundle branch block beat'),
AnnotationLabel(4, "a", 'ABERR', 'Aberrated atrial premature beat'),
AnnotationLabel(5, "V", 'PVC', 'Premature ventricular contraction'),
AnnotationLabel(6, "F", 'FUSION', 'Fusion of ventricular and normal beat'),
AnnotationLabel(7, "J", 'NPC', 'Nodal (junctional) premature beat'),
AnnotationLabel(8, "A", 'APC', 'Atrial premature contraction'),
AnnotationLabel(9, "S", 'SVPB', 'Premature or ectopic supraventricular beat'),
AnnotationLabel(10, "E", 'VESC', 'Ventricular escape beat'),
AnnotationLabel(11, "j", 'NESC', 'Nodal (junctional) escape beat'),
AnnotationLabel(12, "/", 'PACE', 'Paced beat'),
AnnotationLabel(13, "Q", 'UNKNOWN', 'Unclassifiable beat'),
AnnotationLabel(14, "~", 'NOISE', 'Signal quality change'),
AnnotationLabel(16, "|", 'ARFCT', 'Isolated QRS-like artifact'),
AnnotationLabel(18, "s", 'STCH', 'ST change'),
AnnotationLabel(19, "T", 'TCH', 'T-wave change'),
AnnotationLabel(20, "*", 'SYSTOLE', 'Systole'),
AnnotationLabel(21, "D", 'DIASTOLE', 'Diastole'),
AnnotationLabel(22, '"', 'NOTE', 'Comment annotation'),
AnnotationLabel(23, "=", 'MEASURE', 'Measurement annotation'),
AnnotationLabel(24, "p", 'PWAVE', 'P-wave peak'),
AnnotationLabel(25, "B", 'BBB', 'Left or right bundle branch block'),
AnnotationLabel(26, "^", 'PACESP', 'Non-conducted pacer spike'),
AnnotationLabel(27, "t", 'TWAVE', 'T-wave peak'),
AnnotationLabel(28, "+", 'RHYTHM', 'Rhythm change'),
AnnotationLabel(29, "u", 'UWAVE', 'U-wave peak'),
AnnotationLabel(30, "?", 'LEARN', 'Learning'),
AnnotationLabel(31, "!", 'FLWAV', 'Ventricular flutter wave'),
AnnotationLabel(32, "[", 'VFON', 'Start of ventricular flutter/fibrillation'),
AnnotationLabel(33, "]", 'VFOFF', 'End of ventricular flutter/fibrillation'),
AnnotationLabel(34, "e", 'AESC', 'Atrial escape beat'),
AnnotationLabel(35, "n", 'SVESC', 'Supraventricular escape beat'),
AnnotationLabel(36, "@", 'LINK', 'Link to external data (aux_note contains URL)'),
AnnotationLabel(37, "x", 'NAPC', 'Non-conducted P-wave (blocked APB)'),
AnnotationLabel(38, "f", 'PFUS', 'Fusion of paced and normal beat'),
AnnotationLabel(39, "(", 'WFON', 'Waveform onset'),
AnnotationLabel(40, ")", 'WFOFF', 'Waveform end'),
AnnotationLabel(41, "r", 'RONT', 'R-on-T premature ventricular contraction')
]
The current annotation classes are organized as such:
AnnotationClass(extension, description, human_reviewed)
where the associated values are:
ann_classes = [
AnnotationClass('atr', 'Reference ECG annotations', True),
AnnotationClass('blh', 'Human reviewed beat labels', True),
AnnotationClass('blm', 'Machine beat labels', False),
AnnotationClass('alh', 'Human reviewed alarms', True),
AnnotationClass('alm', 'Machine alarms', False),
AnnotationClass('qrsc', 'Human reviewed QRS detections', True),
AnnotationClass('qrs', 'Machine QRS detections', False),
AnnotationClass('bph', 'Human reviewed BP beat detections', True),
AnnotationClass('bpm', 'Machine BP beat detections', False)
]
Attributes
----------
record_name : str
The base file name (without extension) of the record that the
annotation is associated with.
extension : str
The file extension of the file the annotation is stored in.
sample : ndarray
A numpy array containing the annotation locations in samples relative to
the beginning of the record.
symbol : list, numpy array, optional
The symbols used to display the annotation labels. List or numpy array.
If this field is present, `label_store` must not be present.
subtype : ndarray, optional
A numpy array containing the marked class/category of each annotation.
chan : ndarray, optional
A numpy array containing the signal channel associated with each
annotation.
num : ndarray, optional
A numpy array containing the labelled annotation number for each
annotation.
aux_note : list, optional
A list containing the auxiliary information string (or None for
annotations without notes) for each annotation.
fs : int, float, optional
The sampling frequency of the record.
label_store : ndarray, optional
The integer value used to store/encode each annotation label
description : list, optional
A list containing the descriptive string of each annotation label.
custom_labels : pandas dataframe, optional
The custom annotation labels defined in the annotation file. Maps
the relationship between the three label fields. The data type is a
pandas DataFrame with three columns:
['label_store', 'symbol', 'description'].
contained_labels : pandas dataframe, optional
The unique labels contained in this annotation. Same structure as
`custom_labels`.
Examples
--------
>>> ann1 = wfdb.Annotation(record_name='rec1', extension='atr',
sample=[10,20,400], symbol=['N','N','['],
aux_note=[None, None, 'Serious Vfib'])
'''
def __init__(
self,
record_name,
extension,
sample,
symbol=None,
subtype=None,
chan=None,
num=None,
aux_note=None,
fs=None,
label_store=None,
description=None,
custom_labels=None,
contained_labels=None,
):
pass
def __eq__(self, other):
'''
Equal comparison operator for objects of this type.
Parameters
----------
other : object
The object that is being compared to self.
Returns
-------
bool
Determines if the objects are equal (True) or not equal (False).
'''
pass
def apply_range(self, sampfrom=0, sampto=None):
'''
Filter the annotation attributes to keep only items between the
desired sample values.
Parameters
----------
sampfrom : int, optional
The minimum sample number for annotations to be returned.
sampto : int, optional
The maximum sample number for annotations to be returned.
'''
pass
def wrann(self, write_fs=False, write_dir=""):
'''
Write a WFDB annotation file from this object.
Parameters
----------
write_fs : bool, optional
Whether to write the `fs` attribute to the file.
write_dir : str, optional
The output directory in which the annotation file is written.
Returns
-------
N/A
'''
pass
def get_label_fields(self):
'''
Get the present label fields in the object.
Parameters
----------
N/A
Returns
-------
present_label_fields : list
All of the present label fields in the object.
'''
pass
def check_fields(self):
'''
Check the set fields of the annotation object.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def check_field(self, field):
'''
Check a particular annotation field.
Parameters
----------
field : str
The annotation field to be checked.
Returns
-------
N/A
'''
pass
def check_field_cohesion(self, present_label_fields):
'''
Check that the content and structure of different fields are consistent
with one another.
Parameters
----------
present_label_fields : list
All of the present label fields in the object.
Returns
-------
N/A
'''
pass
def standardize_custom_labels(self):
'''
Set the custom_labels field of the object to a standardized format:
3 column pandas df with ann_label_fields as columns.
Does nothing if there are no custom labels defined.
Does nothing if custom_labels is already a df with all 3 columns.
If custom_labels is an iterable of pairs/triplets, this
function will convert it into a df.
If the label_store attribute is not already defined, this
function will automatically choose values by trying to use:
1. The undefined store values from the standard WFDB annotation
label map.
2. The unused label store values. This is extracted by finding the
set of all labels contained in this annotation object and seeing
which symbols/descriptions are not used.
If more custom labels are defined than there are available spaces,
even under condition 2 above, this function will raise an error.
This function must work when called as a standalone.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def get_undefined_label_stores(self):
'''
Get the label_store values not defined in the
standard WFDB annotation labels.
Parameters
----------
N/A
Returns
-------
list
The label_store values not found in WFDB annotation labels.
'''
pass
def get_available_label_stores(self, usefield="tryall"):
'''
Get the label store values that may be used
for writing this annotation.
Available store values include:
- the undefined values in the standard WFDB labels
- the store values not used in the current
annotation object.
- the store values whose standard WFDB symbols/descriptions
match those of the custom labels (if custom_labels exists)
Parameters
----------
usefield : str, optional
If 'usefield' is explicitly specified, the function will use that
field to figure out available label stores. If 'usefield'
is set to 'tryall', the function will choose one of the contained
attributes by checking availability in the order: label_store, symbol, description.
Returns
-------
available_label_stores : set
The available store values used for writing the annotation.
'''
pass
def get_custom_label_attribute(self, attribute):
'''
Get a list of the custom_labels attribute i.e. label_store,
symbol, or description. The custom_labels variable could be in
a number of formats.
Parameters
----------
attribute : str
The selected attribute to generate the list.
Returns
-------
a : list
All of the custom_labels attributes.
'''
pass
def create_label_map(self, inplace=True):
'''
Creates mapping df based on ann_label_table and self.custom_labels. Table
composed of entire WFDB standard annotation table, overwritten/appended
with custom_labels if any. Sets __label_map__ attribute, or returns value.
Parameters
----------
inplace : bool, optional
Determines whether to add the label map to the current
object (True) or as a return variable (False).
Returns
-------
label_map : pandas DataFrame
Mapping based on ann_label_table and self.custom_labels.
'''
pass
def wr_ann_file(self, write_fs, write_dir=""):
'''
Calculate the bytes used to encode an annotation set and
write them to an annotation file.
Parameters
----------
write_fs : bool
Whether to write the `fs` attribute to the file.
write_dir : str, optional
The output directory in which the annotation file is written.
Returns
-------
N/A
'''
pass
def calc_fs_bytes(self):
'''
Calculate the bytes written to the annotation file for the fs field.
Parameters
----------
N/A
Returns
-------
list, ndarray
All of the bytes to be written to the annotation file.
'''
pass
def calc_cl_bytes(self):
'''
Calculate the bytes written to the annotation file for the
custom_labels field.
Parameters
----------
N/A
Returns
-------
list, ndarray
All of the bytes to be written to the annotation file.
'''
pass
def calc_core_bytes(self):
'''
Convert all used annotation fields into bytes to write.
Parameters
----------
N/A
Returns
-------
list, ndarray
All of the bytes to be written to the annotation file.
'''
pass
def compact_fields(self):
'''
Compact all of the object's fields so that the output
writing annotation file writes as few bytes as possible.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def sym_to_aux(self):
'''
Move non-encoded symbol elements into the aux_note field.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def get_contained_labels(self, inplace=True):
'''
Get the set of unique labels contained in this annotation.
Returns a pandas dataframe or sets the contained_labels
attribute of the object. Requires the label_store field to be set.
Function will try to use attributes contained in the order:
1. label_store
2. symbol
3. description
This function should also be called to summarize information about an
annotation after it has been read. Should not be a helper function
to others except rdann.
Parameters
----------
inplace : bool, optional
Determines whether to add the label map to the current
object (True) or as a return variable (False).
Returns
-------
contained_labels : pandas DataFrame
Mapping based on ann_label_table and self.custom_labels.
'''
pass
def set_label_elements(self, wanted_label_elements):
'''
Set one or more label elements based on at least one of the others.
Parameters
----------
wanted_label_elements : list
All of the desired label elements.
Returns
-------
N/A
'''
pass
def rm_attributes(self, attributes):
'''
Remove attributes from object.
Parameters
----------
attributes : list
All of the desired attributes to remove.
Returns
-------
N/A
'''
pass
def convert_label_attribute(
self, source_field, target_field, inplace=True, overwrite=True
):
'''
Convert one label attribute (label_store, symbol, or description) to
another. Creates mapping df on the fly based on ann_label_table and
self.custom_labels.
Parameters
----------
source_field : str
The label attribute to be converted.
target_field : str
The label attribute that will be converted to.
inplace : bool, optional
Determines whether to add the label map to the current
object (True) or as a return variable (False).
overwrite : bool, optional
If True, performs conversion and replaces target field attribute
even if the target attribute already has a value. If False, does
not perform conversion in the aforementioned case. Set to
True (do conversion) if inplace=False.
Returns
-------
target_item : list
All of the desired target fields.
'''
pass
| 24 | 23 | 57 | 8 | 33 | 16 | 7 | 0.64 | 1 | 14 | 0 | 0 | 23 | 15 | 23 | 23 | 1,474 | 231 | 758 | 139 | 717 | 485 | 405 | 120 | 381 | 47 | 1 | 4 | 171 |
147,698 |
MIT-LCP/wfdb-python
|
wfdb/io/annotation.py
|
wfdb.io.annotation.AnnotationClass
|
class AnnotationClass(object):
"""
Describes the annotations.
Attributes
----------
extension : str
The file extension of the annotation.
description : str
The description provided with the annotation.
human_reviewed : bool
Whether the annotation was human-reviewed (True) or not (False).
"""
def __init__(self, extension, description, human_reviewed):
self.extension = extension
self.description = description
self.human_reviewed = human_reviewed
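# Small sketch of how instances of this container class are built; the values
# come from the ann_classes listing quoted in the Annotation docstring above.
ref_ann = AnnotationClass("atr", "Reference ECG annotations", True)
machine_qrs = AnnotationClass("qrs", "Machine QRS detections", False)
print(ref_ann.extension, ref_ann.human_reviewed)  # -> atr True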
|
class AnnotationClass(object):
'''
Describes the annotations.
Attributes
----------
extension : str
The file extension of the annotation.
description : str
The description provided with the annotation.
human_reviewed : bool
Whether the annotation was human-reviewed (True) or not (False).
'''
def __init__(self, extension, description, human_reviewed):
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 1 | 2.2 | 1 | 0 | 0 | 0 | 1 | 3 | 1 | 1 | 19 | 3 | 5 | 5 | 3 | 11 | 5 | 5 | 3 | 1 | 1 | 0 | 1 |
147,699 |
MIT-LCP/wfdb-python
|
wfdb/processing/qrs.py
|
wfdb.processing.qrs.XQRS
|
class XQRS(object):
"""
The QRS detector class for the XQRS algorithm. The `XQRS.Conf`
class is the configuration class that stores initial parameters
for the detection. The `XQRS.detect` method runs the detection algorithm.
The process works as follows:
- Load the signal and configuration parameters.
- Bandpass filter the signal between 5 and 20 Hz, to get the
filtered signal.
- Apply moving wave integration (MWI) with a Ricker
(Mexican hat) wavelet onto the filtered signal, and save the
square of the integrated signal.
- Conduct learning if specified, to initialize running
parameters of noise and QRS amplitudes, the QRS detection
threshold, and recent R-R intervals. If learning is unspecified
or fails, use default parameters. See the docstring for the
`_learn_init_params` method of this class for details.
- Run the main detection. Iterate through the local maxima of
the MWI signal. For each local maximum:
- Check if it is a QRS complex. To be classified as a QRS,
it must come after the refractory period, cross the QRS
detection threshold, and not be classified as a T-wave
if it comes close enough to the previous QRS. If
successfully classified, update running detection
threshold and heart rate parameters.
- If not a QRS, classify it as a noise peak and update
running parameters.
- Before continuing to the next local maxima, if no QRS
was detected within 1.66 times the recent R-R interval,
perform backsearch QRS detection. This checks previous
peaks using a lower QRS detection threshold.
Attributes
----------
sig : 1d ndarray
The input ECG signal to apply the QRS detection on.
fs : int, float
The sampling frequency of the input signal.
conf : XQRS.Conf object, optional
The configuration object specifying signal configuration
parameters. See the docstring of the XQRS.Conf class.
Examples
--------
>>> import wfdb
>>> from wfdb import processing
>>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
>>> xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs'])
>>> xqrs.detect()
>>> wfdb.plot_items(signal=sig, ann_samp=[xqrs.qrs_inds])
"""
def __init__(self, sig, fs, conf=None):
if sig.ndim != 1:
raise ValueError("sig must be a 1d numpy array")
self.sig = sig
self.fs = fs
self.sig_len = len(sig)
self.conf = conf or XQRS.Conf()
self._set_conf()
class Conf(object):
"""
Initial signal configuration object for this QRS detector.
Attributes
----------
hr_init : int, float, optional
Initial heart rate in beats per minute. Used for calculating
recent R-R intervals.
hr_max : int, float, optional
Hard maximum heart rate between two beats, in beats per
minute. Used for refractory period.
hr_min : int, float, optional
Hard minimum heart rate between two beats, in beats per
minute. Used for calculating recent R-R intervals.
qrs_width : int, float, optional
Expected QRS width in seconds. Used for filter widths and the
indirect refractory period.
qrs_thr_init : int, float, optional
Initial QRS detection threshold in mV. Use when learning
is False, or learning fails.
qrs_thr_min : int, float, string, optional
Hard minimum detection threshold of QRS wave. Leave as 0
for no minimum.
ref_period : int, float, optional
The QRS refractory period.
t_inspect_period : int, float, optional
The period below which a potential QRS complex is inspected to
see if it is a T-wave. Leave as 0 for no T-wave inspection.
"""
def __init__(
self,
hr_init=75,
hr_max=200,
hr_min=25,
qrs_width=0.1,
qrs_thr_init=0.13,
qrs_thr_min=0,
ref_period=0.2,
t_inspect_period=0,
):
if hr_min < 0:
raise ValueError("'hr_min' must be >= 0")
if not hr_min < hr_init < hr_max:
raise ValueError("'hr_min' < 'hr_init' < 'hr_max' must be True")
if qrs_thr_init < qrs_thr_min:
raise ValueError("qrs_thr_min must be <= qrs_thr_init")
self.hr_init = hr_init
self.hr_max = hr_max
self.hr_min = hr_min
self.qrs_width = qrs_width
self.qrs_radius = self.qrs_width / 2
self.qrs_thr_init = qrs_thr_init
self.qrs_thr_min = qrs_thr_min
self.ref_period = ref_period
self.t_inspect_period = t_inspect_period
def _set_conf(self):
"""
Set configuration parameters from the Conf object into the detector
object. Time values are converted to samples, and amplitude values
are in mV.
Parameters
----------
N/A
Returns
-------
N/A
"""
self.rr_init = 60 * self.fs / self.conf.hr_init
self.rr_max = 60 * self.fs / self.conf.hr_min
self.rr_min = 60 * self.fs / self.conf.hr_max
# Note: if qrs_width is odd, qrs_width == qrs_radius*2 + 1
self.qrs_width = int(self.conf.qrs_width * self.fs)
self.qrs_radius = int(self.conf.qrs_radius * self.fs)
self.qrs_thr_init = self.conf.qrs_thr_init
self.qrs_thr_min = self.conf.qrs_thr_min
self.ref_period = int(self.conf.ref_period * self.fs)
self.t_inspect_period = int(self.conf.t_inspect_period * self.fs)
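# Worked example (illustrative numbers): with fs = 250 Hz and the default
# Conf (hr_init=75, hr_min=25, hr_max=200, qrs_width=0.1 s, ref_period=0.2 s)
# the conversions above give:
#   rr_init = 60 * 250 / 75  = 200 samples
#   rr_max  = 60 * 250 / 25  = 600 samples
#   rr_min  = 60 * 250 / 200 = 75 samples
#   qrs_width = int(0.1 * 250) = 25 samples, qrs_radius = int(0.05 * 250) = 12
#   ref_period = int(0.2 * 250) = 50 samples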
def _bandpass(self, fc_low=5, fc_high=20):
"""
Apply a bandpass filter onto the signal, and save the filtered
signal.
Parameters
----------
fc_low : int, float
The low frequency cutoff for the filter.
fc_high : int, float
The high frequency cutoff for the filter.
Returns
-------
N/A
"""
self.fc_low = fc_low
self.fc_high = fc_high
b, a = signal.butter(
2,
[float(fc_low) * 2 / self.fs, float(fc_high) * 2 / self.fs],
"pass",
)
self.sig_f = signal.filtfilt(
b, a, self.sig[self.sampfrom : self.sampto], axis=0
)
# Save the passband gain (x2 due to double filtering)
self.filter_gain = (
get_filter_gain(b, a, np.mean([fc_low, fc_high]), self.fs) * 2
)
def _mwi(self):
"""
Apply moving wave integration (MWI) with a Ricker (Mexican hat)
wavelet onto the filtered signal, and save the square of the
integrated signal. The width of the hat is equal to the QRS width.
After integration, find all local peaks in the MWI signal.
Parameters
----------
N/A
Returns
-------
N/A
"""
wavelet_filter = ricker(self.qrs_width, 4)
self.sig_i = (
signal.filtfilt(wavelet_filter, [1], self.sig_f, axis=0) ** 2
)
# Save the MWI gain (x2 due to double filtering) and the total
# gain from raw to MWI
self.mwi_gain = (
get_filter_gain(
wavelet_filter,
[1],
np.mean([self.fc_low, self.fc_high]),
self.fs,
)
* 2
)
self.transform_gain = self.filter_gain * self.mwi_gain
self.peak_inds_i = find_local_peaks(self.sig_i, radius=self.qrs_radius)
self.n_peaks_i = len(self.peak_inds_i)
def _learn_init_params(self, n_calib_beats=8):
"""
Find a number of consecutive beats and use them to initialize:
- recent QRS amplitude
- recent noise amplitude
- recent R-R interval
- QRS detection threshold
The learning works as follows:
- Find all local maxima (largest sample within `qrs_radius`
samples) of the filtered signal.
- Inspect the local maxima until `n_calib_beats` beats are
found:
- Calculate the cross-correlation between a Ricker wavelet of
length `qrs_width`, and the filtered signal segment centered
around the local maximum.
- If the cross-correlation exceeds 0.6, classify it as a beat.
- Use the beats to initialize the previously described
parameters.
- If the system fails to find enough beats, the default
parameters will be used instead. See the docstring of
`XQRS._set_default_init_params` for details.
Parameters
----------
n_calib_beats : int, optional
Number of calibration beats to detect for learning
Returns
-------
N/A
"""
if self.verbose:
print("Learning initial signal parameters...")
last_qrs_ind = -self.rr_max
qrs_inds = []
qrs_amps = []
noise_amps = []
ricker_wavelet = ricker(self.qrs_radius * 2, 4).reshape(-1, 1)
# Find the local peaks of the signal.
peak_inds_f = find_local_peaks(self.sig_f, self.qrs_radius)
# Peak numbers at least qrs_width away from signal boundaries
peak_nums_r = np.where(peak_inds_f > self.qrs_width)[0]
peak_nums_l = np.where(peak_inds_f <= self.sig_len - self.qrs_width)[0]
# Skip if no peaks in range
if not peak_inds_f.size or not peak_nums_r.size or not peak_nums_l.size:
if self.verbose:
print(
"Failed to find %d beats during learning." % n_calib_beats
)
self._set_default_init_params()
return
# Go through the peaks and find QRS peaks and noise peaks.
# only inspect peaks with at least qrs_radius around either side
for peak_num in range(peak_nums_r[0], peak_nums_l[-1]):
i = peak_inds_f[peak_num]
# Calculate cross-correlation between the filtered signal
# segment and a Ricker wavelet
# Question: should the signal be squared? Case for inverse QRS
# complexes
sig_segment = normalize(
self.sig_f[i - self.qrs_radius : i + self.qrs_radius]
)
xcorr = np.correlate(sig_segment, ricker_wavelet[:, 0])
# Classify as QRS if xcorr is large enough
if xcorr > 0.6 and i - last_qrs_ind > self.rr_min:
last_qrs_ind = i
qrs_inds.append(i)
qrs_amps.append(self.sig_i[i])
else:
noise_amps.append(self.sig_i[i])
if len(qrs_inds) == n_calib_beats:
break
# Found enough calibration beats to initialize parameters
if len(qrs_inds) == n_calib_beats:
if self.verbose:
print(
"Found %d beats during learning." % n_calib_beats
+ " Initializing using learned parameters"
)
# QRS amplitude is most important.
qrs_amp = np.mean(qrs_amps)
# Set noise amplitude if found
if noise_amps:
noise_amp = np.mean(noise_amps)
else:
# Set default of 1/10 of QRS amplitude
noise_amp = qrs_amp / 10
# Get R-R intervals of consecutive beats, if any.
rr_intervals = np.diff(qrs_inds)
rr_intervals = rr_intervals[rr_intervals < self.rr_max]
if rr_intervals.any():
rr_recent = np.mean(rr_intervals)
else:
rr_recent = self.rr_init
# If an early QRS was detected, set last_qrs_ind so that it can be
# picked up.
last_qrs_ind = min(0, qrs_inds[0] - self.rr_min - 1)
self._set_init_params(
qrs_amp_recent=qrs_amp,
noise_amp_recent=noise_amp,
rr_recent=rr_recent,
last_qrs_ind=last_qrs_ind,
)
self.learned_init_params = True
# Failed to find enough calibration beats. Use default values.
else:
if self.verbose:
print(
"Failed to find %d beats during learning." % n_calib_beats
)
self._set_default_init_params()
def _set_init_params(
self, qrs_amp_recent, noise_amp_recent, rr_recent, last_qrs_ind
):
"""
Set initial online parameters.
Parameters
----------
qrs_amp_recent : int, float
The mean of the signal QRS amplitudes.
noise_amp_recent : int, float
The mean of the signal noise amplitudes.
rr_recent : int
The mean of the signal R-R interval values.
last_qrs_ind : int
The index of the signal's early QRS detected.
Returns
-------
N/A
"""
self.qrs_amp_recent = qrs_amp_recent
self.noise_amp_recent = noise_amp_recent
# What happens if qrs_thr is calculated to be less than the explicit
# min threshold? Should print warning?
self.qrs_thr = max(
0.25 * self.qrs_amp_recent + 0.75 * self.noise_amp_recent,
self.qrs_thr_min * self.transform_gain,
)
self.rr_recent = rr_recent
self.last_qrs_ind = last_qrs_ind
# No QRS detected initially
self.last_qrs_peak_num = None
def _set_default_init_params(self):
"""
Set initial running parameters using default values.
The steady state equation is:
`qrs_thr = 0.25*qrs_amp + 0.75*noise_amp`
Estimate that QRS amp is 10x noise amp, giving:
`qrs_thr = 0.325 * qrs_amp or 13/40 * qrs_amp`
Parameters
----------
N/A
Returns
-------
N/A
"""
if self.verbose:
print("Initializing using default parameters")
# Multiply the specified ECG thresholds by the filter and MWI gain
# factors
qrs_thr_init = self.qrs_thr_init * self.transform_gain
qrs_thr_min = self.qrs_thr_min * self.transform_gain
qrs_amp = 27 / 40 * qrs_thr_init
noise_amp = qrs_amp / 10
rr_recent = self.rr_init
last_qrs_ind = 0
self._set_init_params(
qrs_amp_recent=qrs_amp,
noise_amp_recent=noise_amp,
rr_recent=rr_recent,
last_qrs_ind=last_qrs_ind,
)
self.learned_init_params = False
def _is_qrs(self, peak_num, backsearch=False):
"""
Check whether a peak is a QRS complex. It is classified as QRS
if it:
- Comes after the refractory period.
- Passes QRS threshold.
- Is not a T-wave (check it if the peak is close to the previous QRS).
Parameters
----------
peak_num : int
The peak number of the MWI signal to be inspected.
backsearch: bool, optional
Whether the peak is being inspected during backsearch.
Returns
-------
bool
Whether the peak is QRS (True) or not (False).
"""
i = self.peak_inds_i[peak_num]
if backsearch:
qrs_thr = self.qrs_thr / 2
else:
qrs_thr = self.qrs_thr
if i - self.last_qrs_ind > self.ref_period and self.sig_i[i] > qrs_thr:
if i - self.last_qrs_ind < self.t_inspect_period:
if self._is_twave(peak_num):
return False
return True
return False
def _update_qrs(self, peak_num, backsearch=False):
"""
Update live QRS parameters. Adjust the recent R-R intervals and
QRS amplitudes, and the QRS threshold.
Parameters
----------
peak_num : int
The peak number of the MWI signal where the QRS is detected.
backsearch: bool, optional
Whether the QRS was found via backsearch.
Returns
-------
N/A
"""
i = self.peak_inds_i[peak_num]
# Update recent R-R interval if the beat is consecutive (do this
# before updating self.last_qrs_ind)
rr_new = i - self.last_qrs_ind
if rr_new < self.rr_max:
self.rr_recent = 0.875 * self.rr_recent + 0.125 * rr_new
self.qrs_inds.append(i)
self.last_qrs_ind = i
# Peak number corresponding to last QRS
self.last_qrs_peak_num = self.peak_num
# QRS recent amplitude is adjusted twice as quickly if the peak
# was found via backsearch
if backsearch:
self.backsearch_qrs_inds.append(i)
self.qrs_amp_recent = (
0.75 * self.qrs_amp_recent + 0.25 * self.sig_i[i]
)
else:
self.qrs_amp_recent = (
0.875 * self.qrs_amp_recent + 0.125 * self.sig_i[i]
)
self.qrs_thr = max(
(0.25 * self.qrs_amp_recent + 0.75 * self.noise_amp_recent),
self.qrs_thr_min,
)
return
def _is_twave(self, peak_num):
"""
Check whether a segment is a T-wave. Compare the maximum gradient of
the filtered signal segment with that of the previous QRS segment.
Parameters
----------
peak_num : int
The peak number of the MWI signal where the QRS is detected.
Returns
-------
bool
Whether a segment is a T-wave (True) or not (False).
"""
i = self.peak_inds_i[peak_num]
# Due to initialization parameters, last_qrs_ind may be negative.
# No way to check in this instance.
if self.last_qrs_ind - self.qrs_radius < 0:
return False
# Get half the QRS width of the signal to the left.
# Should this be squared?
sig_segment = normalize(self.sig_f[i - self.qrs_radius : i])
last_qrs_segment = self.sig_f[
self.last_qrs_ind - self.qrs_radius : self.last_qrs_ind
]
segment_slope = np.diff(sig_segment)
last_qrs_slope = np.diff(last_qrs_segment)
# Should we be using absolute values?
if max(segment_slope) < 0.5 * max(abs(last_qrs_slope)):
return True
else:
return False
def _update_noise(self, peak_num):
"""
Update live noise parameters.
Parameters
----------
peak_num : int
The peak number.
Returns
-------
N/A
"""
i = self.peak_inds_i[peak_num]
self.noise_amp_recent = (
0.875 * self.noise_amp_recent + 0.125 * self.sig_i[i]
)
return
def _require_backsearch(self):
"""
Determine whether a backsearch should be performed on prior peaks.
Parameters
----------
N/A
Returns
-------
bool
Whether to require backsearch (True) or not (False).
"""
if self.peak_num == self.n_peaks_i - 1:
# If we just return false, we may miss a chance to backsearch.
# Update this?
return False
next_peak_ind = self.peak_inds_i[self.peak_num + 1]
if next_peak_ind - self.last_qrs_ind > self.rr_recent * 1.66:
return True
else:
return False
def _backsearch(self):
"""
Inspect previous peaks from the last detected QRS peak (if any),
using a lower threshold.
Parameters
----------
N/A
Returns
-------
N/A
"""
if self.last_qrs_peak_num is not None:
for peak_num in range(
self.last_qrs_peak_num + 1, self.peak_num + 1
):
if self._is_qrs(peak_num=peak_num, backsearch=True):
self._update_qrs(peak_num=peak_num, backsearch=True)
# No need to update noise parameters if it was classified as
# noise. It would have already been updated.
def _run_detection(self):
"""
Run the QRS detection after all signals and parameters have been
configured and set.
Parameters
----------
N/A
Returns
-------
N/A
"""
if self.verbose:
print("Running QRS detection...")
# Detected QRS indices
self.qrs_inds = []
# QRS indices found via backsearch
self.backsearch_qrs_inds = []
# Iterate through MWI signal peak indices
for self.peak_num in range(self.n_peaks_i):
if self._is_qrs(self.peak_num):
self._update_qrs(self.peak_num)
else:
self._update_noise(self.peak_num)
# Before continuing to the next peak, do backsearch if
# necessary
if self._require_backsearch():
self._backsearch()
# Detected indices are relative to starting sample
if self.qrs_inds:
self.qrs_inds = np.array(self.qrs_inds) + self.sampfrom
else:
self.qrs_inds = np.array(self.qrs_inds)
if self.verbose:
print("QRS detection complete.")
def detect(self, sampfrom=0, sampto="end", learn=True, verbose=True):
"""
Detect QRS locations between two samples.
Parameters
----------
sampfrom : int, optional
The starting sample number to run the detection on.
sampto : int, optional
The final sample number to run the detection on. Set as
'end' to run on the entire signal.
learn : bool, optional
Whether to apply learning on the signal before running the
main detection. If learning fails or is not conducted, the
default configuration parameters will be used to initialize
these variables. See the `XQRS._learn_init_params` docstring
for details.
verbose : bool, optional
Whether to display the stages and outcomes of the detection
process.
Returns
-------
N/A
"""
if sampfrom < 0:
raise ValueError("'sampfrom' cannot be negative")
self.sampfrom = sampfrom
if sampto == "end":
sampto = self.sig_len
elif sampto > self.sig_len:
raise ValueError("'sampto' cannot exceed the signal length")
self.sampto = sampto
self.verbose = verbose
# Don't attempt to run on a flat signal
if np.max(self.sig) == np.min(self.sig):
self.qrs_inds = np.empty(0)
if self.verbose:
print("Flat signal. Detection skipped.")
return
# Get/set signal configuration fields from Conf object
self._set_conf()
# Bandpass filter the signal
self._bandpass()
# Compute moving wave integration of filtered signal
self._mwi()
# Initialize the running parameters
if learn:
self._learn_init_params()
else:
self._set_default_init_params()
# Run the detection
self._run_detection()
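# The adaptive-threshold arithmetic used by _set_init_params(), _update_qrs()
# and _update_noise() above can be isolated for illustration. This is a
# simplified sketch, not part of the wfdb API; the function and argument
# names are hypothetical.
def update_running_estimates(
    qrs_amp_recent, noise_amp_recent, peak_amp, qrs_thr_min,
    is_qrs, backsearch=False,
):
    """Exponential moving averages behind the XQRS detection threshold."""
    if is_qrs:
        # The QRS amplitude adapts twice as fast for beats found via backsearch.
        w = 0.25 if backsearch else 0.125
        qrs_amp_recent = (1 - w) * qrs_amp_recent + w * peak_amp
    else:
        noise_amp_recent = 0.875 * noise_amp_recent + 0.125 * peak_amp
    qrs_thr = max(0.25 * qrs_amp_recent + 0.75 * noise_amp_recent, qrs_thr_min)
    return qrs_amp_recent, noise_amp_recent, qrs_thr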
|
class XQRS(object):
'''
The QRS detector class for the XQRS algorithm. The `XQRS.Conf`
class is the configuration class that stores initial parameters
for the detection. The `XQRS.detect` method runs the detection algorithm.
The process works as follows:
- Load the signal and configuration parameters.
- Bandpass filter the signal between 5 and 20 Hz, to get the
filtered signal.
- Apply moving wave integration (MWI) with a Ricker
(Mexican hat) wavelet onto the filtered signal, and save the
square of the integrated signal.
- Conduct learning if specified, to initialize running
parameters of noise and QRS amplitudes, the QRS detection
threshold, and recent R-R intervals. If learning is unspecified
or fails, use default parameters. See the docstring for the
`_learn_init_params` method of this class for details.
- Run the main detection. Iterate through the local maxima of
the MWI signal. For each local maximum:
- Check if it is a QRS complex. To be classified as a QRS,
it must come after the refractory period, cross the QRS
detection threshold, and not be classified as a T-wave
if it comes close enough to the previous QRS. If
successfully classified, update running detection
threshold and heart rate parameters.
- If not a QRS, classify it as a noise peak and update
running parameters.
- Before continuing to the next local maxima, if no QRS
was detected within 1.66 times the recent R-R interval,
perform backsearch QRS detection. This checks previous
peaks using a lower QRS detection threshold.
Attributes
----------
sig : 1d ndarray
The input ECG signal to apply the QRS detection on.
fs : int, float
The sampling frequency of the input signal.
conf : XQRS.Conf object, optional
The configuration object specifying signal configuration
parameters. See the docstring of the XQRS.Conf class.
Examples
--------
>>> import wfdb
>>> from wfdb import processing
>>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
>>> xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs'])
>>> xqrs.detect()
>>> wfdb.plot_items(signal=sig, ann_samp=[xqrs.qrs_inds])
'''
def __init__(self, sig, fs, conf=None):
pass
class Conf(object):
'''
Initial signal configuration object for this QRS detector.
Attributes
----------
hr_init : int, float, optional
Initial heart rate in beats per minute. Used for calculating
recent R-R intervals.
hr_max : int, float, optional
Hard maximum heart rate between two beats, in beats per
minute. Used for refractory period.
hr_min : int, float, optional
Hard minimum heart rate between two beats, in beats per
minute. Used for calculating recent R-R intervals.
qrs_width : int, float, optional
Expected QRS width in seconds. Used for filter widths and the
indirect refractory period.
qrs_thr_init : int, float, optional
Initial QRS detection threshold in mV. Use when learning
is False, or learning fails.
qrs_thr_min : int, float, string, optional
Hard minimum detection threshold of QRS wave. Leave as 0
for no minimum.
ref_period : int, float, optional
The QRS refractory period.
t_inspect_period : int, float, optional
The period below which a potential QRS complex is inspected to
see if it is a T-wave. Leave as 0 for no T-wave inspection.
'''
def __init__(
self,
hr_init=75,
hr_max=200,
hr_min=25,
qrs_width=0.1,
qrs_thr_init=0.13,
qrs_thr_min=0,
ref_period=0.2,
t_inspect_period=0,
):
pass
def _set_conf(self):
'''
Set configuration parameters from the Conf object into the detector
object. Time values are converted to samples, and amplitude values
are in mV.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def _bandpass(self, fc_low=5, fc_high=20):
'''
Apply a bandpass filter onto the signal, and save the filtered
signal.
Parameters
----------
fc_low : int, float
The low frequency cutoff for the filter.
fc_high : int, float
The high frequency cutoff for the filter.
Returns
-------
N/A
'''
pass
def _mwi(self):
'''
Apply moving wave integration (MWI) with a Ricker (Mexican hat)
wavelet onto the filtered signal, and save the square of the
integrated signal. The width of the hat is equal to the QRS width.
After integration, find all local peaks in the MWI signal.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def _learn_init_params(self, n_calib_beats=8):
'''
Find a number of consecutive beats and use them to initialize:
- recent QRS amplitude
- recent noise amplitude
- recent R-R interval
- QRS detection threshold
The learning works as follows:
- Find all local maxima (largest sample within `qrs_radius`
samples) of the filtered signal.
- Inspect the local maxima until `n_calib_beats` beats are
found:
- Calculate the cross-correlation between a Ricker wavelet of
length `qrs_width`, and the filtered signal segment centered
around the local maximum.
- If the cross-correlation exceeds 0.6, classify it as a beat.
- Use the beats to initialize the previously described
parameters.
- If the system fails to find enough beats, the default
parameters will be used instead. See the docstring of
`XQRS._set_default_init_params` for details.
Parameters
----------
n_calib_beats : int, optional
Number of calibration beats to detect for learning
Returns
-------
N/A
'''
pass
def _set_init_params(
self, qrs_amp_recent, noise_amp_recent, rr_recent, last_qrs_ind
):
'''
Set initial online parameters.
Parameters
----------
qrs_amp_recent : int, float
The mean of the signal QRS amplitudes.
noise_amp_recent : int, float
The mean of the signal noise amplitudes.
rr_recent : int
The mean of the signal R-R interval values.
last_qrs_ind : int
The index of the signal's early QRS detected.
Returns
-------
N/A
'''
pass
def _set_default_init_params(self):
'''
Set initial running parameters using default values.
The steady state equation is:
`qrs_thr = 0.25*qrs_amp + 0.75*noise_amp`
Estimate that QRS amp is 10x noise amp, giving:
`qrs_thr = 0.325 * qrs_amp or 13/40 * qrs_amp`
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def _is_qrs(self, peak_num, backsearch=False):
'''
Check whether a peak is a QRS complex. It is classified as QRS
if it:
- Comes after the refractory period.
- Passes QRS threshold.
- Is not a T-wave (check it if the peak is close to the previous QRS).
Parameters
----------
peak_num : int
The peak number of the MWI signal to be inspected.
backsearch: bool, optional
Whether the peak is being inspected during backsearch.
Returns
-------
bool
Whether the peak is QRS (True) or not (False).
'''
pass
def _update_qrs(self, peak_num, backsearch=False):
'''
Update live QRS parameters. Adjust the recent R-R intervals and
QRS amplitudes, and the QRS threshold.
Parameters
----------
peak_num : int
The peak number of the MWI signal where the QRS is detected.
backsearch: bool, optional
Whether the QRS was found via backsearch.
Returns
-------
N/A
'''
pass
def _is_twave(self, peak_num):
'''
Check whether a segment is a T-wave. Compare the maximum gradient of
the filtered signal segment with that of the previous QRS segment.
Parameters
----------
peak_num : int
The peak number of the MWI signal where the QRS is detected.
Returns
-------
bool
Whether a segment is a T-wave (True) or not (False).
'''
pass
def _update_noise(self, peak_num):
'''
Update live noise parameters.
Parameters
----------
peak_num : int
The peak number.
Returns
-------
N/A
'''
pass
def _require_backsearch(self):
'''
Determine whether a backsearch should be performed on prior peaks.
Parameters
----------
N/A
Returns
-------
bool
Whether to require backsearch (True) or not (False).
'''
pass
def _backsearch(self):
'''
Inspect previous peaks from the last detected QRS peak (if any),
using a lower threshold.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def _run_detection(self):
'''
Run the QRS detection after all signals and parameters have been
configured and set.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def detect(self, sampfrom=0, sampto="end", learn=True, verbose=True):
'''
Detect QRS locations between two samples.
Parameters
----------
sampfrom : int, optional
The starting sample number to run the detection on.
sampto : int, optional
The final sample number to run the detection on. Set as
'end' to run on the entire signal.
learn : bool, optional
Whether to apply learning on the signal before running the
main detection. If learning fails or is not conducted, the
default configuration parameters will be used to initialize
these variables. See the `XQRS._learn_init_params` docstring
for details.
verbose : bool, optional
Whether to display the stages and outcomes of the detection
process.
Returns
-------
N/A
'''
pass
| 18 | 16 | 39 | 6 | 17 | 16 | 4 | 1.17 | 1 | 5 | 1 | 0 | 15 | 35 | 15 | 15 | 730 | 126 | 278 | 110 | 248 | 326 | 201 | 97 | 183 | 12 | 1 | 3 | 57 |
147,700 |
MIT-LCP/wfdb-python
|
wfdb/processing/qrs.py
|
wfdb.processing.qrs.GQRS
|
class GQRS(object):
"""
GQRS detection class.
Attributes
----------
N/A
"""
class Conf(object):
"""
Initial signal configuration object for this QRS detector.
Attributes
----------
fs : int, float
The sampling frequency of the input signal.
adc_gain : int, float
The analogue to digital gain of the signal (the number of adus per
physical unit).
hr : int, float, optional
Typical heart rate, in beats per minute.
RRdelta : int, float, optional
Typical difference between successive RR intervals in seconds.
RRmin : int, float, optional
Minimum RR interval ("refractory period"), in seconds.
RRmax : int, float, optional
Maximum RR interval, in seconds. Thresholds will be adjusted if no
peaks are detected within this interval.
QS : int, float, optional
Typical QRS duration, in seconds.
QT : int, float, optional
Typical QT interval, in seconds.
RTmin : int, float, optional
Minimum interval between R and T peaks, in seconds.
RTmax : int, float, optional
Maximum interval between R and T peaks, in seconds.
QRSa : int, float, optional
Typical QRS peak-to-peak amplitude, in microvolts.
QRSamin : int, float, optional
Minimum QRS peak-to-peak amplitude, in microvolts.
thresh : int, float, optional
The relative amplitude detection threshold. Used to initialize the peak
and QRS detection threshold.
"""
def __init__(
self,
fs,
adc_gain,
hr=75,
RRdelta=0.2,
RRmin=0.28,
RRmax=2.4,
QS=0.07,
QT=0.35,
RTmin=0.25,
RTmax=0.33,
QRSa=750,
QRSamin=130,
thresh=1.0,
):
self.fs = fs
self.sps = int(time_to_sample_number(1, fs))
self.spm = int(time_to_sample_number(60, fs))
self.hr = hr
self.RR = 60.0 / self.hr
self.RRdelta = RRdelta
self.RRmin = RRmin
self.RRmax = RRmax
self.QS = QS
self.QT = QT
self.RTmin = RTmin
self.RTmax = RTmax
self.QRSa = QRSa
self.QRSamin = QRSamin
self.thresh = thresh
self._NORMAL = 1 # normal beat
self._ARFCT = 16 # isolated QRS-like artifact
self._NOTE = 22 # comment annotation
self._TWAVE = 27 # T-wave peak
self._NPEAKS = 64 # number of peaks buffered (per signal)
self._BUFLN = 32768 # must be a power of 2, see qf()
self.rrmean = int(self.RR * self.sps)
self.rrdev = int(self.RRdelta * self.sps)
self.rrmin = int(self.RRmin * self.sps)
self.rrmax = int(self.RRmax * self.sps)
self.rrinc = int(self.rrmean / 40)
if self.rrinc < 1:
self.rrinc = 1
self.dt = int(self.QS * self.sps / 4)
if self.dt < 1:
raise Exception(
"Sampling rate is too low. Unable to use signal."
)
self.rtmin = int(self.RTmin * self.sps)
self.rtmean = int(0.75 * self.QT * self.sps)
self.rtmax = int(self.RTmax * self.sps)
dv = adc_gain * self.QRSamin * 0.001
self.pthr = int((self.thresh * dv * dv) / 6)
self.qthr = self.pthr << 1
self.pthmin = self.pthr >> 2
self.qthmin = int((self.pthmin << 2) / 3)
self.tamean = self.qthr # initial value for mean T-wave amplitude
# Filter constants and thresholds.
self.dt2 = 2 * self.dt
self.dt3 = 3 * self.dt
self.dt4 = 4 * self.dt
self.smdt = self.dt
self.v1norm = self.smdt * self.dt * 64
self.smt = 0
self.smt0 = 0 + self.smdt
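# Worked example (illustrative numbers, e.g. fs = 250 Hz, adc_gain = 200):
#   sps = 250, RR = 60/75 = 0.8 s -> rrmean = 200 samples, rrmin = 70, rrmax = 600
#   dt = int(0.07 * 250 / 4) = 4 samples (a quarter of the typical QRS width)
#   dv = 200 * 130 * 0.001 = 26 adu -> pthr = int(26 * 26 / 6) = 112, qthr = 224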
class Peak(object):
"""
Holds all of the peak information for the QRS object.
Attributes
----------
peak_time : int, float
The time of the peak.
peak_amp : int, float
The amplitude of the peak.
peak_type : str
The type of the peak.
"""
def __init__(self, peak_time, peak_amp, peak_type):
self.time = peak_time
self.amp = peak_amp
self.type = peak_type
self.next_peak = None
self.prev_peak = None
class Annotation(object):
"""
Holds all of the annotation information for the QRS object.
Attributes
----------
ann_time : int, float
The time of the annotation.
ann_type : str
The type of the annotation.
ann_subtype : int
The subtype of the annotation.
ann_num : int
The number of the annotation.
"""
def __init__(self, ann_time, ann_type, ann_subtype, ann_num):
self.time = ann_time
self.type = ann_type
self.subtype = ann_subtype
self.num = ann_num
def putann(self, annotation):
"""
Add an annotation to the object.
Parameters
----------
annotation : Annotation object
The annotation to be added.
Returns
-------
N/A
"""
self.annotations.append(copy.deepcopy(annotation))
def detect(self, x, conf, adc_zero):
"""
Run detection.
Parameters
----------
x : ndarray
Array containing the digital signal.
conf : XQRS.Conf object
The configuration object specifying signal configuration
parameters. See the docstring of the XQRS.Conf class.
adc_zero : int
The value produced by the ADC given a 0 Volt input.
Returns
-------
QRS object
The annotations that have been detected.
"""
self.c = conf
self.annotations = []
self.sample_valid = False
if len(x) < 1:
return []
self.x = x
self.adc_zero = adc_zero
self.qfv = np.zeros((self.c._BUFLN), dtype="int64")
self.smv = np.zeros((self.c._BUFLN), dtype="int64")
self.v1 = 0
t0 = 0
self.tf = len(x) - 1
self.t = 0 - self.c.dt4
self.annot = GQRS.Annotation(0, "NOTE", 0, 0)
# Circular buffer of Peaks
first_peak = GQRS.Peak(0, 0, 0)
tmp = first_peak
for _ in range(1, self.c._NPEAKS):
tmp.next_peak = GQRS.Peak(0, 0, 0)
tmp.next_peak.prev_peak = tmp
tmp = tmp.next_peak
tmp.next_peak = first_peak
first_peak.prev_peak = tmp
self.current_peak = first_peak
if self.c.spm > self.c._BUFLN:
if self.tf - t0 > self.c._BUFLN:
tf_learn = t0 + self.c._BUFLN - self.c.dt4
else:
tf_learn = self.tf - self.c.dt4
else:
if self.tf - t0 > self.c.spm:
tf_learn = t0 + self.c.spm - self.c.dt4
else:
tf_learn = self.tf - self.c.dt4
self.countdown = -1
self.state = "LEARNING"
self.gqrs(t0, tf_learn)
self.rewind_gqrs()
self.state = "RUNNING"
self.t = t0 - self.c.dt4
self.gqrs(t0, self.tf)
return self.annotations
def rewind_gqrs(self):
"""
Rewind the gqrs.
Parameters
----------
N/A
Returns
-------
N/A
"""
self.countdown = -1
self.at(self.t)
self.annot.time = 0
self.annot.type = "NORMAL"
self.annot.subtype = 0
self.annot.num = 0
p = self.current_peak
for _ in range(self.c._NPEAKS):
p.time = 0
p.type = 0
p.amp = 0
p = p.next_peak
def at(self, t):
"""
Determine the value of the sample at the specified time.
Parameters
----------
t : int
The time to search for the sample value.
Returns
-------
int or float
The value of the sample at time `t`.
"""
if t < 0:
self.sample_valid = True
return self.x[0]
if t > len(self.x) - 1:
self.sample_valid = False
return self.x[-1]
self.sample_valid = True
return self.x[t]
def smv_at(self, t):
"""
Determine the SMV value of the sample at the specified time.
Parameters
----------
t : int
The time to search for the sample SMV value.
Returns
-------
int
The smoothed (SMV) buffer value at time `t`.
"""
return self.smv[t & (self.c._BUFLN - 1)]
def smv_put(self, t, v):
"""
Put the SMV value of the sample at the specified time.
Parameters
----------
t : int
The time to search for the sample value.
v : int
The value of the SMV.
Returns
-------
N/A
"""
self.smv[t & (self.c._BUFLN - 1)] = v
def qfv_at(self, t):
"""
Determine the QFV value of the sample at the specified time.
Parameters
----------
t : int
The time to search for the sample QFV value.
Returns
-------
int
The QRS filter (QFV) buffer value at time `t`.
"""
return self.qfv[t & (self.c._BUFLN - 1)]
def qfv_put(self, t, v):
"""
Put the QFV value of the sample at the specified time.
Parameters
----------
t : int
The time with which to start the analysis.
v : int
The value of the QFV.
Returns
-------
N/A
"""
self.qfv[t & (self.c._BUFLN - 1)] = v
def sm(self, at_t):
"""
Implements a trapezoidal low pass (smoothing) filter (with a gain
of 4*smdt) applied to input signal sig before the QRS matched
filter qf(). Before attempting to 'rewind' by more than BUFLN-smdt
samples, reset smt and smt0.
Parameters
----------
at_t : int
The time where the filter will be implemented.
Returns
-------
smv_at : ndarray
The smoothed signal.
"""
# Calculate samp values from self.smt to at_t.
smt = self.c.smt
smdt = int(self.c.smdt)
v = 0
while at_t > smt:
smt += 1
# from dt+1 onwards
if smt > int(self.c.smt0):
tmp = int(
self.smv_at(smt - 1)
+ self.at(smt + smdt)
+ self.at(smt + smdt - 1)
- self.at(smt - smdt)
- self.at(smt - smdt - 1)
)
self.smv_put(smt, tmp)
self.SIG_SMOOTH.append(tmp)
# from 1 to dt. 0 is never calculated.
else:
v = int(self.at(smt))
for j in range(1, smdt):
smtpj = self.at(smt + j)
smtlj = self.at(smt - j)
v += int(smtpj + smtlj)
self.smv_put(
smt,
(v << 1)
+ self.at(smt + j + 1)
+ self.at(smt - j - 1)
- self.adc_zero * (smdt << 2),
)
self.SIG_SMOOTH.append(
(v << 1)
+ self.at(smt + j + 1)
+ self.at(smt - j - 1)
- self.adc_zero * (smdt << 2)
)
self.c.smt = smt
return self.smv_at(at_t)
def qf(self):
"""
Evaluate the QRS detector filter for the next sample.
Parameters
----------
N/A
Returns
-------
N/A
"""
# Do this first, to ensure that all of the other smoothed values
# needed below are in the buffer
dv2 = self.sm(self.t + self.c.dt4)
dv2 -= self.smv_at(self.t - self.c.dt4)
dv1 = int(
self.smv_at(self.t + self.c.dt) - self.smv_at(self.t - self.c.dt)
)
dv = dv1 << 1
dv -= int(
self.smv_at(self.t + self.c.dt2) - self.smv_at(self.t - self.c.dt2)
)
dv = dv << 1
dv += dv1
dv -= int(
self.smv_at(self.t + self.c.dt3) - self.smv_at(self.t - self.c.dt3)
)
dv = dv << 1
dv += dv2
self.v1 += dv
v0 = int(self.v1 / self.c.v1norm)
self.qfv_put(self.t, v0 * v0)
self.SIG_QRS.append(v0**2)
def gqrs(self, from_sample, to_sample):
"""
The GQRS algorithm.
Parameters
----------
from_sample : int
The sample to start at.
to_sample : int
The sample to end at.
Returns
-------
N/A
"""
q0 = None
q1 = 0
q2 = 0
rr = None
rrd = None
rt = None
rtd = None
rtdmin = None
p = None # (Peak)
q = None # (Peak)
r = None # (Peak)
tw = None # (Peak)
last_peak = from_sample
last_qrs = from_sample
self.SIG_SMOOTH = []
self.SIG_QRS = []
def add_peak(peak_time, peak_amp, peak_type):
"""
Add a peak.
Parameters
----------
peak_time : int, float
The time of the peak.
peak_amp : int, float
The amplitude of the peak.
peak_type : int
The type of peak.
Returns
-------
N/A
"""
p = self.current_peak.next_peak
p.time = peak_time
p.amp = peak_amp
p.type = peak_type
self.current_peak = p
p.next_peak.amp = 0
def peaktype(p):
"""
The neighborhood consists of all other peaks within rrmin.
Normally, "most prominent" is equivalent to "largest in
amplitude", but this is not always true. For example, consider
three consecutive peaks a, b, c such that a and b share a
neighborhood, b and c share a neighborhood, but a and c do not;
and suppose that amp(a) > amp(b) > amp(c). In this case, if
there are no other peaks, a is the most prominent peak in the (a, b)
neighborhood. Since b is thus identified as a non-prominent peak,
c becomes the most prominent peak in the (b, c) neighborhood.
This is necessary to permit detection of low-amplitude beats that
closely precede or follow beats with large secondary peaks (as,
for example, in R-on-T PVCs).
Parameters
----------
p : Peak object
The peak to be analyzed.
Returns
-------
int
Whether the input peak is the most prominent peak in its
neighborhood (1) or not (2).
"""
if p.type:
return p.type
else:
a = p.amp
t0 = p.time - self.c.rrmin
t1 = p.time + self.c.rrmin
if t0 < 0:
t0 = 0
pp = p.prev_peak
while t0 < pp.time and pp.time < pp.next_peak.time:
if pp.amp == 0:
break
if a < pp.amp and peaktype(pp) == 1:
p.type = 2
return p.type
# end:
pp = pp.prev_peak
pp = p.next_peak
while pp.time < t1 and pp.time > pp.prev_peak.time:
if pp.amp == 0:
break
if a < pp.amp and peaktype(pp) == 1:
p.type = 2
return p.type
# end:
pp = pp.next_peak
p.type = 1
return p.type
def find_missing(r, p):
"""
Find the missing peaks.
Parameters
----------
r : Peak object
The real peak.
p : Peak object
The peak to be analyzed.
Returns
-------
s : Peak object
The missing peak.
"""
if r is None or p is None:
return None
minrrerr = p.time - r.time
s = None
q = r.next_peak
while q.time < p.time:
if peaktype(q) == 1:
rrtmp = q.time - r.time
rrerr = rrtmp - self.c.rrmean
if rrerr < 0:
rrerr = -rrerr
if rrerr < minrrerr:
minrrerr = rrerr
s = q
# end:
q = q.next_peak
return s
r = None
next_minute = 0
minutes = 0
while self.t <= to_sample + self.c.sps:
if self.countdown < 0:
if self.sample_valid:
self.qf()
else:
self.countdown = int(time_to_sample_number(1, self.c.fs))
self.state = "CLEANUP"
else:
self.countdown -= 1
if self.countdown < 0:
break
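# The three most recent matched-filter outputs. A local maximum of q1
# above the peak threshold pthr (tested below) registers a candidate peak.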
q0 = self.qfv_at(self.t)
q1 = self.qfv_at(self.t - 1)
q2 = self.qfv_at(self.t - 2)
# state == RUNNING only
if (
q1 > self.c.pthr
and q2 < q1
and q1 >= q0
and self.t > self.c.dt4
):
add_peak(self.t - 1, q1, 0)
last_peak = self.t - 1
p = self.current_peak.next_peak
while p.time < self.t - self.c.rtmax:
if (
p.time >= self.annot.time + self.c.rrmin
and peaktype(p) == 1
):
if p.amp > self.c.qthr:
rr = p.time - self.annot.time
q = find_missing(r, p)
if (
rr > self.c.rrmean + 2 * self.c.rrdev
and rr > 2 * (self.c.rrmean - self.c.rrdev)
and q is not None
):
p = q
rr = p.time - self.annot.time
self.annot.subtype = 1
rrd = rr - self.c.rrmean
if rrd < 0:
rrd = -rrd
self.c.rrdev += (rrd - self.c.rrdev) >> 3
if rrd > self.c.rrinc:
rrd = self.c.rrinc
if rr > self.c.rrmean:
self.c.rrmean += rrd
else:
self.c.rrmean -= rrd
if p.amp > self.c.qthr * 4:
self.c.qthr += 1
elif p.amp < self.c.qthr:
self.c.qthr -= 1
if self.c.qthr > self.c.pthr * 20:
self.c.qthr = self.c.pthr * 20
last_qrs = p.time
if self.state == "RUNNING":
self.annot.time = p.time - self.c.dt2
self.annot.type = "NORMAL"
qsize = int(p.amp * 10.0 / self.c.qthr)
if qsize > 127:
qsize = 127
self.annot.num = qsize
self.putann(self.annot)
self.annot.time += self.c.dt2
# look for this beat's T-wave
tw = None
rtdmin = self.c.rtmean
q = p.next_peak
while q.time > self.annot.time:
rt = q.time - self.annot.time - self.c.dt2
if rt < self.c.rtmin:
# end:
q = q.next_peak
continue
if rt > self.c.rtmax:
break
rtd = rt - self.c.rtmean
if rtd < 0:
rtd = -rtd
if rtd < rtdmin:
rtdmin = rtd
tw = q
# end:
q = q.next_peak
if tw is not None:
tmp_time = tw.time - self.c.dt2
tann = GQRS.Annotation(
tmp_time,
"TWAVE",
(
1
if tmp_time
> self.annot.time + self.c.rtmean
else 0
),
rtdmin,
)
# if self.state == "RUNNING":
# self.putann(tann)
rt = tann.time - self.annot.time
self.c.rtmean += (rt - self.c.rtmean) >> 4
if self.c.rtmean > self.c.rtmax:
self.c.rtmean = self.c.rtmax
elif self.c.rtmean < self.c.rtmin:
self.c.rtmean = self.c.rtmin
tw.type = 2 # mark T-wave as secondary
r = p
q = None
self.annot.subtype = 0
elif (
self.t - last_qrs > self.c.rrmax
and self.c.qthr > self.c.qthmin
):
self.c.qthr -= self.c.qthr >> 4
# end:
p = p.next_peak
elif (
self.t - last_peak > self.c.rrmax
and self.c.pthr > self.c.pthmin
):
self.c.pthr -= self.c.pthr >> 4
self.t += 1
if self.t >= next_minute:
next_minute += self.c.spm
minutes += 1
if minutes >= 60:
minutes = 0
if self.state == "LEARNING":
return
# Mark the last beat or two.
p = self.current_peak.next_peak
while p.time < p.next_peak.time:
if (
p.time >= self.annot.time + self.c.rrmin
and p.time < self.tf
and peaktype(p) == 1
):
self.annot.type = "NORMAL"
self.annot.time = p.time
self.putann(self.annot)
# end:
p = p.next_peak
|
class GQRS(object):
'''
GQRS detection class.
Attributes
----------
N/A
'''
class Conf(object):
'''
Initial signal configuration object for this QRS detector.
Attributes
----------
fs : int, float
The sampling frequency of the input signal.
adc_gain : int, float
The analogue to digital gain of the signal (the number of adus per
physical unit).
hr : int, float, optional
Typical heart rate, in beats per minute.
RRdelta : int, float, optional
Typical difference between successive RR intervals in seconds.
RRmin : int, float, optional
Minimum RR interval ("refractory period"), in seconds.
RRmax : int, float, optional
Maximum RR interval, in seconds. Thresholds will be adjusted if no
peaks are detected within this interval.
QS : int, float, optional
Typical QRS duration, in seconds.
QT : int, float, optional
Typical QT interval, in seconds.
RTmin : int, float, optional
Minimum interval between R and T peaks, in seconds.
RTmax : int, float, optional
Maximum interval between R and T peaks, in seconds.
QRSa : int, float, optional
Typical QRS peak-to-peak amplitude, in microvolts.
QRSamin : int, float, optional
Minimum QRS peak-to-peak amplitude, in microvolts.
thresh : int, float, optional
The relative amplitude detection threshold. Used to initialize the peak
and QRS detection threshold.
'''
def __init__(
self,
fs,
adc_gain,
hr=75,
RRdelta=0.2,
RRmin=0.28,
RRmax=2.4,
QS=0.07,
QT=0.35,
RTmin=0.25,
RTmax=0.33,
QRSa=750,
QRSamin=130,
thresh=1.0,
):
pass
class Peak(object):
'''
Holds all of the peak information for the QRS object.
Attributes
----------
peak_time : int, float
The time of the peak.
peak_amp : int, float
The amplitude of the peak.
peak_type : str
The type of the peak.
'''
def __init__(self, peak_time, peak_amp, peak_type):
pass
class Annotation(object):
'''
Holds all of the annotation information for the QRS object.
Attributes
----------
ann_time : int, float
The time of the annotation.
ann_type : str
The type of the annotation.
ann_subtype : int
The subtype of the annotation.
ann_num : int
The number of the annotation.
'''
def __init__(self, ann_time, ann_type, ann_subtype, ann_num):
pass
def putann(self, annotation):
'''
Add an annotation to the object.
Parameters
----------
annotation : Annotation object
The annotation to be added.
Returns
-------
N/A
'''
pass
def detect(self, x, conf, adc_zero):
'''
Run detection.
Parameters
----------
x : ndarray
Array containing the digital signal.
conf : GQRS.Conf object
The configuration object specifying signal configuration
parameters. See the docstring of the GQRS.Conf class.
adc_zero : int
The value produced by the ADC given a 0 Volt input.
Returns
-------
QRS object
The annotations that have been detected.
'''
pass
def rewind_gqrs(self):
'''
Rewind the gqrs.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def at(self, t):
'''
Determine the value of the sample at the specified time.
Parameters
----------
t : int
The time to search for the sample value.
Returns
-------
N/A
'''
pass
def smv_at(self, t):
'''
Determine the SMV value of the sample at the specified time.
Parameters
----------
t : int
The time to search for the sample SMV value.
Returns
-------
N/A
'''
pass
def smv_put(self, t, v):
'''
Put the SMV value of the sample at the specified time.
Parameters
----------
t : int
The time to search for the sample value.
v : int
The value of the SMV.
Returns
-------
N/A
'''
pass
def qfv_at(self, t):
'''
Determine the QFV value of the sample at the specified time.
Parameters
----------
t : int
The time to search for the sample QFV value.
Returns
-------
N/A
'''
pass
def qfv_put(self, t, v):
'''
Put the QFV value of the sample at the specified time.
Parameters
----------
t : int
The time with which to start the analysis.
v : int
The value of the QFV.
Returns
-------
N/A
'''
pass
def sm(self, at_t):
'''
Implements a trapezoidal low pass (smoothing) filter (with a gain
of 4*smdt) applied to input signal sig before the QRS matched
filter qf(). Before attempting to 'rewind' by more than BUFLN-smdt
samples, reset smt and smt0.
Parameters
----------
at_t : int
The time where the filter will be implemented.
Returns
-------
smv_at : ndarray
The smoothed signal.
'''
pass
def qf(self):
'''
Evaluate the QRS detector filter for the next sample.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def gqrs(self, from_sample, to_sample):
'''
The GQRS algorithm.
Parameters
----------
from_sample : int
The sample to start at.
to_sample : int
The sample to end at.
Returns
-------
N/A
'''
pass
def add_peak(peak_time, peak_amp, peak_type):
'''
Add a peak.
Parameters
----------
peak_time : int, float
The time of the peak.
peak_amp : int, float
The amplitude of the peak.
peak_type : int
The type of peak.
Returns
-------
N/A
'''
pass
def peaktype(p):
'''
The neighborhood consists of all other peaks within rrmin.
Normally, "most prominent" is equivalent to "largest in
amplitude", but this is not always true. For example, consider
three consecutive peaks a, b, c such that a and b share a
neighborhood, b and c share a neighborhood, but a and c do not;
and suppose that amp(a) > amp(b) > amp(c). In this case, if
there are no other peaks, a is the most prominent peak in the (a, b)
neighborhood. Since b is thus identified as a non-prominent peak,
c becomes the most prominent peak in the (b, c) neighborhood.
This is necessary to permit detection of low-amplitude beats that
closely precede or follow beats with large secondary peaks (as,
for example, in R-on-T PVCs).
Parameters
----------
p : Peak object
The peak to be analyzed.
Returns
-------
int
Whether the input peak is the most prominent peak in its
neighborhood (1) or not (2).
'''
pass
def find_missing(r, p):
'''
Find the missing peaks.
Parameters
----------
r : Peak object
The real peak.
p : Peak object
The peak to be analyzed.
Returns
-------
s : Peak object
The missing peak.
'''
pass
| 21 | 18 | 48 | 6 | 27 | 15 | 4 | 0.64 | 1 | 4 | 2 | 0 | 11 | 16 | 11 | 11 | 787 | 112 | 418 | 151 | 382 | 269 | 334 | 136 | 313 | 34 | 1 | 7 | 76 |
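Illustrative sketch (not part of the repository data above): how a few of the GQRS.Conf timing and threshold constants follow from the sampling rate and ADC gain. The values of fs and adc_gain are made-up examples, and sps and RR are assumptions (taken as the integer sampling rate and 60/hr), since their assignment falls outside the excerpt; the remaining arithmetic mirrors the Conf initializer shown above.

# Hypothetical, standalone recomputation of a few GQRS.Conf constants.
fs = 250          # assumed sampling frequency, Hz
adc_gain = 200    # assumed adus per physical unit
hr = 75           # default typical heart rate, beats per minute
QS = 0.07         # default QRS duration, seconds
QRSamin = 130     # default minimum QRS amplitude, microvolts
thresh = 1.0      # default relative detection threshold

sps = int(fs)                        # assumption: samples per second
RR = 60.0 / hr                       # assumption: mean RR interval, seconds
rrmean = int(RR * sps)               # mirrors self.rrmean
dt = int(QS * sps / 4)               # mirrors self.dt (quarter QRS duration)
dv = adc_gain * QRSamin * 0.001      # amplitude threshold in adus
pthr = int((thresh * dv * dv) / 6)   # peak detection threshold
qthr = pthr << 1                     # QRS detection threshold
print(rrmean, dt, pthr, qthr)        # 200 4 112 224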
147,701 |
MIT-LCP/wfdb-python
|
wfdb/processing/evaluate.py
|
wfdb.processing.evaluate.Comparitor
|
class Comparitor(object):
"""
The class to implement and hold comparisons between two sets of
annotations. See methods `compare`, `print_summary` and `plot`.
Attributes
----------
ref_sample : ndarray
An array of the reference sample locations.
test_sample : ndarray
An array of the comparison sample locations.
window_width : int
The width of the window.
signal : 1d numpy array, optional
The signal array the annotation samples are labelling. Only used
for plotting.
Examples
--------
>>> import wfdb
>>> from wfdb import processing
>>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
>>> ann_ref = wfdb.rdann('sample-data/100','atr')
>>> xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs'])
>>> xqrs.detect()
>>> comparitor = processing.Comparitor(ann_ref.sample[1:],
xqrs.qrs_inds,
int(0.1 * fields['fs']),
sig[:,0])
>>> comparitor.compare()
>>> comparitor.print_summary()
>>> comparitor.plot()
"""
def __init__(self, ref_sample, test_sample, window_width, signal=None):
if len(ref_sample) > 1 and len(test_sample) > 1:
if min(np.diff(ref_sample)) < 0 or min(np.diff(test_sample)) < 0:
raise ValueError(
(
"The sample locations must be monotonically"
+ " increasing"
)
)
self.ref_sample = ref_sample
self.test_sample = test_sample
self.n_ref = len(ref_sample)
self.n_test = len(test_sample)
self.window_width = window_width
# The matching test sample number for each reference annotation.
# -1 for indices with no match
self.matching_sample_nums = np.full(self.n_ref, -1, dtype="int")
self.signal = signal
# TODO: rdann return annotations.where
def _calc_stats(self):
"""
Calculate performance statistics after the two sets of annotations
are compared.
Parameters
----------
N/A
Returns
-------
N/A
Example:
-------------------
ref=500 test=480
{ 30 { 470 } 10 }
-------------------
tp = 470
fp = 10
fn = 30
sensitivity = 470 / 500
positive_predictivity = 470 / 480
"""
# Reference annotation indices that were detected
self.matched_ref_inds = np.where(self.matching_sample_nums != -1)[0]
# Reference annotation indices that were missed
self.unmatched_ref_inds = np.where(self.matching_sample_nums == -1)[0]
# Test annotation indices that were matched to a reference annotation
self.matched_test_inds = self.matching_sample_nums[
self.matching_sample_nums != -1
]
# Test annotation indices that were unmatched to a reference annotation
self.unmatched_test_inds = np.setdiff1d(
np.array(range(self.n_test)),
self.matched_test_inds,
assume_unique=True,
)
# Sample numbers that were matched and unmatched
self.matched_ref_sample = self.ref_sample[self.matched_ref_inds]
self.unmatched_ref_sample = self.ref_sample[self.unmatched_ref_inds]
self.matched_test_sample = self.test_sample[self.matched_test_inds]
self.unmatched_test_sample = self.test_sample[self.unmatched_test_inds]
# True positives = matched reference samples
self.tp = len(self.matched_ref_inds)
# False positives = extra test samples not matched
self.fp = self.n_test - self.tp
# False negatives = undetected reference samples
self.fn = self.n_ref - self.tp
# No tn attribute
self.sensitivity = float(self.tp) / float(self.tp + self.fn)
self.positive_predictivity = float(self.tp) / self.n_test
def compare(self):
"""
Main comparison function. Note: Make sure to be able to handle
these ref/test scenarios:
Parameters
----------
N/A
Returns
-------
N/A
Example
-------
A:
o----o---o---o
x-------x----x
B:
o----o-----o---o
x--------x--x--x
C:
o------o-----o---o
x-x--------x--x--x
D:
o------o-----o---o
x-x--------x-----x
"""
test_samp_num = 0
ref_samp_num = 0
# Iterate through the reference sample numbers
while ref_samp_num < self.n_ref and test_samp_num < self.n_test:
# Get the closest testing sample number for this reference sample
closest_samp_num, smallest_samp_diff = self._get_closest_samp_num(
ref_samp_num, test_samp_num
)
# Get the closest testing sample number for the next reference
# sample. This doesn't need to be called for the last index.
if ref_samp_num < self.n_ref - 1:
(
closest_samp_num_next,
smallest_samp_diff_next,
) = self._get_closest_samp_num(ref_samp_num + 1, test_samp_num)
else:
# Set non-matching value if there is no next reference sample
# to compete for the test sample
closest_samp_num_next = -1
# Found a contested test sample number. Decide which
# reference sample it belongs to. If the sample is closer to
# the next reference sample, leave it to the next reference
# sample and label this reference sample as unmatched.
if (
closest_samp_num == closest_samp_num_next
and smallest_samp_diff_next < smallest_samp_diff
):
# Get the next closest sample for this reference sample,
# if not already assigned to a previous sample.
# It will be the previous testing sample number in any
# possible case (scenario D below), or nothing.
if closest_samp_num and (
not ref_samp_num
or closest_samp_num - 1
!= self.matching_sample_nums[ref_samp_num - 1]
):
# The previous test annotation is inspected
closest_samp_num = closest_samp_num - 1
smallest_samp_diff = abs(
self.ref_sample[ref_samp_num]
- self.test_sample[closest_samp_num]
)
# Assign the reference-test pair if close enough
if smallest_samp_diff < self.window_width:
self.matching_sample_nums[ref_samp_num] = (
closest_samp_num
)
# Set the starting test sample number to inspect
# for the next reference sample.
test_samp_num = closest_samp_num + 1
# Otherwise there is no matching test annotation
# If there is no clash, or the contested test sample is
# closer to the current reference, keep the test sample
# for this reference sample.
else:
# Assign the reference-test pair if close enough
if smallest_samp_diff < self.window_width:
self.matching_sample_nums[ref_samp_num] = closest_samp_num
# Increment the starting test sample number to inspect
# for the next reference sample.
test_samp_num = closest_samp_num + 1
ref_samp_num += 1
self._calc_stats()
def _get_closest_samp_num(self, ref_samp_num, start_test_samp_num):
"""
Return the closest testing sample number for the given reference
sample number. Limit the search from start_test_samp_num.
Parameters
----------
ref_samp_num : int
The desired reference sample number to get the closest result.
start_test_samp_num
The desired testing reference sample number to get the
closest result.
Returns
-------
closest_samp_num : int
The closest sample number to the reference sample number.
smallest_samp_diff : int
The smallest difference between the reference sample and
the testing sample.
"""
if start_test_samp_num >= self.n_test:
raise ValueError("Invalid starting test sample number.")
ref_samp = self.ref_sample[ref_samp_num]
test_samp = self.test_sample[start_test_samp_num]
samp_diff = ref_samp - test_samp
# Initialize running parameters
closest_samp_num = start_test_samp_num
smallest_samp_diff = abs(samp_diff)
# Iterate through the testing samples
for test_samp_num in range(start_test_samp_num, self.n_test):
test_samp = self.test_sample[test_samp_num]
samp_diff = ref_samp - test_samp
abs_samp_diff = abs(samp_diff)
# Found a better match
if abs_samp_diff < smallest_samp_diff:
closest_samp_num = test_samp_num
smallest_samp_diff = abs_samp_diff
# Stop iterating when the ref sample is first passed or reached
if samp_diff <= 0:
break
return closest_samp_num, smallest_samp_diff
def print_summary(self):
"""
Print summary metrics of the annotation comparisons.
Parameters
----------
N/A
Returns
-------
N/A
"""
if not hasattr(self, "sensitivity"):
self._calc_stats()
print(
"%d reference annotations, %d test annotations\n"
% (self.n_ref, self.n_test)
)
print("True Positives (matched samples): %d" % self.tp)
print("False Positives (unmatched test samples): %d" % self.fp)
print("False Negatives (unmatched reference samples): %d\n" % self.fn)
print(
"Sensitivity: %.4f (%d/%d)"
% (self.sensitivity, self.tp, self.n_ref)
)
print(
"Positive Predictivity: %.4f (%d/%d)"
% (self.positive_predictivity, self.tp, self.n_test)
)
def plot(self, sig_style="", title=None, figsize=None, return_fig=False):
"""
Plot the comparison of two sets of annotations, possibly
overlaid on their original signal.
Parameters
----------
sig_style : str, optional
The matplotlib style of the signal
title : str, optional
The title of the plot
figsize: tuple, optional
Tuple pair specifying the width, and height of the figure.
It is the 'figsize' argument passed into matplotlib.pyplot's
`figure` function.
return_fig : bool, optional
Whether the figure is to be returned as an output argument.
Returns
-------
fig : matplotlib figure object
The figure information for the plot.
ax : matplotlib axes object
The axes information for the plot.
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1)
legend = [
"Signal",
"Matched Reference Annotations (%d/%d)" % (self.tp, self.n_ref),
"Unmatched Reference Annotations (%d/%d)" % (self.fn, self.n_ref),
"Matched Test Annotations (%d/%d)" % (self.tp, self.n_test),
"Unmatched Test Annotations (%d/%d)" % (self.fp, self.n_test),
]
# Plot the signal if any
if self.signal is not None:
ax.plot(self.signal, sig_style)
# Plot reference annotations
ax.plot(
self.matched_ref_sample,
self.signal[self.matched_ref_sample],
"ko",
)
ax.plot(
self.unmatched_ref_sample,
self.signal[self.unmatched_ref_sample],
"ko",
fillstyle="none",
)
# Plot test annotations
ax.plot(
self.matched_test_sample,
self.signal[self.matched_test_sample],
"g+",
)
ax.plot(
self.unmatched_test_sample,
self.signal[self.unmatched_test_sample],
"rx",
)
ax.legend(legend)
# Just plot annotations
else:
# Plot reference annotations
ax.plot(self.matched_ref_sample, np.ones(self.tp), "ko")
ax.plot(
self.unmatched_ref_sample,
np.ones(self.fn),
"ko",
fillstyle="none",
)
# Plot test annotations
ax.plot(self.matched_test_sample, 0.5 * np.ones(self.tp), "g+")
ax.plot(self.unmatched_test_sample, 0.5 * np.ones(self.fp), "rx")
ax.legend(legend[1:])
if title:
ax.set_title(title)
ax.set_xlabel("time/sample")
fig.show()
if return_fig:
return fig, ax
|
class Comparitor(object):
'''
The class to implement and hold comparisons between two sets of
annotations. See methods `compare`, `print_summary` and `plot`.
Attributes
----------
ref_sample : ndarray
An array of the reference sample locations.
test_sample : ndarray
An array of the comparison sample locations.
window_width : int
The width of the window.
signal : 1d numpy array, optional
The signal array the annotation samples are labelling. Only used
for plotting.
Examples
--------
>>> import wfdb
>>> from wfdb import processing
>>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
>>> ann_ref = wfdb.rdann('sample-data/100','atr')
>>> xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs'])
>>> xqrs.detect()
>>> comparitor = processing.Comparitor(ann_ref.sample[1:],
xqrs.qrs_inds,
int(0.1 * fields['fs']),
sig[:,0])
>>> comparitor.compare()
>>> comparitor.print_summary()
>>> comparitor.plot()
'''
def __init__(self, ref_sample, test_sample, window_width, signal=None):
pass
def _calc_stats(self):
'''
Calculate performance statistics after the two sets of annotations
are compared.
Parameters
----------
N/A
Returns
-------
N/A
Example:
-------------------
ref=500 test=480
{ 30 { 470 } 10 }
-------------------
tp = 470
fp = 10
fn = 30
sensitivity = 470 / 500
positive_predictivity = 470 / 480
'''
pass
def compare(self):
'''
Main comparison function. Note: Make sure to be able to handle
these ref/test scenarios:
Parameters
----------
N/A
Returns
-------
N/A
Example
-------
A:
o----o---o---o
x-------x----x
B:
o----o-----o---o
x--------x--x--x
C:
o------o-----o---o
x-x--------x--x--x
D:
o------o-----o---o
x-x--------x-----x
'''
pass
def _get_closest_samp_num(self, ref_samp_num, start_test_samp_num):
'''
Return the closest testing sample number for the given reference
sample number. Limit the search from start_test_samp_num.
Parameters
----------
ref_samp_num : int
The desired reference sample number to get the closest result.
start_test_samp_num
The desired testing reference sample number to get the
closest result.
Returns
-------
closest_samp_num : int
The closest sample number to the reference sample number.
smallest_samp_diff : int
The smallest difference between the reference sample and
the testing sample.
'''
pass
def print_summary(self):
'''
Print summary metrics of the annotation comparisons.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def plot(self, sig_style="", title=None, figsize=None, return_fig=False):
'''
Plot the comparison of two sets of annotations, possibly
overlaid on their original signal.
Parameters
----------
sig_style : str, optional
The matplotlib style of the signal
title : str, optional
The title of the plot
figsize: tuple, optional
Tuple pair specifying the width, and height of the figure.
It is the 'figsize' argument passed into matplotlib.pyplot's
`figure` function.
return_fig : bool, optional
Whether the figure is to be returned as an output argument.
Returns
-------
fig : matplotlib figure object
The figure information for the plot.
ax : matplotlib axes object
The axes information for the plot.
'''
pass
| 7 | 6 | 59 | 9 | 27 | 23 | 4 | 1.04 | 1 | 3 | 0 | 0 | 6 | 20 | 6 | 6 | 397 | 63 | 164 | 43 | 156 | 170 | 96 | 42 | 88 | 7 | 1 | 4 | 22 |
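Illustrative sketch (not part of the repository data above): the relationship between the counts and summary statistics computed in Comparitor._calc_stats, using a hypothetical matching result. Only the -1 convention for unmatched reference annotations is taken from the code above; the concrete numbers are invented.

import numpy as np

# Hypothetical result of Comparitor.compare(): index of the matching test
# annotation for each reference annotation, -1 where no match was found.
matching_sample_nums = np.array([0, 1, -1, 2, 4])
n_ref = len(matching_sample_nums)
n_test = 5                                    # hypothetical test annotation count

tp = int(np.sum(matching_sample_nums != -1))  # matched reference annotations
fp = n_test - tp                              # unmatched test annotations
fn = n_ref - tp                               # missed reference annotations
sensitivity = tp / (tp + fn)                  # 4 / 5
positive_predictivity = tp / n_test           # 4 / 5
print(tp, fp, fn, sensitivity, positive_predictivity)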
147,702 |
MIT-LCP/wfdb-python
|
wfdb/io/record.py
|
wfdb.io.record.Record
|
class Record(BaseRecord, _header.HeaderMixin, _signal.SignalMixin):
"""
The class representing single segment WFDB records.
Record objects can be created using the initializer, by reading a WFDB
header with `rdheader`, or a WFDB record (header and associated dat files)
with `rdrecord`.
The attributes of the Record object give information about the record as
specified by: https://www.physionet.org/physiotools/wag/header-5.htm
In addition, the d_signal and p_signal attributes store the digital and
physical signals of WFDB records with at least one channel.
Attributes
----------
p_signal : ndarray, optional
An (MxN) 2d numpy array, where M is the signal length. Gives the
physical signal values intended to be written. Either p_signal or
d_signal must be set, but not both. If p_signal is set, this method will
use it to perform analogue-digital conversion, writing the resultant
digital values to the dat file(s). If fmt is set, gain and baseline must
be set or unset together. If fmt is unset, gain and baseline must both
be unset.
d_signal : ndarray, optional
An (MxN) 2d numpy array, where M is the signal length. Gives the
digital signal values intended to be directly written to the dat
file(s). The dtype must be an integer type. Either p_signal or d_signal
must be set, but not both. In addition, if d_signal is set, fmt, gain
and baseline must also all be set.
e_p_signal : ndarray, optional
The expanded physical conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
e_d_signal : ndarray, optional
The expanded digital conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
record_name : str, optional
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pn_dir`
parameter is set, this parameter should contain just the base
record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
n_sig : int, optional
Total number of signals.
fs : float, optional
The sampling frequency of the record.
counter_freq : float, optional
The frequency used to start counting.
base_counter : float, optional
The counter used at the start of the file.
sig_len : int, optional
The total length of the signal.
base_time : datetime.time, optional
The time of day at the beginning of the record.
base_date : datetime.date, optional
The date at the beginning of the record.
base_datetime : datetime.datetime, optional
The date and time at the beginning of the record, equivalent to
`datetime.combine(base_date, base_time)`.
file_name : str, optional
The name of the file used for analysis.
fmt : list, optional
A list of strings giving the WFDB format of each file used to store each
channel. Accepted formats are: '80','212','16','24', and '32'. There are
other WFDB formats as specified by:
https://www.physionet.org/physiotools/wag/signal-5.htm
but this library will not write (though it will read) those file types.
samps_per_frame : int, optional
The total number of samples per frame.
skew : float, optional
The offset used to align signals.
byte_offset : int, optional
The byte offset used to align signals.
adc_gain : list, optional
A list of numbers specifying the ADC gain.
baseline : list, optional
A list of integers specifying the digital baseline.
units : list, optional
A list of strings giving the units of each signal channel.
adc_res: int, optional
The resolution of the analogue-to-digital converter, in bits.
adc_zero: int, optional
The value produced by the ADC given a 0 Volt input.
init_value : list, optional
The initial value of the signal.
checksum : list, int, optional
The checksum of the signal.
block_size : str, optional
The dimensions of the field data.
sig_name : list, optional
A list of strings giving the signal name of each signal channel.
comments : list, optional
A list of string comments to be written to the header file.
Examples
--------
>>> record = wfdb.Record(record_name='r1', fs=250, n_sig=2, sig_len=1000,
file_name=['r1.dat','r1.dat'])
"""
def __init__(
self,
p_signal=None,
d_signal=None,
e_p_signal=None,
e_d_signal=None,
record_name=None,
n_sig=None,
fs=None,
counter_freq=None,
base_counter=None,
sig_len=None,
base_time=None,
base_date=None,
base_datetime=None,
file_name=None,
fmt=None,
samps_per_frame=None,
skew=None,
byte_offset=None,
adc_gain=None,
baseline=None,
units=None,
adc_res=None,
adc_zero=None,
init_value=None,
checksum=None,
block_size=None,
sig_name=None,
comments=None,
):
# Note the lack of the 'n_seg' field. Single segment records cannot
# have this field. Even n_seg = 1 makes the header a multi-segment
# header.
super(Record, self).__init__(
record_name=record_name,
n_sig=n_sig,
fs=fs,
counter_freq=counter_freq,
base_counter=base_counter,
sig_len=sig_len,
base_time=base_time,
base_date=base_date,
base_datetime=base_datetime,
comments=comments,
sig_name=sig_name,
)
self.p_signal = p_signal
self.d_signal = d_signal
self.e_p_signal = e_p_signal
self.e_d_signal = e_d_signal
self.file_name = file_name
self.fmt = fmt
self.samps_per_frame = samps_per_frame
self.skew = skew
self.byte_offset = byte_offset
self.adc_gain = adc_gain
self.baseline = baseline
self.units = units
self.adc_res = adc_res
self.adc_zero = adc_zero
self.init_value = init_value
self.checksum = checksum
self.block_size = block_size
# Equal comparison operator for objects of this type
def __eq__(self, other, verbose=False):
"""
Equal comparison operator for objects of this type.
Parameters
----------
other : object
The object that is being compared to self.
verbose : bool, optional
Whether to print details about equality (True) or not (False).
Returns
-------
bool
Determines if the objects are equal (True) or not equal (False).
"""
att1 = self.__dict__
att2 = other.__dict__
if set(att1.keys()) != set(att2.keys()):
if verbose:
print("Attributes members mismatch.")
return False
for k in att1.keys():
v1 = att1[k]
v2 = att2[k]
if type(v1) != type(v2):
if verbose:
print("Mismatch in attribute: %s" % k, v1, v2)
return False
if isinstance(v1, np.ndarray):
# Necessary for nans
np.testing.assert_array_equal(v1, v2)
elif (
isinstance(v1, list)
and len(v1) == len(v2)
and all(isinstance(e, np.ndarray) for e in v1)
):
for e1, e2 in zip(v1, v2):
np.testing.assert_array_equal(e1, e2)
else:
if v1 != v2:
if verbose:
print("Mismatch in attribute: %s" % k, v1, v2)
return False
return True
def wrsamp(self, expanded=False, write_dir=""):
"""
Write a WFDB header file and any associated dat files from this
object.
Parameters
----------
expanded : bool, optional
Whether to write the expanded signal (e_d_signal) instead
of the uniform signal (d_signal).
write_dir : str, optional
The directory in which to write the files.
Returns
-------
N/A
"""
# Update the checksum field (except for channels that did not have
# a checksum to begin with, or where the checksum was already
# valid.)
if self.checksum is not None:
checksums = self.calc_checksum(expanded=expanded)
for ch, old_val in enumerate(self.checksum):
if old_val is None or (checksums[ch] - old_val) % 65536 == 0:
checksums[ch] = old_val
self.checksum = checksums
# Perform field validity and cohesion checks, and write the
# header file.
self.wrheader(write_dir=write_dir, expanded=expanded)
if self.n_sig > 0:
# Perform signal validity and cohesion checks, and write the
# associated dat files.
self.wr_dats(expanded=expanded, write_dir=write_dir)
def _arrange_fields(self, channels, sampfrom, smooth_frames):
"""
Arrange/edit object fields to reflect user channel and/or signal
range input.
Parameters
----------
channels : list
List of channel numbers specified.
sampfrom : int
Starting sample number read.
smooth_frames : bool
Whether to convert the expanded signal array (e_d_signal) into
a smooth signal array (d_signal).
Returns
-------
N/A
"""
# Rearrange signal specification fields
for field in _header.SIGNAL_SPECS.index:
item = getattr(self, field)
setattr(self, field, [item[c] for c in channels])
# Expanded signals - multiple samples per frame.
if not smooth_frames:
# Checksum and init_value to be updated if present
# unless the whole signal length was input
if self.sig_len != int(
len(self.e_d_signal[0]) / self.samps_per_frame[0]
):
self.checksum = self.calc_checksum(True)
self.init_value = [s[0] for s in self.e_d_signal]
self.n_sig = len(channels)
self.sig_len = int(
len(self.e_d_signal[0]) / self.samps_per_frame[0]
)
# MxN numpy array d_signal
else:
self.d_signal = self.smooth_frames("digital")
self.e_d_signal = None
# Checksum and init_value to be updated if present
# unless the whole signal length was input
if self.sig_len != self.d_signal.shape[0]:
if self.checksum is not None:
self.checksum = self.calc_checksum()
if self.init_value is not None:
ival = list(self.d_signal[0, :])
self.init_value = [int(i) for i in ival]
# Update record specification parameters
# Important that these get updated after^^
self.n_sig = len(channels)
self.sig_len = self.d_signal.shape[0]
# Adjust date and time if necessary
self._adjust_datetime(sampfrom=sampfrom)
def to_dataframe(self) -> pd.DataFrame:
"""
Create a dataframe containing the data from this record.
Returns
-------
A dataframe, with sig_name in the columns. The index is a DatetimeIndex
if both base_date and base_time were set, otherwise a TimedeltaIndex.
"""
if self.base_datetime is not None:
index = pd.date_range(
start=self.base_datetime,
periods=self.sig_len,
freq=pd.Timedelta(seconds=1 / self.fs),
)
else:
index = pd.timedelta_range(
start=pd.Timedelta(0),
periods=self.sig_len,
freq=pd.Timedelta(seconds=1 / self.fs),
)
if self.p_signal is not None:
data = self.p_signal
elif self.d_signal is not None:
data = self.d_signal
elif self.e_p_signal is not None:
data = np.array(self.e_p_signal).T
elif self.e_d_signal is not None:
data = np.array(self.e_d_signal).T
else:
raise ValueError("No signal in record.")
return pd.DataFrame(data=data, index=index, columns=self.sig_name)
|
class Record(BaseRecord, _header.HeaderMixin, _signal.SignalMixin):
'''
The class representing single segment WFDB records.
Record objects can be created using the initializer, by reading a WFDB
header with `rdheader`, or a WFDB record (header and associated dat files)
with `rdrecord`.
The attributes of the Record object give information about the record as
specified by: https://www.physionet.org/physiotools/wag/header-5.htm
In addition, the d_signal and p_signal attributes store the digital and
physical signals of WFDB records with at least one channel.
Attributes
----------
p_signal : ndarray, optional
An (MxN) 2d numpy array, where M is the signal length. Gives the
physical signal values intended to be written. Either p_signal or
d_signal must be set, but not both. If p_signal is set, this method will
use it to perform analogue-digital conversion, writing the resultant
digital values to the dat file(s). If fmt is set, gain and baseline must
be set or unset together. If fmt is unset, gain and baseline must both
be unset.
d_signal : ndarray, optional
An (MxN) 2d numpy array, where M is the signal length. Gives the
digital signal values intended to be directly written to the dat
file(s). The dtype must be an integer type. Either p_signal or d_signal
must be set, but not both. In addition, if d_signal is set, fmt, gain
and baseline must also all be set.
e_p_signal : ndarray, optional
The expanded physical conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
e_d_signal : ndarray, optional
The expanded digital conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
record_name : str, optional
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pn_dir`
parameter is set, this parameter should contain just the base
record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
n_sig : int, optional
Total number of signals.
fs : float, optional
The sampling frequency of the record.
counter_freq : float, optional
The frequency used to start counting.
base_counter : float, optional
The counter used at the start of the file.
sig_len : int, optional
The total length of the signal.
base_time : datetime.time, optional
The time of day at the beginning of the record.
base_date : datetime.date, optional
The date at the beginning of the record.
base_datetime : datetime.datetime, optional
The date and time at the beginning of the record, equivalent to
`datetime.combine(base_date, base_time)`.
file_name : str, optional
The name of the file used for analysis.
fmt : list, optional
A list of strings giving the WFDB format of each file used to store each
channel. Accepted formats are: '80','212','16','24', and '32'. There are
other WFDB formats as specified by:
https://www.physionet.org/physiotools/wag/signal-5.htm
but this library will not write (though it will read) those file types.
samps_per_frame : int, optional
The total number of samples per frame.
skew : float, optional
The offset used to align signals.
byte_offset : int, optional
The byte offset used to align signals.
adc_gain : list, optional
A list of numbers specifying the ADC gain.
baseline : list, optional
A list of integers specifying the digital baseline.
units : list, optional
A list of strings giving the units of each signal channel.
adc_res: int, optional
The resolution of the analogue-to-digital converter, in bits.
adc_zero: int, optional
The value produced by the ADC given a 0 Volt input.
init_value : list, optional
The initial value of the signal.
checksum : list, int, optional
The checksum of the signal.
block_size : str, optional
The dimensions of the field data.
sig_name : list, optional
A list of strings giving the signal name of each signal channel.
comments : list, optional
A list of string comments to be written to the header file.
Examples
--------
>>> record = wfdb.Record(record_name='r1', fs=250, n_sig=2, sig_len=1000,
file_name=['r1.dat','r1.dat'])
'''
def __init__(
self,
p_signal=None,
d_signal=None,
e_p_signal=None,
e_d_signal=None,
record_name=None,
n_sig=None,
fs=None,
counter_freq=None,
base_counter=None,
sig_len=None,
base_time=None,
base_date=None,
base_datetime=None,
file_name=None,
fmt=None,
samps_per_frame=None,
skew=None,
byte_offset=None,
adc_gain=None,
baseline=None,
units=None,
adc_res=None,
adc_zero=None,
init_value=None,
checksum=None,
block_size=None,
sig_name=None,
comments=None,
):
pass
def __eq__(self, other, verbose=False):
'''
Equal comparison operator for objects of this type.
Parameters
----------
other : object
The object that is being compared to self.
verbose : bool, optional
Whether to print details about equality (True) or not (False).
Returns
-------
bool
Determines if the objects are equal (True) or not equal (False).
'''
pass
def wrsamp(self, expanded=False, write_dir=""):
'''
Write a WFDB header file and any associated dat files from this
object.
Parameters
----------
expanded : bool, optional
Whether to write the expanded signal (e_d_signal) instead
of the uniform signal (d_signal).
write_dir : str, optional
The directory in which to write the files.
Returns
-------
N/A
'''
pass
def _arrange_fields(self, channels, sampfrom, smooth_frames):
'''
Arrange/edit object fields to reflect user channel and/or signal
range input.
Parameters
----------
channels : list
List of channel numbers specified.
sampfrom : int
Starting sample number read.
smooth_frames : bool
Whether to convert the expanded signal array (e_d_signal) into
a smooth signal array (d_signal).
Returns
-------
N/A
'''
pass
def to_dataframe(self) -> pd.DataFrame:
'''
Create a dataframe containing the data from this record.
Returns
-------
A dataframe, with sig_name in the columns. The index is a DatetimeIndex
if both base_date and base_time were set, otherwise a TimedeltaIndex.
'''
pass
| 6 | 5 | 50 | 5 | 30 | 14 | 6 | 1.11 | 3 | 8 | 0 | 0 | 5 | 20 | 5 | 22 | 356 | 38 | 151 | 69 | 115 | 167 | 85 | 38 | 79 | 11 | 3 | 4 | 30 |
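Illustrative sketch (not part of the repository data above): the time-index construction used by Record.to_dataframe, for a hypothetical five-sample record sampled at 250 Hz. The signal names, lengths, and values are invented placeholders.

from datetime import datetime

import numpy as np
import pandas as pd

fs = 250
sig_len = 5
base_datetime = datetime(2020, 1, 1, 0, 0, 0)   # assumed record start

# DatetimeIndex branch of to_dataframe (base_datetime is set).
index = pd.date_range(
    start=base_datetime,
    periods=sig_len,
    freq=pd.Timedelta(seconds=1 / fs),
)
data = np.zeros((sig_len, 2))                   # placeholder for p_signal
df = pd.DataFrame(data=data, index=index, columns=["MLII", "V5"])
print(df.index[1] - df.index[0])                # 0 days 00:00:00.004000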
147,703 |
MIT-LCP/wfdb-python
|
wfdb/io/record.py
|
wfdb.io.record.MultiRecord
|
class MultiRecord(BaseRecord, _header.MultiHeaderMixin):
"""
The class representing multi-segment WFDB records.
MultiRecord objects can be created using the initializer, or by reading a
multi-segment WFDB record using 'rdrecord' with the `m2s` (multi to single)
input parameter set to False.
The attributes of the MultiRecord object give information about the entire
record as specified by: https://www.physionet.org/physiotools/wag/header-5.htm
In addition, the `segments` parameter is a list of Record objects
representing each individual segment, or None representing empty segments,
of the entire multi-segment record.
Notably, this class has no attribute representing the signals as a whole.
The 'multi_to_single' instance method can be called on MultiRecord objects
to return a single segment representation of the record as a Record object.
The resulting Record object will have its 'p_signal' field set.
Attributes
----------
segments : list, optional
The segments to be read.
layout : str, optional
Whether the record will be 'fixed' or 'variable'.
record_name : str, optional
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pn_dir`
parameter is set, this parameter should contain just the base
record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
n_sig : int, optional
Total number of signals.
fs : int, float, optional
The sampling frequency of the record.
counter_freq : float, optional
The frequency used to start counting.
base_counter : float, optional
The counter used at the start of the file.
sig_len : int, optional
The total length of the signal.
base_time : datetime.time, optional
The time of day at the beginning of the record.
base_date : datetime.date, optional
The date at the beginning of the record.
base_datetime : datetime.datetime, optional
The date and time at the beginning of the record, equivalent to
`datetime.combine(base_date, base_time)`.
seg_name : list, optional
The name of each segment.
seg_len : List[int], optional
The length of each segment.
comments : list, optional
A list of string comments to be written to the header file.
sig_name : list, optional
A list of strings giving the signal name of each signal channel.
sig_segments : list, optional
The signal segments to be read.
Examples
--------
>>> record_m = wfdb.MultiRecord(record_name='rm', fs=50, n_sig=8,
sig_len=9999, seg_name=['rm_1', '~', 'rm_2'],
seg_len=[800, 200, 900])
>>> # Get a MultiRecord object
>>> record_s = wfdb.rdrecord('s00001-2896-10-10-00-31', m2s=False)
>>> # Turn it into a single record
>>> record_s = record_s.multi_to_single(physical=True)
record_s initially stores a `MultiRecord` object, and is then converted into
a `Record` object.
"""
def __init__(
self,
segments=None,
layout=None,
record_name=None,
n_sig=None,
fs=None,
counter_freq=None,
base_counter=None,
sig_len=None,
base_time=None,
base_date=None,
base_datetime=None,
seg_name=None,
seg_len=None,
comments=None,
sig_name=None,
sig_segments=None,
):
super(MultiRecord, self).__init__(
record_name=record_name,
n_sig=n_sig,
fs=fs,
counter_freq=counter_freq,
base_counter=base_counter,
sig_len=sig_len,
base_time=base_time,
base_date=base_date,
base_datetime=base_datetime,
comments=comments,
sig_name=sig_name,
)
self.layout = layout
self.segments = segments
self.seg_name = seg_name
self.seg_len = seg_len
self.sig_segments = sig_segments
if segments:
self.n_seg = len(segments)
if not seg_len:
self.seg_len = [segment.sig_len for segment in segments]
def wrsamp(self, write_dir=""):
"""
Write a multi-segment header, along with headers and dat files
for all segments, from this object.
Parameters
----------
write_dir : str, optional
The directory in which to write the files.
Returns
-------
N/A
"""
# Perform field validity and cohesion checks, and write the
# header file.
self.wrheader(write_dir=write_dir)
# Perform record validity and cohesion checks, and write the
# associated segments.
for seg in self.segments:
seg.wrsamp(write_dir=write_dir)
def _check_segment_cohesion(self):
"""
Check the cohesion of the segments field with other fields used
to write the record.
Parameters
----------
N/A
Returns
-------
N/A
"""
if self.n_seg != len(self.segments):
raise ValueError("Length of segments must match the 'n_seg' field")
for seg_num, segment in enumerate(self.segments):
# If segment 0 is a layout specification record, check that its file names are all == '~'
if seg_num == 0 and self.seg_len[0] == 0:
for file_name in segment.file_name:
if file_name != "~":
raise ValueError(
"Layout specification records must have all file_names named '~'"
)
# Sampling frequencies must all match the one in the master header
if segment.fs != self.fs:
raise ValueError(
"The 'fs' in each segment must match the overall record's 'fs'"
)
# Check the signal length of the segment against the corresponding seg_len field
if segment.sig_len != self.seg_len[seg_num]:
raise ValueError(
f"The signal length of segment {seg_num} does not match the corresponding segment length"
)
# No need to check the sum of sig_lens from each segment object against sig_len
# Already effectively done it when checking sum(seg_len) against sig_len
def _required_segments(self, sampfrom, sampto):
"""
Determine the segments and the samples within each segment in a
multi-segment record, that lie within a sample range.
Parameters
----------
sampfrom : int
The starting sample number to read for each channel.
sampto : int
The sample number at which to stop reading for each channel.
Returns
-------
seg_numbers : list
List of segment numbers to read.
readsamps : list
List of sample numbers to be read.
"""
# The starting segment with actual samples
if self.layout == "fixed":
startseg = 0
else:
startseg = 1
# Cumulative sum of segment lengths (ignoring layout segment)
cumsumlengths = list(np.cumsum(self.seg_len[startseg:]))
# Get first segment
seg_numbers = [[sampfrom < cs for cs in cumsumlengths].index(True)]
# Get final segment
if sampto == cumsumlengths[len(cumsumlengths) - 1]:
seg_numbers.append(len(cumsumlengths) - 1)
else:
seg_numbers.append(
[sampto <= cs for cs in cumsumlengths].index(True)
)
# Add 1 for variable layout records
seg_numbers = list(np.add(seg_numbers, startseg))
# Obtain the sampfrom and sampto to read for each segment
if seg_numbers[1] == seg_numbers[0]:
# Only one segment to read
seg_numbers = [seg_numbers[0]]
# The segment's first sample number relative to the entire record
segstartsamp = sum(self.seg_len[0 : seg_numbers[0]])
readsamps = [[sampfrom - segstartsamp, sampto - segstartsamp]]
else:
# More than one segment to read
seg_numbers = list(range(seg_numbers[0], seg_numbers[1] + 1))
readsamps = [[0, self.seg_len[s]] for s in seg_numbers]
# Starting sample for first segment.
readsamps[0][0] = (
sampfrom - ([0] + cumsumlengths)[seg_numbers[0] - startseg]
)
# End sample for last segment
readsamps[-1][1] = (
sampto - ([0] + cumsumlengths)[seg_numbers[-1] - startseg]
)
return (seg_numbers, readsamps)
def _required_channels(self, seg_numbers, channels, dir_name, pn_dir):
"""
Get the channel numbers to be read from each specified segment,
given the channel numbers specified for the entire record.
Parameters
----------
seg_numbers : list
List of segment numbers to read.
channels : list
The channel indices to read for the whole record. Same one
specified by user input.
dir_name : str
The local directory location of the header file. This parameter
is ignored if `pn_dir` is set.
pn_dir : str
Option used to stream data from Physionet. The Physionet
database directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/content/mitdb'
pn_dir='mitdb'.
Returns
-------
required_channels : list
List of lists, containing channel indices to read for each
desired segment.
"""
# Fixed layout. All channels are the same.
if self.layout == "fixed":
required_channels = [channels] * len(seg_numbers)
# Variable layout: figure out channels by matching record names
else:
required_channels = []
# The overall layout signal names
l_sig_names = self.segments[0].sig_name
# The wanted signals
w_sig_names = [l_sig_names[c] for c in channels]
# For each segment
for i in range(len(seg_numbers)):
# Skip empty segments
if self.seg_name[seg_numbers[i]] == "~":
required_channels.append([])
else:
# Get the signal names of the current segment
s_sig_names = rdheader(
os.path.join(dir_name, self.seg_name[seg_numbers[i]]),
pn_dir=pn_dir,
).sig_name
required_channels.append(
_get_wanted_channels(w_sig_names, s_sig_names)
)
return required_channels
def _arrange_fields(
self, seg_numbers, seg_ranges, channels, sampfrom=0, force_channels=True
):
"""
Arrange/edit object fields to reflect user channel and/or
signal range inputs. Updates layout specification header if
necessary.
Parameters
----------
seg_numbers : list
List of integer segment numbers read.
seg_ranges: list
List of integer pairs, giving the sample ranges for each
segment number read.
channels : list
List of channel numbers specified
sampfrom : int
Starting sample read.
force_channels : bool, optional
Used when reading multi-segment variable layout records.
Whether to update the layout specification record to match
the input `channels` argument, or to omit channels in which
no read segment contains the signals.
Returns
-------
N/A
"""
# Update seg_len values for relevant segments
for i in range(len(seg_numbers)):
self.seg_len[seg_numbers[i]] = seg_ranges[i][1] - seg_ranges[i][0]
# Get rid of the segments and segment line parameters
# outside the desired segment range
if self.layout == "fixed":
self.n_sig = len(channels)
self.segments = self.segments[seg_numbers[0] : seg_numbers[-1] + 1]
self.seg_name = self.seg_name[seg_numbers[0] : seg_numbers[-1] + 1]
self.seg_len = self.seg_len[seg_numbers[0] : seg_numbers[-1] + 1]
else:
self.segments = [self.segments[0]] + self.segments[
seg_numbers[0] : seg_numbers[-1] + 1
]
self.seg_name = [self.seg_name[0]] + self.seg_name[
seg_numbers[0] : seg_numbers[-1] + 1
]
self.seg_len = [self.seg_len[0]] + self.seg_len[
seg_numbers[0] : seg_numbers[-1] + 1
]
# Update the layout specification segment. At this point it
# should match the full original header
# Have to inspect existing channels of segments; requested
# input channels will not be enough on its own because not
# all signals may be present, depending on which section of
# the signal was read.
if not force_channels:
# The desired signal names.
desired_sig_names = [
self.segments[0].sig_name[ch] for ch in channels
]
# Actual contained signal names of individual segments
# contained_sig_names = [seg.sig_name for seg in self.segments[1:]]
contained_sig_names = set(
[
name
for seg in self.segments[1:]
if seg is not None
for name in seg.sig_name
]
)
# Remove non-present names. Keep the order.
sig_name = [
name
for name in desired_sig_names
if name in contained_sig_names
]
# Channel indices to keep for signal specification fields
channels = [
self.segments[0].sig_name.index(name) for name in sig_name
]
# Rearrange signal specification fields
for field in _header.SIGNAL_SPECS.index:
item = getattr(self.segments[0], field)
setattr(self.segments[0], field, [item[c] for c in channels])
self.segments[0].n_sig = self.n_sig = len(channels)
if self.n_sig == 0:
print(
"No signals of the desired channels are contained in the specified sample range."
)
# Update record specification parameters
self.sig_len = sum([sr[1] - sr[0] for sr in seg_ranges])
self.n_seg = len(self.segments)
self._adjust_datetime(sampfrom=sampfrom)
def multi_to_single(self, physical, return_res=64, expanded=False):
"""
Create a Record object from the MultiRecord object. All signal
segments will be combined into the new object's `p_signal` or
`d_signal` field. For digital format, the signals must have
the same storage format, baseline, and adc_gain in all segments.
Parameters
----------
physical : bool
Whether to convert the physical or digital signal.
return_res : int, optional
The return resolution of the `p_signal` field. Options are:
64, 32, and 16.
expanded : bool, optional
If false, combine the sample data from `p_signal` or `d_signal`
into a single two-dimensional array. If true, combine the
sample data from `e_p_signal` or `e_d_signal` into a list of
one-dimensional arrays.
Returns
-------
record : WFDB Record
The single segment record created.
"""
# The fields to transfer to the new object
fields = self.__dict__.copy()
# Remove multirecord fields
for attr in ["segments", "seg_name", "seg_len", "n_seg"]:
del fields[attr]
# Figure out single segment fields to set for the new Record
if self.layout == "fixed":
# Get the fields from the first segment
for attr in [
"fmt",
"adc_gain",
"baseline",
"units",
"sig_name",
"samps_per_frame",
]:
fields[attr] = getattr(self.segments[0], attr)
else:
# For variable layout records, inspect the segments for the
# attribute values.
# Coincidentally, if physical=False, figure out if this
# conversion can be performed. All signals of the same name
# must have the same fmt, gain, baseline, and units for all
# segments.
# For either physical or digital conversion, all signals
# of the same name must have the same samps_per_frame,
# which must match the value in the layout header.
# The layout header should be updated at this point to
# reflect channels. We can depend on it for sig_name and
# samps_per_frame, but not for fmt, adc_gain, units, and
# baseline.
# These signal names will be the key
signal_names = self.segments[0].sig_name
n_sig = len(signal_names)
# This will be the field dictionary to copy over.
reference_fields = {
"fmt": n_sig * [None],
"adc_gain": n_sig * [None],
"baseline": n_sig * [None],
"units": n_sig * [None],
"samps_per_frame": self.segments[0].samps_per_frame,
}
# For physical signals, mismatched fields will not be copied
# over. For digital, mismatches will cause an exception.
mismatched_fields = []
for seg in self.segments[1:]:
if seg is None:
continue
# For each signal, check fmt, adc_gain, baseline, and
# units of each signal
for seg_ch in range(seg.n_sig):
sig_name = seg.sig_name[seg_ch]
# The overall channel
ch = signal_names.index(sig_name)
for field in reference_fields:
item_ch = getattr(seg, field)[seg_ch]
if reference_fields[field][ch] is None:
reference_fields[field][ch] = item_ch
# mismatch case
elif reference_fields[field][ch] != item_ch:
if field == "samps_per_frame":
expected = reference_fields[field][ch]
raise ValueError(
f"Incorrect samples per frame "
f"({item_ch} != {expected}) "
f"for signal {signal_names[ch]} "
f"in segment {seg.record_name} "
f"of {self.record_name}"
)
elif physical:
mismatched_fields.append(field)
else:
raise Exception(
"This variable layout multi-segment record cannot be converted to single segment, in digital format."
)
# Remove mismatched signal fields for physical signals
for field in set(mismatched_fields):
del reference_fields[field]
# At this point, the fields should be set for all channels
fields.update(reference_fields)
fields["sig_name"] = signal_names
# Figure out signal attribute to set, and its dtype.
if physical:
if expanded:
sig_attr = "e_p_signal"
else:
sig_attr = "p_signal"
# Figure out the largest required dtype
dtype = _signal._np_dtype(return_res, discrete=False)
nan_vals = np.array([self.n_sig * [np.nan]], dtype=dtype)
else:
if expanded:
sig_attr = "e_d_signal"
else:
sig_attr = "d_signal"
# Figure out the largest required dtype
dtype = _signal._np_dtype(return_res, discrete=True)
nan_vals = np.array([_signal._digi_nan(fields["fmt"])], dtype=dtype)
samps_per_frame = fields["samps_per_frame"]
# Initialize the full signal array
if expanded:
combined_signal = []
for nan_val, spf in zip(nan_vals[0], samps_per_frame):
combined_signal.append(np.repeat(nan_val, spf * self.sig_len))
else:
combined_signal = np.repeat(nan_vals, self.sig_len, axis=0)
# Start and end samples in the overall array to place the
# segment samples into
start_samps = [0] + list(np.cumsum(self.seg_len)[0:-1])
end_samps = list(np.cumsum(self.seg_len))
if self.layout == "fixed":
# Copy over the signals directly. Recall there are no
# empty segments in fixed layout records.
for i in range(self.n_seg):
signals = getattr(self.segments[i], sig_attr)
if expanded:
for ch in range(self.n_sig):
start = start_samps[i] * samps_per_frame[ch]
end = end_samps[i] * samps_per_frame[ch]
combined_signal[ch][start:end] = signals[ch]
else:
start = start_samps[i]
end = end_samps[i]
combined_signal[start:end, :] = signals
else:
# Copy over the signals into the matching channels
for i in range(1, self.n_seg):
seg = self.segments[i]
if seg is not None:
# Get the segment channels to copy over for each
# overall channel
segment_channels = _get_wanted_channels(
fields["sig_name"], seg.sig_name, pad=True
)
signals = getattr(seg, sig_attr)
for ch in range(self.n_sig):
# Copy over relevant signal
if segment_channels[ch] is not None:
if expanded:
signal = signals[segment_channels[ch]]
start = start_samps[i] * samps_per_frame[ch]
end = end_samps[i] * samps_per_frame[ch]
combined_signal[ch][start:end] = signal
else:
signal = signals[:, segment_channels[ch]]
start = start_samps[i]
end = end_samps[i]
combined_signal[start:end, ch] = signal
# Create the single segment Record object and set attributes
record = Record()
for field in fields:
setattr(record, field, fields[field])
setattr(record, sig_attr, combined_signal)
# Use the signal to set record features
if physical:
record.set_p_features(expanded=expanded)
else:
record.set_d_features(expanded=expanded)
return record
|
class MultiRecord(BaseRecord, _header.MultiHeaderMixin):
'''
The class representing multi-segment WFDB records.
MultiRecord objects can be created using the initializer, or by reading a
multi-segment WFDB record using 'rdrecord' with the `m2s` (multi to single)
input parameter set to False.
The attributes of the MultiRecord object give information about the entire
record as specified by: https://www.physionet.org/physiotools/wag/header-5.htm
In addition, the `segments` parameter is a list of Record objects
representing each individual segment, or None representing empty segments,
of the entire multi-segment record.
Notably, this class has no attribute representing the signals as a whole.
The 'multi_to_single' instance method can be called on MultiRecord objects
to return a single segment representation of the record as a Record object.
The resulting Record object will have its 'p_signal' field set.
Attributes
----------
segments : list, optional
The segments to be read.
layout : str, optional
Whether the record will be 'fixed' or 'variable'.
record_name : str, optional
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pn_dir`
parameter is set, this parameter should contain just the base
record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
n_sig : int, optional
Total number of signals.
fs : int, float, optional
The sampling frequency of the record.
counter_freq : float, optional
The frequency used to start counting.
base_counter : float, optional
The counter used at the start of the file.
sig_len : int, optional
The total length of the signal.
base_time : datetime.time, optional
The time of day at the beginning of the record.
base_date : datetime.date, optional
The date at the beginning of the record.
base_datetime : datetime.datetime, optional
The date and time at the beginning of the record, equivalent to
`datetime.combine(base_date, base_time)`.
seg_name : str, optional
The name of the segment.
seg_len : List[int], optional
The length of each segment.
comments : list, optional
A list of string comments to be written to the header file.
sig_name : str, optional
A list of strings giving the signal name of each signal channel.
sig_segments : list, optional
The signal segments to be read.
Examples
--------
>>> record_m = wfdb.MultiRecord(record_name='rm', fs=50, n_sig=8,
sig_len=9999, seg_name=['rm_1', '~', 'rm_2'],
seg_len=[800, 200, 900])
>>> # Get a MultiRecord object
>>> record_s = wfdb.rdrecord('s00001-2896-10-10-00-31', m2s=False)
>>> # Turn it into a single record
>>> record_s = record_s.multi_to_single()
record_s initially stores a `MultiRecord` object, and is then converted into
a `Record` object.
'''
def __init__(
self,
segments=None,
layout=None,
record_name=None,
n_sig=None,
fs=None,
counter_freq=None,
base_counter=None,
sig_len=None,
base_time=None,
base_date=None,
base_datetime=None,
seg_name=None,
seg_len=None,
comments=None,
sig_name=None,
sig_segments=None,
):
pass
def wrsamp(self, write_dir=""):
'''
Write a multi-segment header, along with headers and dat files
for all segments, from this object.
Parameters
----------
write_dir : str, optional
The directory in which to write the files.
Returns
-------
N/A
'''
pass
def _check_segment_cohesion(self):
'''
Check the cohesion of the segments field with other fields used
to write the record.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def _required_segments(self, sampfrom, sampto):
'''
Determine the segments and the samples within each segment in a
multi-segment record, that lie within a sample range.
Parameters
----------
sampfrom : int
The starting sample number to read for each channel.
sampto : int
The sample number at which to stop reading for each channel.
Returns
-------
seg_numbers : list
List of segment numbers to read.
readsamps : list
List of sample numbers to be read.
'''
pass
def _required_channels(self, seg_numbers, channels, dir_name, pn_dir):
'''
Get the channel numbers to be read from each specified segment,
given the channel numbers specified for the entire record.
Parameters
----------
seg_numbers : list
List of segment numbers to read.
channels : list
The channel indices to read for the whole record. Same one
specified by user input.
dir_name : str
The local directory location of the header file. This parameter
is ignored if `pn_dir` is set.
pn_dir : str
Option used to stream data from Physionet. The Physionet
database directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/content/mitdb'
pn_dir='mitdb'.
Returns
-------
required_channels : list
List of lists, containing channel indices to read for each
desired segment.
'''
pass
def _arrange_fields(
self, seg_numbers, seg_ranges, channels, sampfrom=0, force_channels=True
):
'''
Arrange/edit object fields to reflect user channel and/or
signal range inputs. Updates layout specification header if
necessary.
Parameters
----------
seg_numbers : list
List of integer segment numbers read.
seg_ranges: list
List of integer pairs, giving the sample ranges for each
segment number read.
channels : list
List of channel numbers specified
sampfrom : int
Starting sample read.
force_channels : bool, optional
Used when reading multi-segment variable layout records.
Whether to update the layout specification record to match
the input `channels` argument, or to omit channels in which
no read segment contains the signals.
Returns
-------
N/A
'''
pass
def multi_to_single(self, physical, return_res=64, expanded=False):
'''
Create a Record object from the MultiRecord object. All signal
segments will be combined into the new object's `p_signal` or
`d_signal` field. For digital format, the signals must have
the same storage format, baseline, and adc_gain in all segments.
Parameters
----------
physical : bool
Whether to convert the physical or digital signal.
return_res : int, optional
The return resolution of the `p_signal` field. Options are:
64, 32, and 16.
expanded : bool, optional
If false, combine the sample data from `p_signal` or `d_signal`
into a single two-dimensional array. If true, combine the
sample data from `e_p_signal` or `e_d_signal` into a list of
one-dimensional arrays.
Returns
-------
record : WFDB Record
The single segment record created.
'''
pass
| 8 | 7 | 75 | 8 | 40 | 27 | 8 | 0.92 | 2 | 9 | 1 | 0 | 7 | 8 | 7 | 27 | 610 | 71 | 281 | 84 | 253 | 258 | 168 | 63 | 160 | 29 | 3 | 6 | 56 |
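A brief usage sketch for the MultiRecord class above, in the same doctest style
as the docstrings in this repository. The record name is a placeholder for any
local multi-segment record; only the rdrecord/m2s behaviour described in the
class docstring is relied upon.
>>> import wfdb
>>> record_m = wfdb.rdrecord('some-multi-segment-record', m2s=False)  # placeholder name
>>> record_s = record_m.multi_to_single(physical=True)
>>> record_s.p_signal.shape   # (sig_len, n_sig) of the stitched signal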
147,704 |
MIT-LCP/wfdb-python
|
wfdb/io/_url.py
|
wfdb.io._url.NetFilePermissionError
|
class NetFilePermissionError(NetFileError, PermissionError):
"""The client does not have permission to access a remote file."""
|
class NetFilePermissionError(NetFileError, PermissionError):
'''The client does not have permission to access a remote file.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 5 | 0 | 0 |
147,705 |
MIT-LCP/wfdb-python
|
wfdb/io/record.py
|
wfdb.io.record.BaseRecord
|
class BaseRecord(object):
"""
The base WFDB class extended by the Record and MultiRecord classes.
Attributes
----------
record_name : str, optional
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pn_dir`
parameter is set, this parameter should contain just the base
record name, and the files fill be searched for remotely.
Otherwise, the data files will be searched for in the local path.
n_sig : int, optional
Total number of signals.
fs : int, float, optional
The sampling frequency of the record.
counter_freq : float, optional
The frequency used to start counting.
base_counter : float, optional
The counter used at the start of the file.
sig_len : int, optional
The total length of the signal.
base_time : datetime.time, optional
The time of day at the beginning of the record.
base_date : datetime.date, optional
The date at the beginning of the record.
base_datetime : datetime.datetime, optional
The date and time at the beginning of the record, equivalent to
`datetime.combine(base_date, base_time)`.
comments : list, optional
A list of string comments to be written to the header file.
sig_name : str, optional
A list of strings giving the signal name of each signal channel.
"""
# The base WFDB class extended by the Record and MultiRecord classes.
def __init__(
self,
record_name=None,
n_sig=None,
fs=None,
counter_freq=None,
base_counter=None,
sig_len=None,
base_time=None,
base_date=None,
base_datetime=None,
comments=None,
sig_name=None,
):
self.record_name = record_name
self.n_sig = n_sig
self.fs = fs
self.counter_freq = counter_freq
self.base_counter = base_counter
self.sig_len = sig_len
if base_datetime is not None:
if base_time is not None:
raise TypeError(
"cannot specify both base_time and base_datetime"
)
if base_date is not None:
raise TypeError(
"cannot specify both base_date and base_datetime"
)
self.base_datetime = base_datetime
else:
self.base_time = base_time
self.base_date = base_date
self.comments = comments
self.sig_name = sig_name
@property
def base_datetime(self):
if self.base_date is None or self.base_time is None:
return None
else:
return datetime.datetime.combine(
date=self.base_date, time=self.base_time
)
@base_datetime.setter
def base_datetime(self, value):
if value is None:
self.base_date = None
self.base_time = None
elif isinstance(value, datetime.datetime) and value.tzinfo is None:
self.base_date = value.date()
self.base_time = value.time()
else:
raise TypeError(f"invalid base_datetime value: {value!r}")
def get_frame_number(self, time_value):
"""
Convert a time value to a frame number.
A time value may be specified as:
- An integer or floating-point number, representing the number of
WFDB frames elapsed from the start of the record.
- A `datetime.timedelta` object, representing elapsed time from the
start of the record.
- A `datetime.datetime` object, representing an absolute date and
time (if the record starting time is known.)
Note that this function may return a value that is less than zero
or greater than the actual length of the record.
Parameters
----------
time_value : number or timedelta or datetime
A time value.
Returns
-------
frame_number : float
Frame number (possibly a fractional frame number).
"""
if hasattr(time_value, "__float__"):
return float(time_value)
if isinstance(time_value, datetime.datetime):
if not self.base_datetime:
raise ValueError(
"base_datetime is unknown; cannot convert absolute "
"date/time to a frame number"
)
time_value -= self.base_datetime
if isinstance(time_value, datetime.timedelta):
return time_value.total_seconds() * self.fs
raise TypeError(f"invalid time value: {time_value!r}")
def get_elapsed_time(self, time_value):
"""
Convert a time value to an elapsed time in seconds.
A time value may be specified as:
- An integer or floating-point number, representing the number of
WFDB frames elapsed from the start of the record.
- A `datetime.timedelta` object, representing elapsed time from the
start of the record.
- A `datetime.datetime` object, representing an absolute date and
time (if the record starting time is known.)
Parameters
----------
time_value : number or timedelta or datetime
A time value.
Returns
-------
elapsed_time : timedelta
Elapsed time from the start of the record.
"""
time_value = self.get_frame_number(time_value)
return datetime.timedelta(seconds=time_value / self.fs)
def get_absolute_time(self, time_value):
"""
Convert a time value to an absolute date and time.
A time value may be specified as:
- An integer or floating-point number, representing the number of
WFDB frames elapsed from the start of the record.
- A `datetime.timedelta` object, representing elapsed time from the
start of the record.
- A `datetime.datetime` object, representing an absolute date and
time (if the record starting time is known.)
Parameters
----------
time_value : number or timedelta or datetime
A time value.
Returns
-------
absolute_time : datetime
Absolute date and time.
"""
time_value = self.get_elapsed_time(time_value)
if not self.base_datetime:
raise ValueError(
"base_datetime is unknown; cannot convert frame number "
"to an absolute date/time"
)
return time_value + self.base_datetime
def check_field(self, field, required_channels="all"):
"""
Check whether a single field is valid in its basic form. Does
not check compatibility with other fields.
Parameters
----------
field : str
The field name.
required_channels : list, optional
Used for signal specification fields. All channels are
checked for their integrity if present, but channels that do
not lie in this field may be None.
Returns
-------
N/A
Notes
-----
This function is called from wrheader to check fields before
writing. It is also supposed to be usable at any point to
check a specific field.
"""
item = getattr(self, field)
if item is None:
raise Exception("Missing field required: %s" % field)
# We should have a list specifying these automatically.
# Whether the item should be a list. Watch out for required_channels for `segments`
expect_list = True if field in LIST_FIELDS else False
# Check the type of the field (and of its elements if it should
# be a list)
_check_item_type(
item,
field_name=field,
allowed_types=ALLOWED_TYPES[field],
expect_list=expect_list,
required_channels=required_channels,
)
# Individual specific field checks
if field in ["d_signal", "p_signal"]:
check_np_array(
item=item,
field_name=field,
ndim=2,
parent_class=(
lambda f: np.integer if f == "d_signal" else np.floating
)(field),
)
elif field in ["e_d_signal", "e_p_signal"]:
for ch in range(len(item)):
check_np_array(
item=item[ch],
field_name=field,
ndim=1,
parent_class=(
lambda f: (
np.integer if f == "e_d_signal" else np.floating
)
)(field),
channel_num=ch,
)
# Record specification fields
elif field == "record_name":
# Allow letters, digits, hyphens, and underscores.
accepted_string = re.match(r"[-\w]+", self.record_name)
if (
not accepted_string
or accepted_string.string != self.record_name
):
raise ValueError(
"record_name must only comprise of letters, digits, hyphens, and underscores."
)
elif field == "n_seg":
if self.n_seg <= 0:
raise ValueError("n_seg must be a positive integer")
elif field == "n_sig":
if self.n_sig <= 0:
raise ValueError("n_sig must be a positive integer")
elif field == "fs":
if self.fs <= 0:
raise ValueError("fs must be a positive number")
elif field == "counter_freq":
if self.counter_freq <= 0:
raise ValueError("counter_freq must be a positive number")
elif field == "base_counter":
if self.base_counter <= 0:
raise ValueError("base_counter must be a positive number")
elif field == "sig_len":
if self.sig_len < 0:
raise ValueError("sig_len must be a non-negative integer")
# Signal specification fields
elif field in _header.SIGNAL_SPECS.index:
if required_channels == "all":
required_channels = range(len(item))
for ch in range(len(item)):
# If the element is allowed to be None
if ch not in required_channels:
if item[ch] is None:
continue
if field == "file_name":
# Check for file_name characters
accepted_string = re.match(r"[-\w]+\.?[\w]+", item[ch])
if (
not accepted_string
or accepted_string.string != item[ch]
):
raise ValueError(
"File names should only contain alphanumerics, hyphens, and an extension. eg. record-100.dat"
)
# Check that dat files are grouped together
if not util.is_monotonic(self.file_name):
raise ValueError(
"Signals in a record that share a given file must be consecutive."
)
elif field == "fmt":
if item[ch] not in _signal.DAT_FMTS:
raise ValueError(
"File formats must be valid WFDB dat formats:",
_signal.DAT_FMTS,
)
elif field == "samps_per_frame":
if item[ch] < 1:
raise ValueError(
"samps_per_frame values must be positive integers"
)
elif field == "skew":
if item[ch] < 0:
raise ValueError(
"skew values must be non-negative integers"
)
elif field == "byte_offset":
if item[ch] < 0:
raise ValueError(
"byte_offset values must be non-negative integers"
)
elif field == "adc_gain":
if item[ch] <= 0:
raise ValueError("adc_gain values must be positive")
elif field == "baseline":
# Original WFDB library 10.5.24 only has 4 bytes for baseline.
if item[ch] < -2147483648 or item[ch] > 2147483647:
raise ValueError(
"baseline values must be between -2147483648 (-2^31) and 2147483647 (2^31 -1)"
)
elif field == "units":
if re.search(r"\s", item[ch]):
raise ValueError(
"units strings may not contain whitespaces."
)
elif field == "adc_res":
if item[ch] < 0:
raise ValueError(
"adc_res values must be non-negative integers"
)
elif field == "block_size":
if item[ch] < 0:
raise ValueError(
"block_size values must be non-negative integers"
)
elif field == "sig_name":
if item[ch][:1].isspace() or item[ch][-1:].isspace():
raise ValueError(
"sig_name strings may not begin or end with "
"whitespace."
)
if re.search(r"[\x00-\x1f\x7f-\x9f]", item[ch]):
raise ValueError(
"sig_name strings may not contain "
"control characters."
)
if len(set(item)) != len(item):
raise ValueError("sig_name strings must be unique.")
# Segment specification fields and comments
elif field in _header.SEGMENT_SPECS.index:
for ch in range(len(item)):
if field == "seg_name":
# Segment names must be alphanumerics or just a single '~'
if item[ch] == "~":
continue
accepted_string = re.match(r"[-\w]+", item[ch])
if (
not accepted_string
or accepted_string.string != item[ch]
):
raise ValueError(
"Non-null segment names may only contain alphanumerics and dashes. Null segment names must be set to '~'"
)
elif field == "seg_len":
# For records with more than 1 segment, the first
# segment may be the layout specification segment
# with a length of 0
min_len = 0 if ch == 0 else 1
if item[ch] < min_len:
raise ValueError(
"seg_len values must be positive integers. Only seg_len[0] may be 0 to indicate a layout segment"
)
# Comment field
elif field == "comments":
if item[ch].startswith("#"):
print(
"Note: comment strings do not need to begin with '#'. This library adds them automatically."
)
if re.search("[\t\n\r\f\v]", item[ch]):
raise ValueError(
"comments may not contain tabs or newlines (they may contain spaces and underscores)."
)
def check_read_inputs(
self, sampfrom, sampto, channels, physical, smooth_frames, return_res
):
"""
Ensure that input read parameters (from rdsamp) are valid for
the record.
Parameters
----------
sampfrom : int
The starting sample number to read for all channels.
sampto : int, 'end'
The sample number at which to stop reading for all channels.
Reads the entire duration by default.
channels : list
List of integer indices specifying the channels to be read.
Reads all channels by default.
physical : bool
Specifies whether to return signals in physical units in the
`p_signal` field (True), or digital units in the `d_signal`
field (False).
smooth_frames : bool
Used when reading records with signals having multiple samples
per frame. Specifies whether to smooth the samples in signals
with more than one sample per frame and return an (MxN) uniform
numpy array as the `d_signal` or `p_signal` field (True), or to
return a list of 1d numpy arrays containing every expanded
sample as the `e_d_signal` or `e_p_signal` field (False).
return_res : int
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
Returns
-------
N/A
"""
# Data Type Check
if not hasattr(sampfrom, "__index__"):
raise TypeError("sampfrom must be an integer")
if not hasattr(sampto, "__index__"):
raise TypeError("sampto must be an integer")
if not isinstance(channels, list):
raise TypeError("channels must be a list of integers")
# Duration Ranges
if sampfrom < 0:
raise ValueError("sampfrom must be a non-negative integer")
if sampfrom > self.sig_len:
raise ValueError("sampfrom must be shorter than the signal length")
if sampto < 0:
raise ValueError("sampto must be a non-negative integer")
if sampto > self.sig_len:
raise ValueError("sampto must be shorter than the signal length")
if sampto <= sampfrom:
raise ValueError("sampto must be greater than sampfrom")
# Channel Ranges
if len(channels):
if min(channels) < 0:
raise ValueError(
"Input channels must all be non-negative integers"
)
if max(channels) > self.n_sig - 1:
raise ValueError(
"Input channels must all be lower than the total number of channels"
)
if return_res not in [64, 32, 16, 8]:
raise ValueError(
"return_res must be one of the following: 64, 32, 16, 8"
)
if physical and return_res == 8:
raise ValueError(
"return_res must be one of the following when physical is True: 64, 32, 16"
)
def _adjust_datetime(self, sampfrom: int):
"""
Adjust date and time fields to reflect user input if possible.
Helper function for the `_arrange_fields` of both Record and
MultiRecord objects.
Parameters
----------
sampfrom : int
The starting sample number to read for all channels.
Returns
-------
N/A
"""
if sampfrom:
dt_seconds = sampfrom / self.fs
if self.base_date and self.base_time:
self.base_datetime += datetime.timedelta(seconds=dt_seconds)
# We can calculate the time even if there is no date
elif self.base_time:
tmp_datetime = datetime.datetime.combine(
datetime.datetime.today().date(), self.base_time
)
self.base_time = (
tmp_datetime + datetime.timedelta(seconds=dt_seconds)
).time()
|
class BaseRecord(object):
'''
The base WFDB class extended by the Record and MultiRecord classes.
Attributes
----------
record_name : str, optional
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pn_dir`
parameter is set, this parameter should contain just the base
record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
n_sig : int, optional
Total number of signals.
fs : int, float, optional
The sampling frequency of the record.
counter_freq : float, optional
The frequency used to start counting.
base_counter : float, optional
The counter used at the start of the file.
sig_len : int, optional
The total length of the signal.
base_time : datetime.time, optional
The time of day at the beginning of the record.
base_date : datetime.date, optional
The date at the beginning of the record.
base_datetime : datetime.datetime, optional
The date and time at the beginning of the record, equivalent to
`datetime.combine(base_date, base_time)`.
comments : list, optional
A list of string comments to be written to the header file.
sig_name : str, optional
A list of strings giving the signal name of each signal channel.
'''
def __init__(
self,
record_name=None,
n_sig=None,
fs=None,
counter_freq=None,
base_counter=None,
sig_len=None,
base_time=None,
base_date=None,
base_datetime=None,
comments=None,
sig_name=None,
):
pass
@property
def base_datetime(self):
pass
@base_datetime.setter
def base_datetime(self, value):
pass
def get_frame_number(self, time_value):
'''
Convert a time value to a frame number.
A time value may be specified as:
- An integer or floating-point number, representing the number of
WFDB frames elapsed from the start of the record.
- A `datetime.timedelta` object, representing elapsed time from the
start of the record.
- A `datetime.datetime` object, representing an absolute date and
time (if the record starting time is known.)
Note that this function may return a value that is less than zero
or greater than the actual length of the record.
Parameters
----------
time_value : number or timedelta or datetime
A time value.
Returns
-------
frame_number : float
Frame number (possibly a fractional frame number).
'''
pass
def get_elapsed_time(self, time_value):
'''
Convert a time value to an elapsed time in seconds.
A time value may be specified as:
- An integer or floating-point number, representing the number of
WFDB frames elapsed from the start of the record.
- A `datetime.timedelta` object, representing elapsed time from the
start of the record.
- A `datetime.datetime` object, representing an absolute date and
time (if the record starting time is known.)
Parameters
----------
time_value : number or timedelta or datetime
A time value.
Returns
-------
elapsed_time : timedelta
Elapsed time from the start of the record.
'''
pass
def get_absolute_time(self, time_value):
'''
Convert a time value to an absolute date and time.
A time value may be specified as:
- An integer or floating-point number, representing the number of
WFDB frames elapsed from the start of the record.
- A `datetime.timedelta` object, representing elapsed time from the
start of the record.
- A `datetime.datetime` object, representing an absolute date and
time (if the record starting time is known.)
Parameters
----------
time_value : number or timedelta or datetime
A time value.
Returns
-------
absolute_time : datetime
Absolute date and time.
'''
pass
def check_field(self, field, required_channels="all"):
'''
Check whether a single field is valid in its basic form. Does
not check compatibility with other fields.
Parameters
----------
field : str
The field name.
required_channels : list, optional
Used for signal specification fields. All channels are
checked for their integrity if present, but channels that do
not lie in this field may be None.
Returns
-------
N/A
Notes
-----
This function is called from wrheader to check fields before
writing. It is also supposed to be usable at any point to
check a specific field.
'''
pass
def check_read_inputs(
self, sampfrom, sampto, channels, physical, smooth_frames, return_res
):
'''
Ensure that input read parameters (from rdsamp) are valid for
the record.
Parameters
----------
sampfrom : int
The starting sample number to read for all channels.
sampto : int, 'end'
The sample number at which to stop reading for all channels.
Reads the entire duration by default.
channels : list
List of integer indices specifying the channels to be read.
Reads all channels by default.
physical : bool
Specifies whether to return signals in physical units in the
`p_signal` field (True), or digital units in the `d_signal`
field (False).
smooth_frames : bool
Used when reading records with signals having multiple samples
per frame. Specifies whether to smooth the samples in signals
with more than one sample per frame and return an (MxN) uniform
numpy array as the `d_signal` or `p_signal` field (True), or to
return a list of 1d numpy arrays containing every expanded
sample as the `e_d_signal` or `e_p_signal` field (False).
return_res : int
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
Returns
-------
N/A
'''
pass
def _adjust_datetime(self, sampfrom: int):
'''
Adjust date and time fields to reflect user input if possible.
Helper function for the `_arrange_fields` of both Record and
MultiRecord objects.
Parameters
----------
sampfrom : int
The starting sample number to read for all channels.
Returns
-------
N/A
'''
pass
| 12 | 7 | 52 | 4 | 32 | 16 | 11 | 0.62 | 1 | 10 | 0 | 2 | 9 | 10 | 9 | 9 | 520 | 50 | 292 | 44 | 265 | 180 | 155 | 27 | 145 | 61 | 1 | 4 | 96 |
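A small illustrative sketch of the time-conversion helpers defined above,
assuming a wfdb version in which Record.__init__ accepts base_datetime just as
BaseRecord does; the numbers are chosen purely for illustration.
>>> import datetime
>>> import wfdb
>>> rec = wfdb.Record(fs=250, base_datetime=datetime.datetime(2020, 1, 1))
>>> rec.get_frame_number(datetime.timedelta(seconds=10))
2500.0
>>> rec.get_absolute_time(2500)
datetime.datetime(2020, 1, 1, 0, 0, 10)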
147,706 |
MIT-LCP/wfdb-python
|
wfdb/io/header.py
|
wfdb.io.header.HeaderSyntaxError
|
class HeaderSyntaxError(ValueError):
"""Invalid syntax found in a WFDB header file."""
|
class HeaderSyntaxError(ValueError):
'''Invalid syntax found in a WFDB header file.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 4 | 0 | 0 |
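A hedged sketch of how this exception might be caught around header parsing;
the record path is a placeholder, not a file shipped with the library.
>>> import wfdb
>>> from wfdb.io.header import HeaderSyntaxError
>>> try:
...     hdr = wfdb.rdheader('path/to/possibly-malformed-record')  # placeholder
... except HeaderSyntaxError as exc:
...     print('invalid header line:', exc)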
147,707 |
MIT-LCP/wfdb-python
|
tests/test_url.py
|
tests.test_url.DummyHTTPRequestHandler
|
class DummyHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
"""
HTTPRequestHandler used to simulate a web server for testing.
"""
def do_HEAD(self):
self.send_head()
def do_GET(self):
body = self.send_head()
self.wfile.write(body)
def log_message(self, message, *args):
pass
def send_head(self):
content = self.server.file_content.get(self.path)
if content is None:
self.send_error(404)
return b""
headers = {"Content-Type": "text/plain"}
status = 200
if self.server.allow_gzip:
headers["Vary"] = "Accept-Encoding"
if "gzip" in self.headers.get("Accept-Encoding", ""):
content = gzip.compress(content)
headers["Content-Encoding"] = "gzip"
if self.server.allow_range:
headers["Accept-Ranges"] = "bytes"
req_range = self.headers.get("Range", "")
if req_range.startswith("bytes="):
start, end = req_range.split("=")[1].split("-")
start = int(start)
if end == "":
end = len(content)
else:
end = min(len(content), int(end) + 1)
if start < end:
status = 206
resp_range = "bytes %d-%d/%d" % (
start,
end - 1,
len(content),
)
content = content[start:end]
else:
status = 416
resp_range = "bytes */%d" % len(content)
content = b""
headers["Content-Range"] = resp_range
headers["Content-Length"] = len(content)
self.send_response(status)
for h, v in sorted(headers.items()):
self.send_header(h, v)
self.end_headers()
return content
|
class DummyHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
'''
HTTPRequestHandler used to simulate a web server for testing.
'''
def do_HEAD(self):
pass
def do_GET(self):
pass
def log_message(self, message, *args):
pass
def send_head(self):
pass
| 5 | 1 | 13 | 1 | 12 | 0 | 3 | 0.06 | 1 | 1 | 0 | 0 | 4 | 0 | 4 | 27 | 60 | 8 | 49 | 13 | 44 | 3 | 43 | 13 | 38 | 9 | 3 | 3 | 12 |
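A hedged sketch of how a handler like this can back a throwaway test server.
The DummyServer subclass and its attributes (file_content, allow_gzip,
allow_range) are assumptions inferred from the handler body above, not part of
the original test module.
>>> import http.server, threading
>>> class DummyServer(http.server.HTTPServer):
...     allow_gzip = False
...     allow_range = True
...     file_content = {'/records/100.hea': b'100 2 360 650000\n'}
>>> server = DummyServer(('127.0.0.1', 0), DummyHTTPRequestHandler)
>>> threading.Thread(target=server.serve_forever, daemon=True).start()
>>> # HTTP requests against 127.0.0.1:<server.server_address[1]> now serve the dict contents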
147,708 |
MIT-LCP/wfdb-python
|
wfdb/io/_url.py
|
wfdb.io._url.NetFileError
|
class NetFileError(OSError):
"""An error occurred while reading a remote file."""
def __init__(self, message, url=None, status_code=None):
super().__init__(message)
self.url = url
self.status_code = status_code
|
class NetFileError(OSError):
'''An error occurred while reading a remote file.'''
def __init__(self, message, url=None, status_code=None):
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 1 | 0.2 | 1 | 1 | 0 | 2 | 1 | 2 | 1 | 14 | 7 | 1 | 5 | 4 | 3 | 1 | 5 | 4 | 3 | 1 | 4 | 0 | 1 |
147,709 |
MIT-LCP/wfdb-python
|
wfdb/io/_url.py
|
wfdb.io._url.NetFileNotFoundError
|
class NetFileNotFoundError(NetFileError, FileNotFoundError):
"""A remote file does not exist."""
|
class NetFileNotFoundError(NetFileError, FileNotFoundError):
'''A remote file does not exist.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 5 | 0 | 0 |
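Taken together, the exception classes above form a small hierarchy: NetFileError
extends OSError, while NetFileNotFoundError and NetFilePermissionError also mix
in the corresponding built-in exceptions. A hedged handling sketch:
>>> from wfdb.io._url import NetFileError, NetFileNotFoundError, NetFilePermissionError
>>> def describe(exc):
...     if isinstance(exc, NetFileNotFoundError):
...         return 'missing: %s' % exc.url
...     if isinstance(exc, NetFilePermissionError):
...         return 'forbidden: %s' % exc.url
...     return 'HTTP %s for %s' % (exc.status_code, exc.url)
>>> describe(NetFileNotFoundError('no such file', url='https://example.org/x.dat', status_code=404))
'missing: https://example.org/x.dat'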
147,710 |
MIT-LCP/wfdb-python
|
wfdb/io/_signal.py
|
wfdb.io._signal.SignalMixin
|
class SignalMixin(object):
"""
Mixin class with signal methods. Inherited by Record class.
Attributes
----------
N/A
"""
def wr_dats(self, expanded, write_dir):
"""
Write all dat files associated with a record
expanded=True to use e_d_signal instead of d_signal.
Parameters
----------
expanded : bool
Whether to transform the `e_d_signal` attribute (True) or
the `d_signal` attribute (False).
write_dir : str
The directory to write the output file to.
Returns
-------
N/A
"""
if not self.n_sig:
return
# Get all the fields used to write the header
# Assuming this method was called through wrsamp,
# these will have already been checked in wrheader()
_, _ = self.get_write_fields()
if expanded:
# Using list of arrays e_d_signal
self.check_field("e_d_signal")
else:
# Check the validity of the d_signal field
self.check_field("d_signal")
# Check the cohesion of the d_signal field against the other
# fields used to write the header. (Note that for historical
# reasons, this doesn't actually check any of the optional
# header fields.)
self.check_sig_cohesion([], expanded)
# Write each of the specified dat files
self.wr_dat_files(expanded=expanded, write_dir=write_dir)
def check_sig_cohesion(self, write_fields, expanded):
"""
Check the cohesion of the d_signal/e_d_signal field with the other
fields used to write the record.
Parameters
----------
write_fields : list
All the fields used to write the header.
expanded : bool
Whether to transform the `e_d_signal` attribute (True) or
the `d_signal` attribute (False).
Returns
-------
N/A
"""
# Using list of arrays e_d_signal
if expanded:
# Set default samps_per_frame
spf = self.samps_per_frame
for ch in range(len(spf)):
if spf[ch] is None:
spf[ch] = 1
# Match the actual signal shape against stated length and number of channels
if self.n_sig != len(self.e_d_signal):
raise ValueError(
"n_sig does not match the length of e_d_signal"
)
for ch in range(self.n_sig):
if len(self.e_d_signal[ch]) != spf[ch] * self.sig_len:
raise ValueError(
f"Length of channel {ch} does not match "
f"samps_per_frame[{ch}]*sig_len"
)
# For each channel (if any), make sure the digital format has no values out of bounds
for ch in range(self.n_sig):
fmt = self.fmt[ch]
dmin, dmax = _digi_bounds(self.fmt[ch])
chmin = min(self.e_d_signal[ch])
chmax = max(self.e_d_signal[ch])
if (chmin < dmin) or (chmax > dmax):
raise IndexError(
"Channel "
+ str(ch)
+ " contain values outside allowed range ["
+ str(dmin)
+ ", "
+ str(dmax)
+ "] for fmt "
+ str(fmt)
)
# Ensure that the checksums and initial value fields match the digital signal (if the fields are present)
if self.n_sig > 0:
if "checksum" in write_fields:
realchecksum = self.calc_checksum(expanded)
if self.checksum != realchecksum:
print(
"The actual checksum of e_d_signal is: ",
realchecksum,
)
raise ValueError(
"checksum field does not match actual checksum of e_d_signal"
)
if "init_value" in write_fields:
realinit_value = [
self.e_d_signal[ch][0] for ch in range(self.n_sig)
]
if self.init_value != realinit_value:
print(
"The actual init_value of e_d_signal is: ",
realinit_value,
)
raise ValueError(
"init_value field does not match actual init_value of e_d_signal"
)
# Using uniform d_signal
else:
# Match the actual signal shape against stated length and number of channels
if (self.sig_len, self.n_sig) != self.d_signal.shape:
print("sig_len: ", self.sig_len)
print("n_sig: ", self.n_sig)
print("d_signal.shape: ", self.d_signal.shape)
raise ValueError(
"sig_len and n_sig do not match shape of d_signal"
)
# For each channel (if any), make sure the digital format has no values out of bounds
for ch in range(self.n_sig):
fmt = self.fmt[ch]
dmin, dmax = _digi_bounds(self.fmt[ch])
chmin = min(self.d_signal[:, ch])
chmax = max(self.d_signal[:, ch])
if (chmin < dmin) or (chmax > dmax):
raise IndexError(
"Channel "
+ str(ch)
+ " contain values outside allowed range ["
+ str(dmin)
+ ", "
+ str(dmax)
+ "] for fmt "
+ str(fmt)
)
# Ensure that the checksums and initial value fields match the digital signal (if the fields are present)
if self.n_sig > 0:
if "checksum" in write_fields:
realchecksum = self.calc_checksum()
if self.checksum != realchecksum:
print(
"The actual checksum of d_signal is: ", realchecksum
)
raise ValueError(
"checksum field does not match actual checksum of d_signal"
)
if "init_value" in write_fields:
realinit_value = list(self.d_signal[0, :])
if self.init_value != realinit_value:
print(
"The actual init_value of d_signal is: ",
realinit_value,
)
raise ValueError(
"init_value field does not match actual init_value of d_signal"
)
def set_p_features(self, do_dac=False, expanded=False):
"""
Use properties of the physical signal field to set the following
features: n_sig, sig_len.
Parameters
----------
do_dac : bool, optional
Whether to use the digital signal field to perform dac
conversion to get the physical signal field beforehand.
expanded : bool, optional
Whether to transform the `e_p_signal` attribute (True) or
the `p_signal` attribute (False). If True, the `samps_per_frame`
attribute is also required.
Returns
-------
N/A
Notes
-----
Regarding dac conversion:
- fmt, gain, and baseline must all be set in order to perform
dac.
- Unlike with adc, there is no way to infer these fields.
- Using the fmt, gain and baseline fields, dac is performed,
and (e_)p_signal is set.
*Developer note: Seems this function will be very infrequently used.
The set_d_features function seems far more useful.
"""
if expanded:
if do_dac:
self.check_field("e_d_signal")
self.check_field("fmt", "all")
self.check_field("adc_gain", "all")
self.check_field("baseline", "all")
self.check_field("samps_per_frame", "all")
# All required fields are present and valid. Perform DAC
self.e_p_signal = self.dac(expanded)
# Use e_p_signal to set fields
self.check_field("e_p_signal", "all")
self.sig_len = int(
len(self.e_p_signal[0]) / self.samps_per_frame[0]
)
self.n_sig = len(self.e_p_signal)
else:
if do_dac:
self.check_field("d_signal")
self.check_field("fmt", "all")
self.check_field("adc_gain", "all")
self.check_field("baseline", "all")
# All required fields are present and valid. Perform DAC
self.p_signal = self.dac()
# Use p_signal to set fields
self.check_field("p_signal")
self.sig_len = self.p_signal.shape[0]
self.n_sig = self.p_signal.shape[1]
def set_d_features(self, do_adc=False, single_fmt=True, expanded=False):
"""
Use properties of the digital signal field to set the following
features: n_sig, sig_len, init_value, checksum, and possibly
*(fmt, adc_gain, baseline).
Parameters
----------
do_adc : bool, optional
Whether to use the physical signal field to perform adc
conversion to get the digital signal field beforehand.
single_fmt : bool, optional
Whether to use a single digital format during adc, if it is
performed.
expanded : bool, optional
Whether to transform the `e_p_signal` attribute (True) or
the `p_signal` attribute (False).
Returns
-------
N/A
Notes
-----
Regarding adc conversion:
- If fmt is unset:
- Neither adc_gain nor baseline may be set. If the digital values
used to store the signal are known, then the file format should
also be known.
- The most appropriate fmt for the signals will be calculated and the
`fmt` attribute will be set. Given that neither `adc_gain` nor
`baseline` is allowed to be set, optimal values for those fields will
then be calculated and set as well.
- If fmt is set:
- If both adc_gain and baseline are unset, optimal values for those
fields will be calculated the fields will be set.
- If both adc_gain and baseline are set, the function will continue.
- If only one of adc_gain and baseline are set, this function will
raise an error. It makes no sense to know only one of those fields.
- ADC will occur after valid values for fmt, adc_gain, and baseline are
present, using all three fields.
"""
if expanded:
# adc is performed.
if do_adc:
self.check_field("e_p_signal", "all")
# If there is no fmt set it, adc_gain, and baseline
if self.fmt is None:
# Make sure that neither adc_gain nor baseline are set
if self.adc_gain is not None or self.baseline is not None:
raise Exception(
"If fmt is not set, gain and baseline may not be set either."
)
# Choose appropriate fmts based on estimated signal resolutions.
res = est_res(self.e_p_signal)
self.fmt = _wfdb_fmt(res, single_fmt)
# If there is a fmt set
else:
self.check_field("fmt", "all")
# Neither field set
if self.adc_gain is None and self.baseline is None:
# Calculate and set optimal gain and baseline values to convert physical signals
self.adc_gain, self.baseline = self.calc_adc_params()
# Exactly one field set
elif (self.adc_gain is None) ^ (self.baseline is None):
raise Exception(
"If fmt is set, gain and baseline should both be set or not set."
)
self.check_field("adc_gain", "all")
self.check_field("baseline", "all")
# All required fields are present and valid. Perform ADC
self.e_d_signal = self.adc(expanded)
# Use e_d_signal to set fields
self.check_field("e_d_signal", "all")
self.sig_len = int(
len(self.e_d_signal[0]) / self.samps_per_frame[0]
)
self.n_sig = len(self.e_d_signal)
self.init_value = [sig[0] for sig in self.e_d_signal]
self.checksum = self.calc_checksum(expanded)
else:
# adc is performed.
if do_adc:
self.check_field("p_signal")
# If there is no fmt set
if self.fmt is None:
# Make sure that neither adc_gain nor baseline are set
if self.adc_gain is not None or self.baseline is not None:
raise Exception(
"If fmt is not set, gain and baseline may not be set either."
)
# Choose appropriate fmts based on estimated signal resolutions.
res = est_res(self.p_signal)
self.fmt = _wfdb_fmt(res, single_fmt)
# Calculate and set optimal gain and baseline values to convert physical signals
self.adc_gain, self.baseline = self.calc_adc_params()
# If there is a fmt set
else:
self.check_field("fmt", "all")
# Neither field set
if self.adc_gain is None and self.baseline is None:
# Calculate and set optimal gain and baseline values to convert physical signals
self.adc_gain, self.baseline = self.calc_adc_params()
# Exactly one field set
elif (self.adc_gain is None) ^ (self.baseline is None):
raise Exception(
"If fmt is set, gain and baseline should both be set or not set."
)
self.check_field("adc_gain", "all")
self.check_field("baseline", "all")
# All required fields are present and valid. Perform ADC
self.d_signal = self.adc()
# Use d_signal to set fields
self.check_field("d_signal")
self.sig_len = self.d_signal.shape[0]
self.n_sig = self.d_signal.shape[1]
self.init_value = list(self.d_signal[0, :])
self.checksum = self.calc_checksum()
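# Illustrative round-trip (a hedged sketch, not part of the original source):
# given a Record whose p_signal has been filled in by the caller, the two
# feature-setting methods above can be chained in the same doctest style used
# elsewhere in this file:
#   >>> record.set_d_features(do_adc=True)   # infers fmt/adc_gain/baseline, sets d_signal
#   >>> record.set_p_features(do_dac=True)   # converts back, repopulating p_signal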
def adc(self, expanded=False, inplace=False):
"""
Performs analogue to digital conversion of the physical signal stored
in p_signal if expanded is False, or e_p_signal if expanded is True.
The p_signal/e_p_signal, fmt, gain, and baseline fields must all be
valid.
If inplace is True, the adc will be performed inplace on the variable,
the d_signal/e_d_signal attribute will be set, and the
p_signal/e_p_signal field will be set to None.
Parameters
----------
expanded : bool, optional
Whether to transform the `e_p_signal` attribute (True) or
the `p_signal` attribute (False).
inplace : bool, optional
Whether to automatically set the object's corresponding
digital signal attribute and set the physical
signal attribute to None (True), or to return the converted
signal as a separate variable without changing the original
physical signal attribute (False).
Returns
-------
d_signal : ndarray, optional
The digital conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
Examples:
---------
>>> import wfdb
>>> record = wfdb.rdsamp('sample-data/100')
>>> d_signal = record.adc()
>>> record.adc(inplace=True)
>>> record.dac(inplace=True)
"""
# The digital NAN values for each channel
d_nans = _digi_nan(self.fmt)
# To do: choose the minimum return res needed
intdtype = "int64"
# Convert a physical (1D or 2D) signal array to digital. Note that
# the input array is modified!
def adc_inplace(p_signal, adc_gain, baseline, d_nan):
nanlocs = np.isnan(p_signal)
np.multiply(p_signal, adc_gain, p_signal)
np.add(p_signal, baseline, p_signal)
np.round(p_signal, 0, p_signal)
np.copyto(p_signal, d_nan, where=nanlocs)
d_signal = p_signal.astype(intdtype, copy=False)
return d_signal
# Do inplace conversion and set relevant variables.
if inplace:
if expanded:
for ch, ch_p_signal in enumerate(self.e_p_signal):
ch_d_signal = adc_inplace(
ch_p_signal,
self.adc_gain[ch],
self.baseline[ch],
d_nans[ch],
)
self.e_p_signal[ch] = ch_d_signal
self.e_d_signal = self.e_p_signal
self.e_p_signal = None
else:
self.d_signal = adc_inplace(
self.p_signal,
self.adc_gain,
self.baseline,
d_nans,
)
self.p_signal = None
# Return the variable
else:
if expanded:
e_d_signal = []
for ch, ch_p_signal in enumerate(self.e_p_signal):
ch_d_signal = adc_inplace(
ch_p_signal.copy(),
self.adc_gain[ch],
self.baseline[ch],
d_nans[ch],
)
e_d_signal.append(ch_d_signal)
return e_d_signal
else:
return adc_inplace(
self.p_signal.copy(),
self.adc_gain,
self.baseline,
d_nans,
)
def dac(self, expanded=False, return_res=64, inplace=False):
"""
Performs the digital to analogue conversion of the signal stored
in `d_signal` if expanded is False, or `e_d_signal` if expanded
is True.
The d_signal/e_d_signal, fmt, gain, and baseline fields must all be
valid.
If inplace is True, the dac will be performed inplace on the
variable, the p_signal/e_p_signal attribute will be set, and the
d_signal/e_d_signal field will be set to None.
Parameters
----------
expanded : bool, optional
Whether to transform the `e_d_signal attribute` (True) or
the `d_signal` attribute (False).
return_res : int, optional
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
inplace : bool, optional
Whether to automatically set the object's corresponding
physical signal attribute and set the digital signal
attribute to None (True), or to return the converted
signal as a separate variable without changing the original
digital signal attribute (False).
Returns
-------
p_signal : ndarray, optional
The physical conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
Examples
--------
>>> import wfdb
>>> record = wfdb.rdsamp('sample-data/100', physical=False)
>>> p_signal = record.dac()
>>> record.dac(inplace=True)
>>> record.adc(inplace=True)
"""
# The digital NAN values for each channel
d_nans = _digi_nan(self.fmt)
# Get the appropriate float dtype
if return_res == 64:
floatdtype = "float64"
elif return_res == 32:
floatdtype = "float32"
else:
floatdtype = "float16"
# Do inplace conversion and set relevant variables.
if inplace:
if expanded:
for ch in range(self.n_sig):
# NAN locations for the channel
ch_nanlocs = self.e_d_signal[ch] == d_nans[ch]
self.e_d_signal[ch] = self.e_d_signal[ch].astype(
floatdtype, copy=False
)
np.subtract(
self.e_d_signal[ch],
self.baseline[ch],
self.e_d_signal[ch],
)
np.divide(
self.e_d_signal[ch],
self.adc_gain[ch],
self.e_d_signal[ch],
)
self.e_d_signal[ch][ch_nanlocs] = np.nan
self.e_p_signal = self.e_d_signal
self.e_d_signal = None
else:
nanlocs = self.d_signal == d_nans
# Do float conversion immediately to avoid potential under/overflow
# of efficient int dtype
self.d_signal = self.d_signal.astype(floatdtype, copy=False)
np.subtract(self.d_signal, self.baseline, self.d_signal)
np.divide(self.d_signal, self.adc_gain, self.d_signal)
self.d_signal[nanlocs] = np.nan
self.p_signal = self.d_signal
self.d_signal = None
# Return the variable
else:
if expanded:
p_signal = []
for ch in range(self.n_sig):
# NAN locations for the channel
ch_nanlocs = self.e_d_signal[ch] == d_nans[ch]
ch_p_signal = self.e_d_signal[ch].astype(
floatdtype, copy=False
)
np.subtract(ch_p_signal, self.baseline[ch], ch_p_signal)
np.divide(ch_p_signal, self.adc_gain[ch], ch_p_signal)
ch_p_signal[ch_nanlocs] = np.nan
p_signal.append(ch_p_signal)
else:
nanlocs = self.d_signal == d_nans
p_signal = self.d_signal.astype(floatdtype, copy=False)
np.subtract(p_signal, self.baseline, p_signal)
np.divide(p_signal, self.adc_gain, p_signal)
p_signal[nanlocs] = np.nan
return p_signal
def calc_adc_gain_baseline(self, ch, minvals, maxvals):
"""
Compute adc_gain and baseline parameters for a given channel.
Parameters
----------
ch: int
The channel that the adc_gain and baseline are being computed for.
minvals: list
The minimum values for each channel.
maxvals: list
The maximum values for each channel.
Returns
-------
adc_gain : float
Calculated `adc_gain` value for a given channel.
baseline : int
Calculated `baseline` value for a given channel.
Notes
-----
This is the mapping equation:
`digital - baseline / adc_gain = physical`
`physical * adc_gain + baseline = digital`
The original WFDB library stores `baseline` as int32.
Constrain abs(adc_gain) <= 2**31 == 2147483648.
This function does carefully deal with overflow for calculated
int32 `baseline` values, but does not consider over/underflow
for calculated float `adc_gain` values.
"""
# Get the minimum and maximum (valid) storage values
dmin, dmax = _digi_bounds(self.fmt[ch])
# add 1 because the lowest value is used to store nans
dmin = dmin + 1
pmin = minvals[ch]
pmax = maxvals[ch]
# Figure out digital samples used to store physical samples
# If the entire signal is NAN, gain/baseline won't be used
if np.isnan(pmin):
adc_gain = 1
baseline = 1
# If the signal is just one value, store one digital value.
elif pmin == pmax:
if pmin == 0:
adc_gain = 1
baseline = 1
else:
# All digital values are +1 or -1. Keep adc_gain > 0
adc_gain = abs(1 / pmin)
baseline = 0
# Regular varied signal case.
else:
# The equation is: p = (d - b) / g
# Approximately, pmax maps to dmax, and pmin maps to
# dmin. Gradient will be equal to, or close to
# delta(d) / delta(p), since intercept baseline has
# to be an integer.
# Constraint: baseline must be between +/- 2**31
adc_gain = (dmax - dmin) / (pmax - pmin)
baseline = dmin - adc_gain * pmin
# Make adjustments for baseline to be an integer
# This up/down round logic of baseline is to ensure
# there is no overshoot of dmax. Now pmax will map
# to dmax or dmax-1 which is also fine.
if pmin > 0:
baseline = int(np.ceil(baseline))
else:
baseline = int(np.floor(baseline))
# After baseline is set, adjust gain correspondingly. Set
# the gain to map pmin to dmin, and p==0 to baseline.
# In the case where pmin == 0 and dmin == baseline,
# adc_gain is already correct. Avoid dividing by 0.
if dmin != baseline:
adc_gain = (dmin - baseline) / pmin
# Remap signal if baseline exceeds boundaries.
# This may happen if pmax < 0
if baseline > MAX_I32:
# pmin maps to dmin, baseline maps to 2**31 - 1
# pmax will map to a lower value than before
adc_gain = (MAX_I32 - dmin) / abs(pmin)
baseline = MAX_I32
# This may happen if pmin > 0
elif baseline < MIN_I32:
# pmax maps to dmax, baseline maps to -2**31 + 1
adc_gain = (dmax - MIN_I32) / pmax
baseline = MIN_I32
return adc_gain, baseline
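# Worked example (illustrative only, assuming fmt '16' so that dmin, dmax come
# out as -32767, 32767 after reserving the lowest code for NaN):
#   pmin, pmax = -5.0, 5.0
#   adc_gain = (32767 - (-32767)) / (5.0 - (-5.0)) = 6553.4
#   baseline = -32767 - 6553.4 * (-5.0) = 0.0  ->  floor  ->  0
#   dmin != baseline, so adc_gain = (-32767 - 0) / -5.0 = 6553.4
# A physical value of 5.0 then stores as round(5.0 * 6553.4 + 0) = 32767 = dmax.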
def calc_adc_params(self):
"""
Compute appropriate adc_gain and baseline parameters for adc
conversion, given the physical signal and the fmts.
Parameters
----------
N/A
Returns
-------
adc_gains : list
List of calculated `adc_gain` values for each channel.
baselines : list
List of calculated `baseline` values for each channel
"""
adc_gains = []
baselines = []
if self.p_signal is not None:
if np.where(np.isinf(self.p_signal))[0].size:
raise ValueError("Signal contains inf. Cannot perform adc.")
# min and max ignoring nans, unless whole channel is NAN.
# Should suppress warning message.
minvals = np.nanmin(self.p_signal, axis=0)
maxvals = np.nanmax(self.p_signal, axis=0)
for ch in range(np.shape(self.p_signal)[1]):
adc_gain, baseline = self.calc_adc_gain_baseline(
ch, minvals, maxvals
)
adc_gains.append(adc_gain)
baselines.append(baseline)
elif self.e_p_signal is not None:
minvals = []
maxvals = []
for ch in self.e_p_signal:
minvals.append(np.nanmin(ch))
maxvals.append(np.nanmax(ch))
if any(x == math.inf for x in minvals) or any(
x == math.inf for x in maxvals
):
raise ValueError("Signal contains inf. Cannot perform adc.")
for ch, _ in enumerate(self.e_p_signal):
adc_gain, baseline = self.calc_adc_gain_baseline(
ch, minvals, maxvals
)
adc_gains.append(adc_gain)
baselines.append(baseline)
else:
raise Exception(
"Must supply p_signal or e_p_signal to calc_adc_params"
)
return (adc_gains, baselines)
def convert_dtype(self, physical, return_res, smooth_frames):
"""
Convert the dtype of the signal.
Parameters
----------
physical : bool
Specifies whether to return dtype in physical (float) units in the
`p_signal` field (True), or digital (int) units in the `d_signal`
field (False).
return_res : int
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
smooth_frames : bool
Used when reading records with signals having multiple samples
per frame. Specifies whether to smooth the samples in signals
with more than one sample per frame and return an (MxN) uniform
numpy array as the `d_signal` or `p_signal` field (True), or to
return a list of 1d numpy arrays containing every expanded
sample as the `e_d_signal` or `e_p_signal` field (False).
Returns
-------
N/A
"""
if physical:
return_dtype = "float" + str(return_res)
if smooth_frames:
current_dtype = self.p_signal.dtype
if current_dtype != return_dtype:
self.p_signal = self.p_signal.astype(
return_dtype, copy=False
)
else:
for ch in range(self.n_sig):
if self.e_p_signal[ch].dtype != return_dtype:
self.e_p_signal[ch] = self.e_p_signal[ch].astype(
return_dtype, copy=False
)
else:
return_dtype = "int" + str(return_res)
if smooth_frames:
current_dtype = self.d_signal.dtype
if current_dtype != return_dtype:
# Do not allow changing integer dtype to lower value due to over/underflow
if int(str(current_dtype)[3:]) > int(str(return_dtype)[3:]):
raise Exception(
"Cannot convert digital samples to lower dtype. Risk of overflow/underflow."
)
self.d_signal = self.d_signal.astype(
return_dtype, copy=False
)
else:
for ch in range(self.n_sig):
current_dtype = self.e_d_signal[ch].dtype
if current_dtype != return_dtype:
# Do not allow changing integer dtype to lower value due to over/underflow
if int(str(current_dtype)[3:]) > int(
str(return_dtype)[3:]
):
raise Exception(
"Cannot convert digital samples to lower dtype. Risk of overflow/underflow."
)
self.e_d_signal[ch] = self.e_d_signal[ch].astype(
return_dtype, copy=False
)
return
def calc_checksum(self, expanded=False):
"""
Calculate the checksum(s) of the input signal.
Parameters
----------
expanded : bool, optional
Whether to transform the `e_d_signal` attribute (True) or
the `d_signal` attribute (False).
Returns
-------
cs : list
The resulting checksum-ed signal.
"""
if expanded:
cs = [int(np.sum(s) % 65536) for s in self.e_d_signal]
else:
cs = np.sum(self.d_signal, 0) % 65536
cs = [int(c) for c in cs]
return cs
def wr_dat_files(self, expanded=False, write_dir=""):
"""
Write each of the specified dat files.
Parameters
----------
expanded : bool, optional
Whether to transform the `e_d_signal` attribute (True) or
the `d_signal` attribute (False).
write_dir : str, optional
The directory to write the output file to.
Returns
-------
N/A
"""
# Get the set of dat files to be written, and
# the channels to be written to each file.
file_names, dat_channels = describe_list_indices(self.file_name)
# Get the fmt and byte offset corresponding to each dat file
DAT_FMTS = {}
dat_offsets = {}
for fn in file_names:
DAT_FMTS[fn] = self.fmt[dat_channels[fn][0]]
# byte_offset may not be present
if self.byte_offset is None:
dat_offsets[fn] = 0
else:
dat_offsets[fn] = self.byte_offset[dat_channels[fn][0]]
# Write the dat files
if expanded:
for fn in file_names:
wr_dat_file(
fn,
DAT_FMTS[fn],
None,
dat_offsets[fn],
True,
[self.e_d_signal[ch] for ch in dat_channels[fn]],
[self.samps_per_frame[ch] for ch in dat_channels[fn]],
write_dir=write_dir,
)
else:
dsig = self.d_signal
for fn in file_names:
wr_dat_file(
fn,
DAT_FMTS[fn],
dsig[:, dat_channels[fn][0] : dat_channels[fn][-1] + 1],
dat_offsets[fn],
write_dir=write_dir,
)
def smooth_frames(self, sigtype="physical"):
"""
Convert expanded signals with different samples/frame into
a uniform numpy array.
Parameters
----------
sigtype (default='physical') : str
Specifies whether to smooth the e_p_signal field ('physical'), or the e_d_signal
field ('digital').
Returns
-------
signal : ndarray
Transformed expanded signal into a uniform signal.
"""
spf = self.samps_per_frame[:]
for ch in range(len(spf)):
if spf[ch] is None:
spf[ch] = 1
# The output data type should be the smallest type that can
# represent any input sample value. The intermediate data type
# must be able to represent the sum of spf[ch] sample values.
if sigtype == "physical":
expanded_signal = self.e_p_signal
intermediate_dtype = np.dtype("float64")
allowed_dtypes = [
np.dtype("float32"),
np.dtype("float64"),
]
elif sigtype == "digital":
expanded_signal = self.e_d_signal
intermediate_dtype = np.dtype("int64")
allowed_dtypes = [
np.dtype("int8"),
np.dtype("int16"),
np.dtype("int32"),
np.dtype("int64"),
]
else:
raise ValueError("sigtype must be 'physical' or 'digital'")
n_sig = len(expanded_signal)
sig_len = len(expanded_signal[0]) // spf[0]
input_dtypes = set()
for ch in range(n_sig):
if len(expanded_signal[ch]) != sig_len * spf[ch]:
raise ValueError(
"length mismatch: signal %d has %d samples,"
" expected %dx%d"
% (ch, len(expanded_signal[ch]), sig_len, spf[ch])
)
input_dtypes.add(expanded_signal[ch].dtype)
for output_dtype in allowed_dtypes:
if all(dt <= output_dtype for dt in input_dtypes):
break
signal = np.empty((sig_len, n_sig), dtype=output_dtype)
# Large input arrays will be processed in chunks to avoid the need
# to allocate a single huge temporary array.
CHUNK_SIZE = 65536
for ch in range(n_sig):
if spf[ch] == 1:
signal[:, ch] = expanded_signal[ch]
else:
frames = expanded_signal[ch].reshape(-1, spf[ch])
for chunk_start in range(0, sig_len, CHUNK_SIZE):
chunk_end = chunk_start + CHUNK_SIZE
signal_sum = np.sum(
frames[chunk_start:chunk_end, :],
axis=1,
dtype=intermediate_dtype,
)
signal[chunk_start:chunk_end, ch] = signal_sum / spf[ch]
return signal
|
class SignalMixin(object):
'''
Mixin class with signal methods. Inherited by Record class.
Attributes
----------
N/A
'''
def wr_dats(self, expanded, write_dir):
'''
Write all dat files associated with a record
expanded=True to use e_d_signal instead of d_signal.
Parameters
----------
expanded : bool
Whether to transform the `e_d_signal` attribute (True) or
the `d_signal` attribute (False).
write_dir : str
The directory to write the output file to.
Returns
-------
N/A
'''
pass
def check_sig_cohesion(self, write_fields, expanded):
'''
Check the cohesion of the d_signal/e_d_signal field with the other
fields used to write the record.
Parameters
----------
write_fields : list
All the fields used to write the header.
expanded : bool
Whether to transform the `e_d_signal` attribute (True) or
the `d_signal` attribute (False).
Returns
-------
N/A
'''
pass
def set_p_features(self, do_dac=False, expanded=False):
'''
Use properties of the physical signal field to set the following
features: n_sig, sig_len.
Parameters
----------
do_dac : bool, optional
Whether to use the digital signal field to perform dac
conversion to get the physical signal field beforehand.
expanded : bool, optional
Whether to transform the `e_p_signal` attribute (True) or
the `p_signal` attribute (False). If True, the `samps_per_frame`
attribute is also required.
Returns
-------
N/A
Notes
-----
Regarding dac conversion:
- fmt, gain, and baseline must all be set in order to perform
dac.
- Unlike with adc, there is no way to infer these fields.
- Using the fmt, gain and baseline fields, dac is performed,
and (e_)p_signal is set.
*Developer note: Seems this function will be very infrequently used.
The set_d_features function seems far more useful.
'''
pass
def set_d_features(self, do_adc=False, single_fmt=True, expanded=False):
'''
Use properties of the digital signal field to set the following
features: n_sig, sig_len, init_value, checksum, and possibly
*(fmt, adc_gain, baseline).
Parameters
----------
do_adc : bool, optional
Whether to use the physical signal field to perform adc
conversion to get the digital signal field beforehand.
single_fmt : bool, optional
Whether to use a single digital format during adc, if it is
performed.
expanded : bool, optional
Whether to transform the `e_p_signal` attribute (True) or
the `p_signal` attribute (False).
Returns
-------
N/A
Notes
-----
Regarding adc conversion:
- If fmt is unset:
- Neither adc_gain nor baseline may be set. If the digital values
used to store the signal are known, then the file format should
also be known.
- The most appropriate fmt for the signals will be calculated and the
`fmt` attribute will be set. Given that neither `adc_gain` nor
`baseline` is allowed to be set, optimal values for those fields will
then be calculated and set as well.
- If fmt is set:
- If both adc_gain and baseline are unset, optimal values for those
fields will be calculated and the fields will be set.
- If both adc_gain and baseline are set, the function will continue.
- If only one of adc_gain and baseline is set, this function will
raise an error. It makes no sense to know only one of those fields.
- ADC will occur after valid values for fmt, adc_gain, and baseline are
present, using all three fields.
'''
pass
def adc(self, expanded=False, inplace=False):
'''
Performs analogue to digital conversion of the physical signal stored
in p_signal if expanded is False, or e_p_signal if expanded is True.
The p_signal/e_p_signal, fmt, gain, and baseline fields must all be
valid.
If inplace is True, the adc will be performed inplace on the variable,
the d_signal/e_d_signal attribute will be set, and the
p_signal/e_p_signal field will be set to None.
Parameters
----------
expanded : bool, optional
Whether to transform the `e_p_signal` attribute (True) or
the `p_signal` attribute (False).
inplace : bool, optional
Whether to automatically set the object's corresponding
digital signal attribute and set the physical
signal attribute to None (True), or to return the converted
signal as a separate variable without changing the original
physical signal attribute (False).
Returns
-------
d_signal : ndarray, optional
The digital conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
Examples
--------
>>> import wfdb
>>> record = wfdb.rdsamp('sample-data/100')
>>> d_signal = record.adc()
>>> record.adc(inplace=True)
>>> record.dac(inplace=True)
'''
pass
def adc_inplace(p_signal, adc_gain, baseline, d_nan):
pass
def dac(self, expanded=False, return_res=64, inplace=False):
'''
Performs the digital to analogue conversion of the signal stored
in `d_signal` if expanded is False, or `e_d_signal` if expanded
is True.
The d_signal/e_d_signal, fmt, gain, and baseline fields must all be
valid.
If inplace is True, the dac will be performed inplace on the
variable, the p_signal/e_p_signal attribute will be set, and the
d_signal/e_d_signal field will be set to None.
Parameters
----------
expanded : bool, optional
Whether to transform the `e_d_signal attribute` (True) or
the `d_signal` attribute (False).
return_res : int, optional
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
inplace : bool, optional
Whether to automatically set the object's corresponding
physical signal attribute and set the digital signal
attribute to None (True), or to return the converted
signal as a separate variable without changing the original
digital signal attribute (False).
Returns
-------
p_signal : ndarray, optional
The physical conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
Examples
--------
>>> import wfdb
>>> record = wfdb.rdsamp('sample-data/100', physical=False)
>>> p_signal = record.dac()
>>> record.dac(inplace=True)
>>> record.adc(inplace=True)
'''
pass
def calc_adc_gain_baseline(self, ch, minvals, maxvals):
'''
Compute adc_gain and baseline parameters for a given channel.
Parameters
----------
ch: int
The channel that the adc_gain and baseline are being computed for.
minvals: list
The minimum values for each channel.
maxvals: list
The maximum values for each channel.
Returns
-------
adc_gain : float
Calculated `adc_gain` value for a given channel.
baseline : int
Calculated `baseline` value for a given channel.
Notes
-----
This is the mapping equation:
`(digital - baseline) / adc_gain = physical`
`physical * adc_gain + baseline = digital`
The original WFDB library stores `baseline` as int32.
Constrain abs(adc_gain) <= 2**31 == 2147483648.
This function carefully deals with overflow for calculated
int32 `baseline` values, but does not consider over/underflow
for calculated float `adc_gain` values.
'''
pass
def calc_adc_params(self):
'''
Compute appropriate adc_gain and baseline parameters for adc
conversion, given the physical signal and the fmts.
Parameters
----------
N/A
Returns
-------
adc_gains : list
List of calculated `adc_gain` values for each channel.
baselines : list
List of calculated `baseline` values for each channel
'''
pass
def convert_dtype(self, physical, return_res, smooth_frames):
'''
Convert the dtype of the signal.
Parameters
----------
physical : bool
Specifies whether to return dtype in physical (float) units in the
`p_signal` field (True), or digital (int) units in the `d_signal`
field (False).
return_res : int
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
smooth_frames : bool
Used when reading records with signals having multiple samples
per frame. Specifies whether to smooth the samples in signals
with more than one sample per frame and return an (MxN) uniform
numpy array as the `d_signal` or `p_signal` field (True), or to
return a list of 1d numpy arrays containing every expanded
sample as the `e_d_signal` or `e_p_signal` field (False).
Returns
-------
N/A
'''
pass
def calc_checksum(self, expanded=False):
'''
Calculate the checksum(s) of the input signal.
Parameters
----------
expanded : bool, optional
Whether to transform the `e_d_signal` attribute (True) or
the `d_signal` attribute (False).
Returns
-------
cs : list
The resulting checksum-ed signal.
'''
pass
def wr_dat_files(self, expanded=False, write_dir=""):
'''
Write each of the specified dat files.
Parameters
----------
expanded : bool, optional
Whether to transform the `e_d_signal` attribute (True) or
the `d_signal` attribute (False).
write_dir : str, optional
The directory to write the output file to.
Returns
-------
N/A
'''
pass
def smooth_frames(self, sigtype="physical"):
'''
Convert expanded signals with different samples/frame into
a uniform numpy array.
Parameters
----------
sigtype (default='physical') : str
Specifies whether to smooth the e_p_signal field ('physical'), or the e_d_signal
field ('digital').
Returns
-------
signal : ndarray
Transformed expanded signal into a uniform signal.
'''
pass
| 14 | 13 | 75 | 8 | 39 | 28 | 8 | 0.75 | 1 | 9 | 0 | 0 | 12 | 11 | 12 | 12 | 988 | 125 | 494 | 84 | 480 | 369 | 319 | 84 | 305 | 22 | 1 | 5 | 104 |
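The signal-conversion methods quoted above all hinge on the linear mapping `digital = physical * adc_gain + baseline` and its inverse. The short sketch below is not part of wfdb; it only restates that round trip with made-up `adc_gain` and `baseline` values to show how adc and dac invert each other.

import numpy as np

# Hypothetical channel parameters, chosen for illustration only.
adc_gain = 200.0    # adu per physical unit
baseline = 1024     # digital value corresponding to physical 0

physical = np.array([-0.5, 0.0, 1.25])                               # e.g. millivolts
digital = np.round(physical * adc_gain + baseline).astype(np.int16)  # adc
recovered = (digital - baseline) / adc_gain                          # dac

print(digital)     # [ 924 1024 1274]
print(recovered)   # -0.5, 0.0 and 1.25 recovered exactly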
147,711 |
MIT-LCP/wfdb-python
|
wfdb/io/_header.py
|
wfdb.io._header.MultiHeaderMixin
|
class MultiHeaderMixin(BaseHeaderMixin):
"""
Mixin class with multi-segment header methods. Inherited by
MultiRecord class.
Attributes
----------
N/A
"""
n_seg: int
seg_len: Sequence[int]
segments: Optional[Sequence]
def set_defaults(self):
"""
Set defaults for fields needed to write the header if they have
defaults. This is NOT called by rdheader. It is only called by the
gateway wrsamp for convenience. It is also not called by wrheader since
it is supposed to be an explicit function. Not responsible for
initializing the attributes. That is done by the constructor.
Parameters
----------
N/A
Returns
-------
N/A
"""
for field in self.get_write_fields():
self.set_default(field)
def wrheader(self, write_dir=""):
"""
Write a multi-segment WFDB header file. The signals or segments are
not used. Before writing:
- Get the fields used to write the header for this instance.
- Check each required field.
- Check that the fields are cohesive with one another.
Parameters
----------
write_dir : str, optional
The output directory in which the header is written.
Returns
-------
N/A
Notes
-----
This function does NOT call `set_defaults`. Essential fields
must be set beforehand.
"""
# Get all the fields used to write the header
write_fields = self.get_write_fields()
# Check the validity of individual fields used to write the header
for field in write_fields:
self.check_field(field)
# Check the cohesion of fields used to write the header
self.check_field_cohesion()
# Write the header file using the specified fields
self.wr_header_file(write_fields, write_dir)
def get_write_fields(self):
"""
Get the list of fields used to write the multi-segment header.
Parameters
----------
N/A
Returns
-------
write_fields : list
All the default required fields, the user defined fields,
and their dependencies.
"""
# Record specification fields
write_fields = self.get_write_subset("record")
# Segment specification fields are all mandatory
write_fields = write_fields + ["seg_name", "seg_len"]
# Comments
if self.comments is not None:
write_fields.append("comments")
return write_fields
def set_default(self, field):
"""
Set a field to its default value if there is a default.
Parameters
----------
field : str
The desired attribute of the object.
Returns
-------
N/A
"""
# Record specification fields
if field in RECORD_SPECS:
# Return if no default to set, or if the field is already present.
if (
RECORD_SPECS[field].write_def is None
or getattr(self, field) is not None
):
return
setattr(self, field, RECORD_SPECS[field].write_def)
def check_field_cohesion(self):
"""
Check the cohesion of fields used to write the header.
Parameters
----------
N/A
Returns
-------
N/A
"""
# The length of seg_name and seg_len must match n_seg
for f in ["seg_name", "seg_len"]:
if len(getattr(self, f)) != self.n_seg:
raise ValueError(
"The length of field: " + f + " does not match field n_seg."
)
# Check the sum of the 'seg_len' fields against 'sig_len'
if np.sum(self.seg_len) != self.sig_len:
raise ValueError(
"The sum of the 'seg_len' fields do not match the 'sig_len' field"
)
def wr_header_file(self, write_fields, write_dir):
"""
Write a header file using the specified fields.
Parameters
----------
write_fields : list
All the default required fields, the user defined fields,
and their dependencies.
write_dir : str
The output directory in which the header is written.
Returns
-------
N/A
"""
# Create record specification line
record_line = ""
# Traverse the ordered dictionary
for field in RECORD_SPECS.index:
# If the field is being used, add it with its delimiter
if field in write_fields:
record_line += RECORD_SPECS.loc[field, "delimiter"] + str(
getattr(self, field)
)
header_lines = [record_line]
# Create segment specification lines
segment_lines = self.n_seg * [""]
# For both fields, add each of its elements with the delimiter
# to the appropriate line
for field in SEGMENT_SPECS.index:
for seg_num in range(self.n_seg):
segment_lines[seg_num] += SEGMENT_SPECS.loc[
field, "delimiter"
] + str(getattr(self, field)[seg_num])
header_lines = header_lines + segment_lines
# Create comment lines (if any)
if "comments" in write_fields:
comment_lines = ["# " + comment for comment in self.comments]
header_lines += comment_lines
util.lines_to_file(self.record_name + ".hea", write_dir, header_lines)
def get_sig_segments(self, sig_name=None):
"""
Get a list of the segment numbers that contain a particular signal
(or a dictionary of segment numbers for a list of signals).
Only works if information about the segments has been read in.
Parameters
----------
sig_name : str, list
The name of the signals to be segmented.
Returns
-------
sig_dict : dict
Segments for each desired signal.
sig_segs : list
Segments for the desired signal.
"""
if self.segments is None:
raise Exception(
"The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rsegment_fieldsments=True"
)
# Default value = all signal names.
if sig_name is None:
sig_name = self.get_sig_name()
if isinstance(sig_name, list):
sig_dict = {}
for sig in sig_name:
sig_dict[sig] = self.get_sig_segments(sig)
return sig_dict
elif isinstance(sig_name, str):
sig_segs = []
for i in range(self.n_seg):
if (
self.seg_name[i] != "~"
and sig_name in self.segments[i].sig_name
):
sig_segs.append(i)
return sig_segs
else:
raise TypeError("sig_name must be a string or a list of strings")
def get_sig_name(self):
"""
Get the signal names for the entire record.
Parameters
----------
N/A
Returns
-------
sig_name : str, list
The name of the signals to be segmented.
"""
if self.segments is None:
raise Exception(
"The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rd_segments=True"
)
if self.layout == "fixed":
for i in range(self.n_seg):
if self.seg_name[i] != "~":
sig_name = self.segments[i].sig_name
break
else:
sig_name = self.segments[0].sig_name
return sig_name
def contained_ranges(self, sig_name: str) -> List[Tuple[int, int]]:
"""
Given a signal name, return the sample ranges that contain signal values,
relative to the start of the full record. Does not account for NaNs/missing
values.
This function is mainly useful for variable layout records, but can also be
used for fixed-layout records. Only works if the headers from the individual
segment records have already been read in.
Parameters
----------
sig_name : str
The name of the signal to query.
Returns
-------
ranges : List[Tuple[int, int]]
Tuple pairs which specify the sample ranges in which the signal is contained.
The second value of each tuple pair will be one beyond the signal index.
eg. A length 1000 signal would generate a tuple of: (0, 1000), allowing
selection using signal[0:1000].
"""
if self.segments is None:
raise Exception(
"The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rd_segments=True"
)
ranges = []
seg_start = 0
range_start = None
# TODO: Add shortcut for fixed-layout records
# Cannot process segments only because missing segments are None
# and do not contain length information.
for seg_num in range(self.n_seg):
seg_len = self.seg_len[seg_num]
segment = self.segments[seg_num]
if seg_len == 0:
continue
# Open signal range
if (
range_start is None
and segment is not None
and sig_name in segment.sig_name
):
range_start = seg_start
# Close signal range
elif range_start is not None and (
segment is None or sig_name not in segment.sig_name
):
ranges.append((range_start, seg_start))
range_start = None
seg_start += seg_len
# Account for final segment
if range_start is not None:
ranges.append((range_start, seg_start))
return ranges
def contained_combined_ranges(
self,
sig_names: Sequence[str],
) -> List[Tuple[int, int]]:
"""
Given a collection of signal names, return the sample ranges that
contain all of the specified signals, relative to the start of the
full record. Does not account for NaNs/missing values.
This function is mainly useful for variable layout records, but can also be
used for fixed-layout records. Only works if the headers from the individual
segment records have already been read in.
Parameters
----------
sig_names : List[str]
The names of the signals to query.
Returns
-------
ranges : List[Tuple[int, int]]
Tuple pairs which specify the sample ranges in which the signal is contained.
The second value of each tuple pair will be one beyond the signal index.
eg. A length 1000 signal would generate a tuple of: (0, 1000), allowing
selection using signal[0:1000].
"""
# TODO: Add shortcut for fixed-layout records
if len(sig_names) == 0:
return []
combined_ranges = self.contained_ranges(sig_names[0])
if len(sig_names) > 1:
for name in sig_names[1:]:
combined_ranges = util.overlapping_ranges(
combined_ranges, self.contained_ranges(name)
)
return combined_ranges
|
class MultiHeaderMixin(BaseHeaderMixin):
'''
Mixin class with multi-segment header methods. Inherited by
MultiRecord class.
Attributes
----------
N/A
'''
def set_defaults(self):
'''
Set defaults for fields needed to write the header if they have
defaults. This is NOT called by rdheader. It is only called by the
gateway wrsamp for convenience. It is also not called by wrheader since
it is supposed to be an explicit function. Not responsible for
initializing the attributes. That is done by the constructor.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def wrheader(self, write_dir=""):
'''
Write a multi-segment WFDB header file. The signals or segments are
not used. Before writing:
- Get the fields used to write the header for this instance.
- Check each required field.
- Check that the fields are cohesive with one another.
Parameters
----------
write_dir : str, optional
The output directory in which the header is written.
Returns
-------
N/A
Notes
-----
This function does NOT call `set_defaults`. Essential fields
must be set beforehand.
'''
pass
def get_write_fields(self):
'''
Get the list of fields used to write the multi-segment header.
Parameters
----------
N/A
Returns
-------
write_fields : list
All the default required fields, the user defined fields,
and their dependencies.
'''
pass
def set_default(self, field):
'''
Set a field to its default value if there is a default.
Parameters
----------
field : str
The desired attribute of the object.
Returns
-------
N/A
'''
pass
def check_field_cohesion(self):
'''
Check the cohesion of fields used to write the header.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def wr_header_file(self, write_fields, write_dir):
'''
Write a header file using the specified fields.
Parameters
----------
write_fields : list
All the default required fields, the user defined fields,
and their dependencies.
write_dir : str
The output directory in which the header is written.
Returns
-------
N/A
'''
pass
def get_sig_segments(self, sig_name=None):
'''
Get a list of the segment numbers that contain a particular signal
(or a dictionary of segment numbers for a list of signals).
Only works if information about the segments has been read in.
Parameters
----------
sig_name : str, list
The name of the signals to be segmented.
Returns
-------
sig_dict : dict
Segments for each desired signal.
sig_segs : list
Segments for the desired signal.
'''
pass
def get_sig_name(self):
'''
Get the signal names for the entire record.
Parameters
----------
N/A
Returns
-------
sig_name : str, list
The name of the signals to be segmented.
'''
pass
def contained_ranges(self, sig_name: str) -> List[Tuple[int, int]]:
'''
Given a signal name, return the sample ranges that contain signal values,
relative to the start of the full record. Does not account for NaNs/missing
values.
This function is mainly useful for variable layout records, but can also be
used for fixed-layout records. Only works if the headers from the individual
segment records have already been read in.
Parameters
----------
sig_name : str
The name of the signal to query.
Returns
-------
ranges : List[Tuple[int, int]]
Tuple pairs which specify the sample ranges in which the signal is contained.
The second value of each tuple pair will be one beyond the signal index.
eg. A length 1000 signal would generate a tuple of: (0, 1000), allowing
selection using signal[0:1000].
'''
pass
def contained_combined_ranges(
self,
sig_names: Sequence[str],
) -> List[Tuple[int, int]]:
'''
Given a collection of signal names, return the sample ranges that
contain all of the specified signals, relative to the start of the
full record. Does not account for NaNs/missing values.
This function is mainly useful for variable layout records, but can also be
used for fixed-layout records. Only works if the headers from the individual
segment records have already been read in.
Parameters
----------
sig_names : List[str]
The names of the signals to query.
Returns
-------
ranges : List[Tuple[int, int]]
Tuple pairs which specify the sample ranges in which the signal is contained.
The second value of each tuple pair will be one beyond the signal index.
eg. A length 1000 signal would generate a tuple of: (0, 1000), allowing
selection using signal[0:1000].
'''
pass
| 11 | 11 | 35 | 6 | 13 | 16 | 4 | 1.29 | 1 | 7 | 0 | 1 | 10 | 0 | 10 | 11 | 376 | 73 | 133 | 39 | 119 | 171 | 98 | 36 | 87 | 8 | 2 | 3 | 43 |
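contained_combined_ranges above intersects the per-signal ranges with util.overlapping_ranges. The standalone sketch below shows only that intersection step; intersect_ranges and the sample numbers are illustrative stand-ins, not the wfdb helper itself.

from typing import List, Tuple

Range = Tuple[int, int]

def intersect_ranges(a: List[Range], b: List[Range]) -> List[Range]:
    """Return the overlapping portions of two lists of half-open ranges."""
    out = []
    for a_start, a_stop in a:
        for b_start, b_stop in b:
            start, stop = max(a_start, b_start), min(a_stop, b_stop)
            if start < stop:
                out.append((start, stop))
    return out

# Signal "II" present in samples 0-1000 and 2500-4000, signal "V" in
# samples 500-3000 (hypothetical variable-layout record).
print(intersect_ranges([(0, 1000), (2500, 4000)], [(500, 3000)]))
# [(500, 1000), (2500, 3000)]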
147,712 |
MIT-LCP/wfdb-python
|
wfdb/io/_header.py
|
wfdb.io._header.HeaderMixin
|
class HeaderMixin(BaseHeaderMixin):
"""
Mixin class with single-segment header methods. Inherited by Record class.
Attributes
----------
N/A
"""
def set_defaults(self):
"""
Set defaults for fields needed to write the header if they have
defaults.
Parameters
----------
N/A
Returns
-------
N/A
Notes
-----
- This is NOT called by `rdheader`. It is only automatically
called by the gateway `wrsamp` for convenience.
- This is also not called by `wrheader` since it is supposed to
be an explicit function.
- This is not responsible for initializing the attributes. That
is done by the constructor.
See also `set_p_features` and `set_d_features`.
"""
rfields, sfields = self.get_write_fields()
for f in rfields:
self.set_default(f)
for f in sfields:
self.set_default(f)
def wrheader(self, write_dir="", expanded=True):
"""
Write a WFDB header file. The signals are not used. Before
writing:
- Get the fields used to write the header for this instance.
- Check each required field.
- Check that the fields are cohesive with one another.
Parameters
----------
write_dir : str, optional
The output directory in which the header is written.
expanded : bool, optional
Whether the header file should include `samps_per_frame` (this
should only be true if the signal files are written using
`expanded=True`).
Returns
-------
N/A
Notes
-----
This function does NOT call `set_defaults`. Essential fields
must be set beforehand.
"""
# Get all the fields used to write the header
# sig_write_fields is a dictionary of
# {field_name:required_channels}
rec_write_fields, sig_write_fields = self.get_write_fields()
if not expanded:
sig_write_fields.pop("samps_per_frame", None)
# Check the validity of individual fields used to write the header
# Record specification fields (and comments)
for field in rec_write_fields:
self.check_field(field)
# Signal specification fields.
for field in sig_write_fields:
self.check_field(field, required_channels=sig_write_fields[field])
# Check the cohesion of fields used to write the header
self.check_field_cohesion(rec_write_fields, list(sig_write_fields))
# Write the header file using the specified fields
self.wr_header_file(rec_write_fields, sig_write_fields, write_dir)
def get_write_fields(self):
"""
Get the list of fields used to write the header, separating
record and signal specification fields. Returns the default
required fields, the user defined fields, and their dependencies.
Does NOT include `d_signal` or `e_d_signal`.
Parameters
----------
N/A
Returns
-------
rec_write_fields : list
Record specification fields to be written. Includes
'comment' if present.
sig_write_fields : dict
Dictionary of signal specification fields to be written,
with values equal to the channels that need to be present
for each field.
"""
# Record specification fields
rec_write_fields = self.get_write_subset("record")
# Add comments if any
if self.comments is not None:
rec_write_fields.append("comments")
# Get required signal fields if signals are present.
self.check_field("n_sig")
if self.n_sig > 0:
sig_write_fields = self.get_write_subset("signal")
else:
sig_write_fields = None
return rec_write_fields, sig_write_fields
def _auto_signal_file_names(self):
fmt = self.fmt or [None] * self.n_sig
spf = self.samps_per_frame or [None] * self.n_sig
num_groups = 0
group_number = []
prev_fmt = prev_spf = None
channels_in_group = 0
for ch_fmt, ch_spf in zip(fmt, spf):
if ch_fmt != prev_fmt:
num_groups += 1
channels_in_group = 0
elif ch_fmt in ("508", "516", "524"):
if channels_in_group >= 8 or ch_spf != prev_spf:
num_groups += 1
channels_in_group = 0
group_number.append(num_groups)
channels_in_group += 1
prev_fmt = ch_fmt
prev_spf = ch_spf
if num_groups < 2:
return [self.record_name + ".dat"] * self.n_sig
else:
digits = len(str(group_number[-1]))
return [
self.record_name + "_" + str(g).rjust(digits, "0") + ".dat"
for g in group_number
]
def set_default(self, field):
"""
Set the object's attribute to its default value if it is missing
and there is a default. Not responsible for initializing the
attribute. That is done by the constructor.
Parameters
----------
field : str
The desired attribute of the object.
Returns
-------
N/A
"""
# Record specification fields
if field in RECORD_SPECS.index:
# Return if no default to set, or if the field is already
# present.
if (
RECORD_SPECS.loc[field, "write_default"] is None
or getattr(self, field) is not None
):
return
setattr(self, field, RECORD_SPECS.loc[field, "write_default"])
# Signal specification fields
# Setting entire list default, not filling in blanks in lists.
elif field in SIGNAL_SPECS.index:
# Specific dynamic case
if field == "file_name" and self.file_name is None:
self.file_name = self._auto_signal_file_names()
return
item = getattr(self, field)
# Return if no default to set, or if the field is already
# present.
if (
SIGNAL_SPECS.loc[field, "write_default"] is None
or item is not None
):
return
# Set more specific defaults if possible
if field == "adc_res" and self.fmt is not None:
self.adc_res = _signal._fmt_res(self.fmt)
return
setattr(
self,
field,
[SIGNAL_SPECS.loc[field, "write_default"]] * self.n_sig,
)
def check_field_cohesion(self, rec_write_fields, sig_write_fields):
"""
Check the cohesion of fields used to write the header.
Parameters
----------
rec_write_fields : list
List of record specification fields to write.
sig_write_fields : dict
Dictionary of signal specification fields to write, values
being equal to a list of channels to write for each field.
Returns
-------
N/A
"""
# If there are no signal specification fields, there is nothing to check.
if self.n_sig > 0:
# The length of all signal specification fields must match n_sig
# even if some of its elements are None.
for f in sig_write_fields:
if len(getattr(self, f)) != self.n_sig:
raise ValueError(
"The length of field: " + f + " must match field n_sig."
)
# Each file_name must correspond to only one fmt, (and only one byte offset if defined).
datfmts = {}
for ch in range(self.n_sig):
if self.file_name[ch] not in datfmts:
datfmts[self.file_name[ch]] = self.fmt[ch]
else:
if datfmts[self.file_name[ch]] != self.fmt[ch]:
raise ValueError(
"Each file_name (dat file) specified must have the same fmt"
)
datoffsets = {}
if self.byte_offset is not None:
# At least one byte offset value exists
for ch in range(self.n_sig):
if self.byte_offset[ch] is None:
continue
if self.file_name[ch] not in datoffsets:
datoffsets[self.file_name[ch]] = self.byte_offset[ch]
else:
if (
datoffsets[self.file_name[ch]]
!= self.byte_offset[ch]
):
raise ValueError(
"Each file_name (dat file) specified must have the same byte offset"
)
def wr_header_file(self, rec_write_fields, sig_write_fields, write_dir):
"""
Write a header file using the specified fields. Converts Record
attributes into appropriate WFDB format strings.
Parameters
----------
rec_write_fields : list
List of record specification fields to write.
sig_write_fields : dict
Dictionary of signal specification fields to write, values
being equal to a list of channels to write for each field.
write_dir : str
The directory in which to write the header file.
Returns
-------
N/A
"""
# Create record specification line
record_line = ""
# Traverse the ordered dictionary
for field in RECORD_SPECS.index:
# If the field is being used, add it with its delimiter
if field in rec_write_fields:
string_field = str(getattr(self, field))
# Certain fields need extra processing
if field == "fs" and isinstance(self.fs, float):
if round(self.fs, 8) == float(int(self.fs)):
string_field = str(int(self.fs))
elif field == "base_time" and "." in string_field:
string_field = string_field.rstrip("0")
elif field == "base_date":
string_field = "/".join(
(string_field[8:], string_field[5:7], string_field[:4])
)
record_line += (
RECORD_SPECS.loc[field, "delimiter"] + string_field
)
# The 'base_counter' field needs to be closed with ')'
if field == "base_counter":
record_line += ")"
header_lines = [record_line]
# Create signal specification lines (if any) one channel at a time
if self.n_sig > 0:
signal_lines = self.n_sig * [""]
for ch in range(self.n_sig):
# Traverse the signal fields
for field in SIGNAL_SPECS.index:
# If the field is being used, add each of its
# elements with the delimiter to the appropriate
# line
if (
field in sig_write_fields
and ch in sig_write_fields[field]
):
signal_lines[ch] += SIGNAL_SPECS.loc[
field, "delimiter"
] + str(getattr(self, field)[ch])
# The 'baseline' field needs to be closed with ')'
if field == "baseline":
signal_lines[ch] += ")"
header_lines += signal_lines
# Create comment lines (if any)
if "comments" in rec_write_fields:
comment_lines = ["# " + comment for comment in self.comments]
header_lines += comment_lines
util.lines_to_file(self.record_name + ".hea", write_dir, header_lines)
|
class HeaderMixin(BaseHeaderMixin):
'''
Mixin class with single-segment header methods. Inherited by Record class.
Attributes
----------
N/A
'''
def set_defaults(self):
'''
Set defaults for fields needed to write the header if they have
defaults.
Parameters
----------
N/A
Returns
-------
N/A
Notes
-----
- This is NOT called by `rdheader`. It is only automatically
called by the gateway `wrsamp` for convenience.
- This is also not called by `wrheader` since it is supposed to
be an explicit function.
- This is not responsible for initializing the attributes. That
is done by the constructor.
See also `set_p_features` and `set_d_features`.
'''
pass
def wrheader(self, write_dir="", expanded=True):
'''
Write a WFDB header file. The signals are not used. Before
writing:
- Get the fields used to write the header for this instance.
- Check each required field.
- Check that the fields are cohesive with one another.
Parameters
----------
write_dir : str, optional
The output directory in which the header is written.
expanded : bool, optional
Whether the header file should include `samps_per_frame` (this
should only be true if the signal files are written using
`expanded=True`).
Returns
-------
N/A
Notes
-----
This function does NOT call `set_defaults`. Essential fields
must be set beforehand.
'''
pass
def get_write_fields(self):
'''
Get the list of fields used to write the header, separating
record and signal specification fields. Returns the default
required fields, the user defined fields, and their dependencies.
Does NOT include `d_signal` or `e_d_signal`.
Parameters
----------
N/A
Returns
-------
rec_write_fields : list
Record specification fields to be written. Includes
'comment' if present.
sig_write_fields : dict
Dictionary of signal specification fields to be written,
with values equal to the channels that need to be present
for each field.
'''
pass
def _auto_signal_file_names(self):
pass
def set_default(self, field):
'''
Set the object's attribute to its default value if it is missing
and there is a default. Not responsible for initializing the
attribute. That is done by the constructor.
Parameters
----------
field : str
The desired attribute of the object.
Returns
-------
N/A
'''
pass
def check_field_cohesion(self, rec_write_fields, sig_write_fields):
'''
Check the cohesion of fields used to write the header.
Parameters
----------
rec_write_fields : list
List of record specification fields to write.
sig_write_fields : dict
Dictionary of signal specification fields to write, values
being equal to a list of channels to write for each field.
Returns
-------
N/A
'''
pass
def wr_header_file(self, rec_write_fields, sig_write_fields, write_dir):
'''
Write a header file using the specified fields. Converts Record
attributes into appropriate WFDB format strings.
Parameters
----------
rec_write_fields : list
List of record specification fields to write.
sig_write_fields : dict
Dictionary of signal specification fields to write, values
being equal to a list of channels to write for each field.
write_dir : str
The directory in which to write the header file.
Returns
-------
N/A
'''
pass
| 8 | 7 | 47 | 7 | 21 | 20 | 7 | 0.97 | 1 | 7 | 0 | 1 | 7 | 2 | 7 | 8 | 347 | 55 | 149 | 36 | 141 | 144 | 110 | 36 | 102 | 14 | 2 | 5 | 49 |
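wr_header_file above builds each header line by walking a field table and prepending each field's delimiter to its string value. A toy version of that loop is sketched below; the record_fields table and values dict are simplified assumptions standing in for RECORD_SPECS and the Record attributes.

# Simplified stand-ins for RECORD_SPECS (field -> delimiter) and Record attributes.
record_fields = [("record_name", ""), ("n_sig", " "), ("fs", " "), ("sig_len", " ")]
values = {"record_name": "100", "n_sig": 2, "fs": 360, "sig_len": 650000}

record_line = ""
for field, delimiter in record_fields:
    # Add each field being written, preceded by its delimiter.
    if field in values:
        record_line += delimiter + str(values[field])

print(record_line)   # 100 2 360 650000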
147,713 |
MIT-LCP/wfdb-python
|
wfdb/io/_header.py
|
wfdb.io._header.BaseHeaderMixin
|
class BaseHeaderMixin(object):
"""
Mixin class with multi-segment header methods. Inherited by Record and
MultiRecord classes.
Attributes
----------
N/A
"""
def get_write_subset(self, spec_type):
"""
Get a set of fields used to write the header; either 'record'
or 'signal' specification fields. Helper function for
`get_write_fields`. Gets the default required fields, the user
defined fields, and their dependencies.
Parameters
----------
spec_type : str
The set of specification fields desired. Either 'record' or
'signal'.
Returns
-------
write_fields : list or dict
For record fields, returns a list of all fields needed. For
signal fields, it returns a dictionary of all fields needed,
with keys = field and value = list of channels that must be
present for the field.
"""
if spec_type == "record":
write_fields = []
record_specs = RECORD_SPECS.copy()
# Remove the n_seg requirement for single segment items
if not hasattr(self, "n_seg"):
record_specs.drop("n_seg", inplace=True)
for field in record_specs.index[-1::-1]:
# Continue if the field has already been included
if field in write_fields:
continue
# If the field is required by default or has been
# defined by the user
if (
record_specs.loc[field, "write_required"]
or getattr(self, field) is not None
):
req_field = field
# Add the field and its recursive dependencies
while req_field is not None:
write_fields.append(req_field)
req_field = record_specs.loc[req_field, "dependency"]
# Add comments if any
if getattr(self, "comments") is not None:
write_fields.append("comments")
# signal spec field. Need to return a potentially different list for each channel.
elif spec_type == "signal":
# List of lists for each channel
write_fields = []
signal_specs = SIGNAL_SPECS.copy()
for ch in range(self.n_sig):
# The fields needed for this channel
write_fields_ch = []
for field in signal_specs.index[-1::-1]:
if field in write_fields_ch:
continue
item = getattr(self, field)
# If the field is required by default or has been defined by the user
if signal_specs.loc[field, "write_required"] or (
item is not None and item[ch] is not None
):
req_field = field
# Add the field and its recursive dependencies
while req_field is not None:
write_fields_ch.append(req_field)
req_field = signal_specs.loc[
req_field, "dependency"
]
write_fields.append(write_fields_ch)
# Convert the list of lists to a single dictionary.
# keys = field and value = list of channels in which the
# field is required.
dict_write_fields = {}
# For fields present in any channel:
for field in set(
[i for write_fields_ch in write_fields for i in write_fields_ch]
):
dict_write_fields[field] = []
for ch in range(self.n_sig):
if field in write_fields[ch]:
dict_write_fields[field].append(ch)
write_fields = dict_write_fields
return write_fields
|
class BaseHeaderMixin(object):
'''
Mixin class with multi-segment header methods. Inherited by Record and
MultiRecord classes.
Attributes
----------
N/A
'''
def get_write_subset(self, spec_type):
'''
Get a set of fields used to write the header; either 'record'
or 'signal' specification fields. Helper function for
`get_write_fields`. Gets the default required fields, the user
defined fields, and their dependencies.
Parameters
----------
spec_type : str
The set of specification fields desired. Either 'record' or
'signal'.
Returns
-------
write_fields : list or dict
For record fields, returns a list of all fields needed. For
signal fields, it returns a dictionary of all fields needed,
with keys = field and value = list of channels that must be
present for the field.
'''
pass
| 2 | 2 | 95 | 14 | 48 | 33 | 17 | 0.82 | 1 | 2 | 0 | 2 | 1 | 0 | 1 | 1 | 106 | 17 | 49 | 11 | 47 | 40 | 39 | 11 | 37 | 17 | 1 | 5 | 17 |
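get_write_subset above adds each required or user-set field together with its recursive dependencies. The toy walk below mimics that chain-following on a hypothetical dependency table; the field names and dependencies are illustrative, not the real RECORD_SPECS contents.

# Toy dependency table: each field maps to the field it depends on (or None).
dependency = {"sig_len": "fs", "fs": "record_name", "record_name": None, "comments": None}
requested = ["sig_len", "comments"]

write_fields = []
for field in requested:
    # Add the field and then follow its dependency chain until exhausted.
    while field is not None and field not in write_fields:
        write_fields.append(field)
        field = dependency[field]

print(write_fields)   # ['sig_len', 'fs', 'record_name', 'comments']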
147,714 |
MIT-LCP/wfdb-python
|
tests/test_url.py
|
tests.test_url.TestRemoteFLACFiles
|
class TestRemoteFLACFiles(unittest.TestCase):
"""
Test reading FLAC files over HTTP.
"""
def test_whole_file(self):
"""
Test reading a complete FLAC file using local and HTTP APIs.
This tests that we can read the file 'sample-data/flacformats.d2'
(a 24-bit FLAC stream) using the soundfile library, first by
reading the file from the local filesystem, and then using
wfdb.io._url.openurl() to access it through a simulated web server.
This is meant to verify that the soundfile library works using only
the standard Python file object API (as implemented by
wfdb.io._url.NetFile), and doesn't require the input file to be an
actual io.FileIO object.
Parameters
----------
N/A
Returns
-------
N/A
"""
import soundfile
import numpy as np
data_file_path = "sample-data/flacformats.d2"
expected_format = "FLAC"
expected_subtype = "PCM_24"
# Read the file using standard file I/O
sf1 = soundfile.SoundFile(data_file_path)
self.assertEqual(sf1.format, expected_format)
self.assertEqual(sf1.subtype, expected_subtype)
data1 = sf1.read()
# Read the file using HTTP
with open(data_file_path, "rb") as f:
file_content = {"/foo.dat": f.read()}
with DummyHTTPServer(file_content) as server:
url = server.url("/foo.dat")
file2 = wfdb.io._url.openurl(url, "rb")
sf2 = soundfile.SoundFile(file2)
self.assertEqual(sf2.format, expected_format)
self.assertEqual(sf2.subtype, expected_subtype)
data2 = sf2.read()
# Check that results are equal
np.testing.assert_array_equal(data1, data2)
|
class TestRemoteFLACFiles(unittest.TestCase):
'''
Test reading FLAC files over HTTP.
'''
def test_whole_file(self):
'''
Test reading a complete FLAC file using local and HTTP APIs.
This tests that we can read the file 'sample-data/flacformats.d2'
(a 24-bit FLAC stream) using the soundfile library, first by
reading the file from the local filesystem, and then using
wfdb.io._url.openurl() to access it through a simulated web server.
This is meant to verify that the soundfile library works using only
the standard Python file object API (as implemented by
wfdb.io._url.NetFile), and doesn't require the input file to be an
actual io.FileIO object.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
| 2 | 2 | 49 | 9 | 20 | 20 | 1 | 1.1 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 73 | 54 | 10 | 21 | 16 | 17 | 23 | 21 | 14 | 17 | 1 | 2 | 1 | 1 |
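The FLAC test above relies on serving a local file over HTTP and reading it back through a file-like object. The sketch below reproduces that pattern with only the standard library (http.server plus urllib) instead of DummyHTTPServer and wfdb.io._url.openurl; the payload bytes are a placeholder, not a real FLAC stream.

import http.server
import threading
import urllib.request

payload = b"placeholder bytes standing in for a FLAC stream"

class Handler(http.server.BaseHTTPRequestHandler):
    def do_GET(self):
        # Serve the same payload for every requested path.
        self.send_response(200)
        self.send_header("Content-Length", str(len(payload)))
        self.end_headers()
        self.wfile.write(payload)

    def log_message(self, *args):
        pass  # keep the output quiet

server = http.server.HTTPServer(("127.0.0.1", 0), Handler)
threading.Thread(target=server.serve_forever, daemon=True).start()

url = "http://127.0.0.1:%d/foo.dat" % server.server_address[1]
with urllib.request.urlopen(url) as response:
    remote_bytes = response.read()

server.shutdown()
assert remote_bytes == payload   # local and remote reads agree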
147,715 |
MIT-LCP/wfdb-python
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MIT-LCP_wfdb-python/wfdb/processing/qrs.py
|
wfdb.processing.qrs.XQRS.Conf
|
class Conf(object):
"""
Initial signal configuration object for this QRS detector.
Attributes
----------
hr_init : int, float, optional
Initial heart rate in beats per minute. Used for calculating
recent R-R intervals.
hr_max : int, float, optional
Hard maximum heart rate between two beats, in beats per
minute. Used for refractory period.
hr_min : int, float, optional
Hard minimum heart rate between two beats, in beats per
minute. Used for calculating recent R-R intervals.
qrs_width : int, float, optional
Expected QRS width in seconds. Used for filter widths and the
indirect refractory period.
qrs_thr_init : int, float, optional
Initial QRS detection threshold in mV. Use when learning
is False, or learning fails.
qrs_thr_min : int, float, string, optional
Hard minimum detection threshold of QRS wave. Leave as 0
for no minimum.
ref_period : int, float, optional
The QRS refractory period.
t_inspect_period : int, float, optional
The period below which a potential QRS complex is inspected to
see if it is a T-wave. Leave as 0 for no T-wave inspection.
"""
def __init__(
self,
hr_init=75,
hr_max=200,
hr_min=25,
qrs_width=0.1,
qrs_thr_init=0.13,
qrs_thr_min=0,
ref_period=0.2,
t_inspect_period=0,
):
if hr_min < 0:
raise ValueError("'hr_min' must be >= 0")
if not hr_min < hr_init < hr_max:
raise ValueError(
"'hr_min' < 'hr_init' < 'hr_max' must be True")
if qrs_thr_init < qrs_thr_min:
raise ValueError("qrs_thr_min must be <= qrs_thr_init")
self.hr_init = hr_init
self.hr_max = hr_max
self.hr_min = hr_min
self.qrs_width = qrs_width
self.qrs_radius = self.qrs_width / 2
self.qrs_thr_init = qrs_thr_init
self.qrs_thr_min = qrs_thr_min
self.ref_period = ref_period
self.t_inspect_period = t_inspect_period
|
class Conf(object):
'''
Initial signal configuration object for this QRS detector.
Attributes
----------
hr_init : int, float, optional
Initial heart rate in beats per minute. Used for calculating
recent R-R intervals.
hr_max : int, float, optional
Hard maximum heart rate between two beats, in beats per
minute. Used for refractory period.
hr_min : int, float, optional
Hard minimum heart rate between two beats, in beats per
minute. Used for calculating recent R-R intervals.
qrs_width : int, float, optional
Expected QRS width in seconds. Used for filter widths and the
indirect refractory period.
qrs_thr_init : int, float, optional
Initial QRS detection threshold in mV. Use when learning
is False, or learning fails.
qrs_thr_min : int, float, string, optional
Hard minimum detection threshold of QRS wave. Leave as 0
for no minimum.
ref_period : int, float, optional
The QRS refractory period.
t_inspect_period : int, float, optional
The period below which a potential QRS complex is inspected to
see if it is a T-wave. Leave as 0 for no T-wave inspection.
'''
def __init__(
self,
hr_init=75,
hr_max=200,
hr_min=25,
qrs_width=0.1,
qrs_thr_init=0.13,
qrs_thr_min=0,
ref_period=0.2,
t_inspect_period=0,
):
pass
| 2 | 1 | 29 | 3 | 26 | 0 | 4 | 1.04 | 1 | 1 | 0 | 0 | 1 | 9 | 1 | 1 | 61 | 6 | 27 | 21 | 15 | 28 | 17 | 11 | 15 | 4 | 1 | 1 | 4 |
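Because XQRS.Conf validates its heart-rate bounds in __init__, a quick usage check looks like the sketch below. It assumes wfdb is installed and that XQRS is exposed from wfdb.processing, as in current releases; everything else only exercises the constructor quoted above.

from wfdb.processing import XQRS

# Valid configuration: hr_min < hr_init < hr_max and qrs_thr_min <= qrs_thr_init.
conf = XQRS.Conf(hr_init=75, hr_min=25, hr_max=200, qrs_width=0.1)
print(conf.qrs_radius)   # 0.05, i.e. half of qrs_width

# Violating the ordering constraint raises ValueError.
try:
    XQRS.Conf(hr_init=300, hr_min=25, hr_max=200)
except ValueError as exc:
    print(exc)           # 'hr_min' < 'hr_init' < 'hr_max' must be True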
147,716 |
MIT-LCP/wfdb-python
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MIT-LCP_wfdb-python/wfdb/processing/qrs.py
|
wfdb.processing.qrs.GQRS.Peak
|
class Peak(object):
"""
Holds all of the peak information for the QRS object.
Attributes
----------
peak_time : int, float
The time of the peak.
peak_amp : int, float
The amplitude of the peak.
peak_type : str
The type of the peak.
"""
def __init__(self, peak_time, peak_amp, peak_type):
self.time = peak_time
self.amp = peak_amp
self.type = peak_type
self.next_peak = None
self.prev_peak = None
|
class Peak(object):
'''
Holds all of the peak information for the QRS object.
Attributes
----------
peak_time : int, float
The time of the peak.
peak_amp : int, float
The amplitude of the peak.
peak_type : str
The type of the peak.
'''
def __init__(self, peak_time, peak_amp, peak_type):
pass
| 2 | 1 | 6 | 0 | 6 | 0 | 1 | 1.57 | 1 | 0 | 0 | 0 | 1 | 5 | 1 | 1 | 21 | 3 | 7 | 7 | 5 | 11 | 7 | 7 | 5 | 1 | 1 | 0 | 1 |
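GQRS keeps its candidate peaks in a doubly linked buffer built from the Peak objects above. The snippet below simply wires two such nodes together to show how next_peak/prev_peak are used; it copies the class as quoted rather than importing it, since Peak is nested inside GQRS.

class Peak(object):
    def __init__(self, peak_time, peak_amp, peak_type):
        self.time = peak_time
        self.amp = peak_amp
        self.type = peak_type
        self.next_peak = None
        self.prev_peak = None

# Link two candidate peaks into a tiny doubly linked chain.
first = Peak(peak_time=100, peak_amp=750, peak_type=1)
second = Peak(peak_time=420, peak_amp=800, peak_type=1)
first.next_peak, second.prev_peak = second, first

print(first.next_peak.time, second.prev_peak.time)   # 420 100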
147,717 |
MIT-LCP/wfdb-python
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MIT-LCP_wfdb-python/wfdb/processing/qrs.py
|
wfdb.processing.qrs.GQRS.Conf
|
class Conf(object):
"""
Initial signal configuration object for this QRS detector.
Attributes
----------
fs : int, float
The sampling frequency of the input signal.
adc_gain : int, float
The analogue to digital gain of the signal (the number of adus per
physical unit).
hr : int, float, optional
Typical heart rate, in beats per minute.
RRdelta : int, float, optional
Typical difference between successive RR intervals in seconds.
RRmin : int, float, optional
Minimum RR interval ("refractory period"), in seconds.
RRmax : int, float, optional
Maximum RR interval, in seconds. Thresholds will be adjusted if no
peaks are detected within this interval.
QS : int, float, optional
Typical QRS duration, in seconds.
QT : int, float, optional
Typical QT interval, in seconds.
RTmin : int, float, optional
Minimum interval between R and T peaks, in seconds.
RTmax : int, float, optional
Maximum interval between R and T peaks, in seconds.
QRSa : int, float, optional
Typical QRS peak-to-peak amplitude, in microvolts.
QRSamin : int, float, optional
Minimum QRS peak-to-peak amplitude, in microvolts.
thresh : int, float, optional
The relative amplitude detection threshold. Used to initialize the peak
and QRS detection threshold.
"""
def __init__(
self,
fs,
adc_gain,
hr=75,
RRdelta=0.2,
RRmin=0.28,
RRmax=2.4,
QS=0.07,
QT=0.35,
RTmin=0.25,
RTmax=0.33,
QRSa=750,
QRSamin=130,
thresh=1.0,
):
self.fs = fs
self.sps = int(time_to_sample_number(1, fs))
self.spm = int(time_to_sample_number(60, fs))
self.hr = hr
self.RR = 60.0 / self.hr
self.RRdelta = RRdelta
self.RRmin = RRmin
self.RRmax = RRmax
self.QS = QS
self.QT = QT
self.RTmin = RTmin
self.RTmax = RTmax
self.QRSa = QRSa
self.QRSamin = QRSamin
self.thresh = thresh
self._NORMAL = 1 # normal beat
self._ARFCT = 16 # isolated QRS-like artifact
self._NOTE = 22 # comment annotation
self._TWAVE = 27 # T-wave peak
self._NPEAKS = 64 # number of peaks buffered (per signal)
self._BUFLN = 32768 # must be a power of 2, see qf()
self.rrmean = int(self.RR * self.sps)
self.rrdev = int(self.RRdelta * self.sps)
self.rrmin = int(self.RRmin * self.sps)
self.rrmax = int(self.RRmax * self.sps)
self.rrinc = int(self.rrmean / 40)
if self.rrinc < 1:
self.rrinc = 1
self.dt = int(self.QS * self.sps / 4)
if self.dt < 1:
raise Exception(
"Sampling rate is too low. Unable to use signal."
)
self.rtmin = int(self.RTmin * self.sps)
self.rtmean = int(0.75 * self.QT * self.sps)
self.rtmax = int(self.RTmax * self.sps)
dv = adc_gain * self.QRSamin * 0.001
self.pthr = int((self.thresh * dv * dv) / 6)
self.qthr = self.pthr << 1
self.pthmin = self.pthr >> 2
self.qthmin = int((self.pthmin << 2) / 3)
self.tamean = self.qthr # initial value for mean T-wave amplitude
# Filter constants and thresholds.
self.dt2 = 2 * self.dt
self.dt3 = 3 * self.dt
self.dt4 = 4 * self.dt
self.smdt = self.dt
self.v1norm = self.smdt * self.dt * 64
self.smt = 0
self.smt0 = 0 + self.smdt
|
class Conf(object):
'''
Initial signal configuration object for this QRS detector.
Attributes
----------
fs : int, float
The sampling frequency of the input signal.
adc_gain : int, float
The analogue to digital gain of the signal (the number of adus per
physical unit).
hr : int, float, optional
Typical heart rate, in beats per minute.
RRdelta : int, float, optional
Typical difference between successive RR intervals in seconds.
RRmin : int, float, optional
Minimum RR interval ("refractory period"), in seconds.
RRmax : int, float, optional
Maximum RR interval, in seconds. Thresholds will be adjusted if no
peaks are detected within this interval.
QS : int, float, optional
Typical QRS duration, in seconds.
QT : int, float, optional
Typical QT interval, in seconds.
RTmin : int, float, optional
Minimum interval between R and T peaks, in seconds.
RTmax : int, float, optional
Maximum interval between R and T peaks, in seconds.
QRSa : int, float, optional
Typical QRS peak-to-peak amplitude, in microvolts.
QRSamin : int, float, optional
Minimum QRS peak-to-peak amplitude, in microvolts.
thresh : int, float, optional
The relative amplitude detection threshold. Used to initialize the peak
and QRS detection threshold.
'''
def __init__(
self,
fs,
adc_gain,
hr=75,
RRdelta=0.2,
RRmin=0.28,
RRmax=2.4,
QS=0.07,
QT=0.35,
RTmin=0.25,
RTmax=0.33,
QRSa=750,
QRSamin=130,
thresh=1.0,
):
pass
| 2 | 1 | 77 | 11 | 65 | 8 | 3 | 0.64 | 1 | 2 | 0 | 0 | 1 | 42 | 1 | 1 | 115 | 14 | 66 | 60 | 49 | 42 | 49 | 45 | 47 | 3 | 1 | 1 | 3 |
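The GQRS.Conf constructor above derives its detection thresholds from the sampling rate and ADC gain. The arithmetic below re-does that derivation by hand for a hypothetical 250 Hz, gain-200 channel, assuming time_to_sample_number(1, fs) evaluates to fs for integer fs; the numbers only restate the expressions in __init__.

fs, adc_gain = 250, 200            # hypothetical sampling rate and ADC gain
sps = fs                           # samples per second
dt = int(0.07 * sps / 4)           # a quarter of the typical QRS duration, in samples
dv = adc_gain * 130 * 0.001        # QRSamin converted to adu
pthr = int((1.0 * dv * dv) / 6)    # peak detection threshold
qthr = pthr << 1                   # QRS detection threshold
pthmin = pthr >> 2                 # hard minimum peak threshold
qthmin = int((pthmin << 2) / 3)

print(dt, pthr, qthr, pthmin, qthmin)   # 4 112 224 28 37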
147,718 |
MIT-LCP/wfdb-python
|
wfdb/io/datasource.py
|
wfdb.io.datasource.DataSourceType
|
class DataSourceType(Enum):
LOCAL = auto()
HTTP = auto()
|
class DataSourceType(Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 4 | 0 | 0 |
147,719 |
MIT-LCP/wfdb-python
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MIT-LCP_wfdb-python/wfdb/processing/qrs.py
|
wfdb.processing.qrs.GQRS.Annotation
|
class Annotation(object):
"""
Holds all of the annotation information for the QRS object.
Attributes
----------
ann_time : int, float
The time of the annotation.
ann_type : str
The type of the annotation.
ann_subtype : int
The subtype of the annotation.
ann_num : int
The number of the annotation.
"""
def __init__(self, ann_time, ann_type, ann_subtype, ann_num):
self.time = ann_time
self.type = ann_type
self.subtype = ann_subtype
self.num = ann_num
|
class Annotation(object):
'''
Holds all of the annotation information for the QRS object.
Attributes
----------
ann_time : int, float
The time of the annotation.
ann_type : str
The type of the annotation.
ann_subtype : int
The subtype of the annotation.
ann_num : int
The number of the annotation.
'''
def __init__(self, ann_time, ann_type, ann_subtype, ann_num):
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 1 | 2.17 | 1 | 0 | 0 | 0 | 1 | 4 | 1 | 1 | 22 | 3 | 6 | 6 | 4 | 13 | 6 | 6 | 4 | 1 | 1 | 0 | 1 |
147,720 |
MIT-LCP/wfdb-python
|
tests/test_url.py
|
tests.test_url.TestNetFiles
|
class TestNetFiles(unittest.TestCase):
"""
Test accessing remote files.
"""
def test_requests(self):
"""
Test reading a remote file using various APIs.
This tests that we can create a file object using
wfdb.io._url.openurl(), and tests that the object implements
the standard Python API functions for a file of the
appropriate type.
Parameters
----------
N/A
Returns
-------
N/A
"""
text_data = """
BERNARDO: Who's there?
FRANCISCO: Nay, answer me: stand, and unfold yourself.
BERNARDO: Long live the king!
FRANCISCO: Bernardo?
BERNARDO: He.
FRANCISCO: You come most carefully upon your hour.
BERNARDO: 'Tis now struck twelve; get thee to bed, Francisco.
"""
binary_data = text_data.encode()
file_content = {"/foo.txt": binary_data}
# Test all possible combinations of:
# - whether or not the server supports compression
# - whether or not the server supports random access
# - chosen buffering policy
for allow_gzip in (False, True):
for allow_range in (False, True):
with DummyHTTPServer(
file_content=file_content,
allow_gzip=allow_gzip,
allow_range=allow_range,
) as server:
url = server.url("/foo.txt")
for buffering in (-2, -1, 0, 20):
self._test_text(url, text_data, buffering)
self._test_binary(url, binary_data, buffering)
def _test_text(self, url, content, buffering):
"""
Test reading a URL using text-mode file APIs.
Parameters
----------
url : str
URL of the remote resource.
content : str
Expected content of the resource.
buffering : int
Buffering policy for openurl().
Returns
-------
N/A
"""
# read(-1), readable(), seekable()
with wfdb.io._url.openurl(url, "r", buffering=buffering) as tf:
self.assertTrue(tf.readable())
self.assertTrue(tf.seekable())
self.assertEqual(tf.read(), content)
self.assertEqual(tf.read(), "")
# read(10)
with wfdb.io._url.openurl(url, "r", buffering=buffering) as tf:
result = ""
while True:
chunk = tf.read(10)
result += chunk
if len(chunk) < 10:
break
self.assertEqual(result, content)
# readline(), seek(), tell()
with wfdb.io._url.openurl(url, "r", buffering=buffering) as tf:
result = ""
while True:
rpos = tf.tell()
tf.seek(0)
tf.seek(rpos)
chunk = tf.readline()
result += chunk
if len(chunk) == 0:
break
self.assertEqual(result, content)
def _test_binary(self, url, content, buffering):
"""
Test reading a URL using binary-mode file APIs.
Parameters
----------
url : str
URL of the remote resource.
content : bytes
Expected content of the resource.
buffering : int
Buffering policy for openurl().
Returns
-------
N/A
"""
# read(-1), readable(), seekable()
with wfdb.io._url.openurl(url, "rb", buffering=buffering) as bf:
self.assertTrue(bf.readable())
self.assertTrue(bf.seekable())
self.assertEqual(bf.read(), content)
self.assertEqual(bf.read(), b"")
self.assertEqual(bf.tell(), len(content))
# read(10)
with wfdb.io._url.openurl(url, "rb", buffering=buffering) as bf:
result = b""
while True:
chunk = bf.read(10)
result += chunk
if len(chunk) < 10:
break
self.assertEqual(result, content)
self.assertEqual(bf.tell(), len(content))
# readline()
with wfdb.io._url.openurl(url, "rb", buffering=buffering) as bf:
result = b""
while True:
chunk = bf.readline()
result += chunk
if len(chunk) == 0:
break
self.assertEqual(result, content)
self.assertEqual(bf.tell(), len(content))
# read1(10), seek(), tell()
with wfdb.io._url.openurl(url, "rb", buffering=buffering) as bf:
bf.seek(0, 2)
self.assertEqual(bf.tell(), len(content))
bf.seek(0)
result = b""
while True:
rpos = bf.tell()
bf.seek(0)
bf.seek(rpos)
chunk = bf.read1(10)
result += chunk
if len(chunk) == 0:
break
self.assertEqual(result, content)
self.assertEqual(bf.tell(), len(content))
# readinto(bytearray(10))
with wfdb.io._url.openurl(url, "rb", buffering=buffering) as bf:
result = b""
chunk = bytearray(10)
while True:
count = bf.readinto(chunk)
result += chunk[:count]
if count < 10:
break
self.assertEqual(result, content)
self.assertEqual(bf.tell(), len(content))
# readinto1(bytearray(10))
with wfdb.io._url.openurl(url, "rb", buffering=buffering) as bf:
result = b""
chunk = bytearray(10)
while True:
count = bf.readinto1(chunk)
result += chunk[:count]
if count == 0:
break
self.assertEqual(result, content)
self.assertEqual(bf.tell(), len(content))
|
class TestNetFiles(unittest.TestCase):
'''
Test accessing remote files.
'''
def test_requests(self):
'''
Test reading a remote file using various APIs.
This tests that we can create a file object using
wfdb.io._url.openurl(), and tests that the object implements
the standard Python API functions for a file of the
appropriate type.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def _test_text(self, url, content, buffering):
'''
Test reading a URL using text-mode file APIs.
Parameters
----------
url : str
URL of the remote resource.
content : str
Expected content of the resource.
buffering : int
Buffering policy for openurl().
Returns
-------
N/A
'''
pass
def _test_binary(self, url, content, buffering):
'''
Test reading a URL using binary-mode file APIs.
Parameters
----------
url : str
URL of the remote resource.
content : bytes
Expected content of the resource.
buffering : int
Buffering policy for openurl().
Returns
-------
N/A
'''
pass
| 4 | 4 | 60 | 6 | 36 | 18 | 7 | 0.52 | 1 | 2 | 1 | 0 | 3 | 0 | 3 | 75 | 188 | 22 | 109 | 21 | 105 | 57 | 97 | 18 | 93 | 11 | 2 | 4 | 20 |
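The API exercised by these tests can be called directly; a minimal sketch, assuming a reachable HTTP server (the URL below is a placeholder):

import wfdb.io._url

url = "https://example.com/foo.txt"  # placeholder URL
with wfdb.io._url.openurl(url, "r", buffering=-1) as tf:
    text = tf.read()  # same call pattern as _test_text() above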
147,721 |
MIT-LCP/wfdb-python
|
wfdb/io/_url.py
|
wfdb.io._url.NetFile
|
class NetFile(io.BufferedIOBase):
"""
File object providing random access to a remote file over HTTP.
Attributes
----------
url : str
URL of the remote file.
buffering : int, optional
Buffering policy. If buffering = 0, internal buffering is
disabled; each operation on the stream requires a separate
request to the server. If buffering = -2, the entire file is
downloaded in a single request. If buffering > 0, it
specifies the minimum size of the internal buffer. If
buffering = -1, the default buffer size is used.
"""
def __init__(self, url, buffering=-1):
self.url = url
self.name = url
self.buffering = buffering
self._pos = 0
self._file_size = None
self._buffer = b""
self._buffer_start = 0
self._buffer_end = 0
self._current_url = self.url
def _read_buffered_range(self, start, end):
"""
Read a range of bytes from the internal buffer.
Parameters
----------
start : int
Starting byte offset of the desired range.
end : int
Ending byte offset of the desired range.
Returns
-------
data : memoryview
A memoryview of the given byte range.
"""
bstart = start - self._buffer_start
bend = end - self._buffer_start
if 0 <= bstart <= bend:
return memoryview(self._buffer)[bstart:bend]
else:
return memoryview(b"")
def _read_range(self, start, end):
"""
Read a range of bytes from the remote file.
The result is returned as a sequence of chunks; the sizes of
the individual chunks are unspecified. The total size may be
less than requested if the end of the file is reached.
Parameters
----------
start : int
Starting byte offset of the desired range.
end : int or None
Ending byte offset of the desired range, or None to read
all data up to the end of the file.
Yields
------
data : memoryview
A memoryview containing a chunk of the desired range.
"""
# Read buffered data if available.
if self._buffer_start <= start < self._buffer_end:
if end is None:
range_end = self._buffer_end
else:
range_end = min(end, self._buffer_end)
yield self._read_buffered_range(start, range_end)
start = range_end
if end is not None and start >= end:
return
if self._file_size is not None and start >= self._file_size:
return
buffer_store = False
if self.buffering == BUFFER_WHOLE_FILE:
# Request entire file and save it in the internal buffer.
req_start = 0
req_end = None
buffer_store = True
elif end is None:
# Request range from start to EOF and don't save it in the
# buffer (since the result will be immediately consumed.)
req_start = start
req_end = None
else:
# Request a fixed range of bytes. Save it in the buffer
# if it is smaller than the maximum buffer size.
buffer_size = self.buffering
if buffer_size < 0:
buffer_size = DEFAULT_BUFFER_SIZE
req_start = start
req_end = end
if req_end < req_start + buffer_size:
req_end = req_start + buffer_size
buffer_store = True
with RangeTransfer(self._current_url, req_start, req_end) as xfer:
# Update current file URL.
self._current_url = xfer.response_url
# If we requested a range but the server doesn't support
# random access, then unless buffering is disabled, save
# entire file in the buffer.
if self.buffering == 0:
buffer_store = False
elif xfer.is_complete and (start, end) != (0, None):
buffer_store = True
if buffer_store:
# Load data into buffer and then return a copy to the
# caller.
(start, data) = xfer.content()
self._buffer = data
self._buffer_start = start
self._buffer_end = start + len(data)
if end is None:
end = self._buffer_end
yield self._read_buffered_range(start, end)
else:
# Return requested data to caller without buffering.
for chunk_start, chunk_data in xfer.iter_chunks():
rel_start = start - chunk_start
if 0 <= rel_start < len(chunk_data):
if end is None:
rel_end = len(chunk_data)
else:
rel_end = min(end - chunk_start, len(chunk_data))
yield memoryview(chunk_data)[rel_start:rel_end]
start = chunk_start + rel_end
# Update file size.
if self.buffering != 0:
self._file_size = xfer.file_size
def _get_size(self):
"""
Determine the size of the remote file.
Parameters
----------
N/A
Returns
-------
size : int or None
Size of the remote file, if known.
"""
size = self._file_size
if size is None:
if self.buffering == BUFFER_WHOLE_FILE:
for _ in self._read_range(0, None):
pass
else:
with RangeTransfer(self._current_url, 0, 0) as xfer:
self._current_url = xfer.response_url
self._file_size = xfer.file_size
size = self._file_size
if self.buffering == 0:
self._file_size = None
return size
def readable(self):
"""
Determine whether the file supports read() and read1() operations.
Parameters
----------
N/A
Returns
-------
True
"""
return True
def read(self, size=-1):
"""
Read bytes from the file.
Parameters
----------
size : int
Number of bytes to read, or -1 to read as many bytes as
possible.
Returns
-------
data : bytes
Bytes retrieved from the file. When the end of the file
is reached, the length will be less than the requested
size.
"""
start = self._pos
if size in (-1, None):
end = None
elif size >= 0:
end = start + size
else:
raise ValueError("invalid size: %r" % (size,))
result = b"".join(self._read_range(start, end))
self._pos += len(result)
return result
def read1(self, size=-1):
"""
Read bytes from the file.
Parameters
----------
size : int
Maximum number of bytes to read, or -1 to read as many
bytes as possible.
Returns
-------
data : bytes
Bytes retrieved from the file. When the end of the file
is reached, the length will be zero.
"""
return self.read(size)
def readinto(self, b):
"""
Read bytes from the file.
Parameters
----------
b : writable bytes-like object
Buffer in which to store the retrieved bytes.
Returns
-------
count : int
Number of bytes retrieved from the file and stored in b.
When the end of the file is reached, the count will be
less than the requested size.
"""
b = memoryview(b).cast("B")
start = self._pos
end = start + len(b)
count = 0
for chunk in self._read_range(start, end):
b[count : count + len(chunk)] = chunk
count += len(chunk)
self._pos += count
return count
def readinto1(self, b):
"""
Read bytes from the file.
Parameters
----------
b : writable bytes-like object
Buffer in which to store the retrieved bytes.
Returns
-------
count : int
Number of bytes retrieved from the file and stored in b.
When the end of the file is reached, the count will be
zero.
"""
return self.readinto(b)
def seekable(self):
"""
Determine whether the file supports seek() and tell() operations.
Parameters
----------
N/A
Returns
-------
True
"""
return True
def seek(self, offset, whence=os.SEEK_SET):
"""
Set the current file position.
Parameters
----------
offset : int
Byte offset of the new file position, relative to the base
position specified by whence.
whence : int, optional
SEEK_SET (0, default) if offset is relative to the start
of the file; SEEK_CUR (1) if offset is relative to the
current file position; SEEK_END (2) if offset is relative
to the end of the file.
Returns
-------
offset : int
Byte offset of the new file position.
"""
if whence == os.SEEK_SET:
pos = offset
elif whence == os.SEEK_CUR:
pos = self._pos + offset
elif whence == os.SEEK_END:
size = self._get_size()
if size is None:
raise NetFileError(
"size of remote file is unknown", url=self._current_url
)
pos = size + offset
else:
raise ValueError("invalid whence: %r" % (whence,))
if pos < 0:
raise ValueError("pos < 0")
self._pos = pos
return pos
def tell(self):
"""
Retrieve the current file position.
Parameters
----------
N/A
Returns
-------
offset : int
Byte offset of the current file position.
"""
return self._pos
|
class NetFile(io.BufferedIOBase):
'''
File object providing random access to a remote file over HTTP.
Attributes
----------
url : str
URL of the remote file.
buffering : int, optional
Buffering policy. If buffering = 0, internal buffering is
disabled; each operation on the stream requires a separate
request to the server. If buffering = -2, the entire file is
downloaded in a single request. If buffering > 0, it
specifies the minimum size of the internal buffer. If
buffering = -1, the default buffer size is used.
'''
def __init__(self, url, buffering=-1):
pass
def _read_buffered_range(self, start, end):
'''
Read a range of bytes from the internal buffer.
Parameters
----------
start : int
Starting byte offset of the desired range.
end : int
Ending byte offset of the desired range.
Returns
-------
data : memoryview
A memoryview of the given byte range.
'''
pass
def _read_range(self, start, end):
'''
Read a range of bytes from the remote file.
The result is returned as a sequence of chunks; the sizes of
the individual chunks are unspecified. The total size may be
less than requested if the end of the file is reached.
Parameters
----------
start : int
Starting byte offset of the desired range.
end : int or None
Ending byte offset of the desired range, or None to read
all data up to the end of the file.
Yields
------
data : memoryview
A memoryview containing a chunk of the desired range.
'''
pass
def _get_size(self):
'''
Determine the size of the remote file.
Parameters
----------
N/A
Returns
-------
size : int or None
Size of the remote file, if known.
'''
pass
def readable(self):
'''
Determine whether the file supports read() and read1() operations.
Parameters
----------
N/A
Returns
-------
True
'''
pass
def read(self, size=-1):
'''
Read bytes from the file.
Parameters
----------
size : int
Number of bytes to read, or -1 to read as many bytes as
possible.
Returns
-------
data : bytes
Bytes retrieved from the file. When the end of the file
is reached, the length will be less than the requested
size.
'''
pass
def read1(self, size=-1):
'''
Read bytes from the file.
Parameters
----------
size : int
Maximum number of bytes to read, or -1 to read as many
bytes as possible.
Returns
-------
data : bytes
Bytes retrieved from the file. When the end of the file
is reached, the length will be zero.
'''
pass
def readinto(self, b):
'''
Read bytes from the file.
Parameters
----------
b : writable bytes-like object
Buffer in which to store the retrieved bytes.
Returns
-------
count : int
Number of bytes retrieved from the file and stored in b.
When the end of the file is reached, the count will be
less than the requested size.
'''
pass
def readinto1(self, b):
'''
Read bytes from the file.
Parameters
----------
b : writable bytes-like object
Buffer in which to store the retrieved bytes.
Returns
-------
count : int
Number of bytes retrieved from the file and stored in b.
When the end of the file is reached, the count will be
zero.
'''
pass
def seekable(self):
'''
Determine whether the file supports seek() and tell() operations.
Parameters
----------
N/A
Returns
-------
True
'''
pass
def seek(self, offset, whence=os.SEEK_SET):
'''
Set the current file position.
Parameters
----------
offset : int
Byte offset of the new file position, relative to the base
position specified by whence.
whence : int, optional
SEEK_SET (0, default) if offset is relative to the start
of the file; SEEK_CUR (1) if offset is relative to the
current file position; SEEK_END (2) if offset is relative
to the end of the file.
Returns
-------
offset : int
Byte offset of the new file position.
'''
pass
def tell(self):
'''
Retrieve the current file position.
Parameters
----------
N/A
Returns
-------
offset : int
Byte offset of the current file position.
'''
pass
| 13 | 12 | 27 | 4 | 11 | 13 | 3 | 1.22 | 1 | 4 | 2 | 0 | 12 | 9 | 12 | 32 | 358 | 56 | 136 | 46 | 123 | 166 | 121 | 44 | 108 | 17 | 5 | 5 | 41 |
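A short sketch of the buffering policies documented above, assuming openurl() (used in the tests earlier in this file set) returns a NetFile-backed object; the URL is a placeholder:

import wfdb.io._url

url = "https://example.com/data.bin"  # placeholder URL
# buffering=-2 downloads the whole file in one request; buffering=0 disables
# the internal buffer, so every read()/seek() pair issues a separate request.
with wfdb.io._url.openurl(url, "rb", buffering=-2) as f:
    f.seek(16)           # random access: seekable() returns True
    chunk = f.read(64)   # returns fewer than 64 bytes only at end of file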
147,722 |
MIT-LCP/wfdb-python
|
wfdb/io/_url.py
|
wfdb.io._url.RangeTransfer
|
class RangeTransfer:
"""
A single HTTP transfer representing a range of bytes.
Parameters
----------
url : str
URL of the remote file.
start : int, optional
Start of the byte range to download, as an offset from the
beginning of the file (inclusive, 0-based.)
end : int or None
End of the byte range to download, as an offset from the
beginning of the file (exclusive, 0-based.) If None, request
all data until the end of the file.
Attributes
----------
request_url : str
Original URL that was requested.
response_url : str
URL that was actually retrieved (after following redirections.)
is_complete : bool
True if the response contains the entire file; False if the
response contains a byte range.
file_size : int or None
Total size of the remote file. This may be None if the length
is unknown.
Notes
-----
The start and end parameters are requests that the server may or
may not honor. After creating a RangeTransfer object, call
content() or iter_chunks() to retrieve the actual response data,
which may be a subset or a superset of the requested range.
"""
def __init__(self, url, start, end):
self.request_url = url
if start == 0 and end is None:
method = "GET"
headers = {}
elif end is None:
method = "GET"
headers = {
"Range": "bytes=%d-" % start,
"Accept-Encoding": None,
}
elif end > start:
method = "GET"
headers = {
"Range": "bytes=%d-%d" % (start, end - 1),
"Accept-Encoding": None,
}
else:
method = "HEAD"
headers = {
"Accept-Encoding": None,
}
session = _get_session()
self._response = session.request(
method, url, headers=headers, stream=True
)
self._content_iter = self._response.iter_content(4096)
try:
self._parse_headers(method, self._response)
except Exception:
self.close()
raise
def _parse_headers(self, method, response):
"""
Parse the headers of the response object.
Parameters
----------
method : str
The HTTP method used for the request.
response : requests.Response
The resulting response object.
Returns
-------
N/A
Notes
-----
- response_url is set to the URL of the response
- file_size is set to the total file size
- is_complete is set to true if the response is complete
- _current_pos is set to the starting position
- _expected_end_pos is set to the expected end position
"""
self.response_url = response.url
self.file_size = None
self.is_complete = False
self._current_pos = 0
self._expected_end_pos = None
# Raise an exception if an error occurs.
if response.status_code >= 400 and response.status_code != 416:
_LOGGER.info(
"%s %s: %s", method, response.url, response.status_code
)
if response.status_code in (401, 403):
cls = NetFilePermissionError
elif response.status_code == 404:
cls = NetFileNotFoundError
else:
cls = NetFileError
raise cls(
"%s Error: %s for url: %s"
% (response.status_code, response.reason, response.url),
url=response.url,
status_code=response.status_code,
)
# Parse the Content-Range if this is a partial response.
elif response.status_code in (206, 416):
content_range = response.headers.get("Content-Range")
if content_range:
match = _CONTENT_RANGE_PATTERN.fullmatch(content_range)
if not match:
raise NetFileError(
"Invalid Content-Range: %s" % content_range,
url=response.url,
)
if match.group(1):
self._current_pos = int(match.group(1))
self._expected_end_pos = int(match.group(2)) + 1
if match.group(3):
self.file_size = int(match.group(3))
elif response.status_code == 206:
raise NetFileError(
"Missing Content-Range in partial response",
url=response.url,
)
# Parse the Content-Length if this is a complete and
# uncompressed response.
elif 200 <= response.status_code < 300:
self.is_complete = True
content_encoding = response.headers.get("Content-Encoding")
content_length = response.headers.get("Content-Length")
if content_length and not content_encoding:
try:
self.file_size = int(content_length)
self._expected_end_pos = self.file_size
except ValueError:
raise NetFileError(
"Invalid Content-Length: %s" % content_length,
url=response.url,
)
_LOGGER.info(
"%s %s: %s %s-%s/%s",
method,
response.url,
response.status_code,
self._current_pos,
self._expected_end_pos,
self.file_size,
)
# If the response is an error (or an unhandled redirection)
# then discard the body.
if response.status_code >= 300:
self.close()
def close(self):
"""
Finish reading data from the response.
Any leftover data in the response body will be discarded and
the underlying HTTP connection will be returned to the pool.
Parameters
----------
N/A
Returns
-------
N/A
"""
try:
for data in self._content_iter:
pass
except Exception:
pass
self._response.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# When exiting with a normal exception, shut down cleanly by
# reading leftover response data. When exiting abnormally
# (SystemExit, KeyboardInterrupt), do nothing.
if not exc_type or issubclass(exc_type, Exception):
self.close()
def __del__(self):
# If the object is deleted without calling close(), forcibly
# close the existing connection.
response = getattr(self, "_response", None)
if response:
response.close()
def iter_chunks(self):
"""
Iterate over the response body as a sequence of chunks.
Parameters
----------
N/A
Yields
------
chunk_start : int
Byte offset within the remote file corresponding to the
start of this chunk.
chunk_data : bytes
Contents of the chunk.
"""
for chunk_data in self._content_iter:
chunk_start = self._current_pos
self._current_pos += len(chunk_data)
yield chunk_start, chunk_data
if self.is_complete:
self.file_size = self._current_pos
def content(self):
"""
Read the complete response.
Parameters
----------
N/A
Returns
-------
start : int
Byte offset within the remote file corresponding to the
start of the response.
data : bytes
Contents of the response.
"""
start = self._current_pos
chunks = []
for _, chunk_data in self.iter_chunks():
chunks.append(chunk_data)
return start, b"".join(chunks)
|
class RangeTransfer:
'''
A single HTTP transfer representing a range of bytes.
Parameters
----------
url : str
URL of the remote file.
start : int, optional
Start of the byte range to download, as an offset from the
beginning of the file (inclusive, 0-based.)
end : int or None
End of the byte range to download, as an offset from the
beginning of the file (exclusive, 0-based.) If None, request
all data until the end of the file.
Attributes
----------
request_url : str
Original URL that was requested.
response_url : str
URL that was actually retrieved (after following redirections.)
is_complete : bool
True if the response contains the entire file; False if the
response contains a byte range.
file_size : int or None
Total size of the remote file. This may be None if the length
is unknown.
Notes
-----
The start and end parameters are requests that the server may or
may not honor. After creating a RangeTransfer object, call
content() or iter_chunks() to retrieve the actual response data,
which may be a subset or a superset of the requested range.
'''
def __init__(self, url, start, end):
pass
def _parse_headers(self, method, response):
'''
Parse the headers of the response object.
Parameters
----------
method : str
The HTTP method used for the request.
response : requests.Response
The resulting response object.
Returns
-------
N/A
Notes
-----
- response_url is set to the URL of the response
- file_size is set to the total file size
- is_complete is set to true if the response is complete
- _current_pos is set to the starting position
- _expected_end_pos is set to the expected end position
'''
pass
def close(self):
'''
Finish reading data from the response.
Any leftover data in the response body will be discarded and
the underlying HTTP connection will be returned to the pool.
Parameters
----------
N/A
Returns
-------
N/A
'''
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __del__(self):
pass
def iter_chunks(self):
'''
Iterate over the response body as a sequence of chunks.
Parameters
----------
N/A
Yields
------
chunk_start : int
Byte offset within the remote file corresponding to the
start of this chunk.
chunk_data : bytes
Contents of the chunk.
'''
pass
def content(self):
'''
Read the complete response.
Parameters
----------
N/A
Returns
-------
start : int
Byte offset within the remote file corresponding to the
start of the response.
data : bytes
Contents of the response.
'''
pass
| 9 | 5 | 27 | 3 | 16 | 8 | 4 | 0.78 | 0 | 6 | 3 | 0 | 8 | 8 | 8 | 8 | 259 | 33 | 127 | 32 | 118 | 99 | 85 | 32 | 76 | 14 | 0 | 3 | 32 |
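A minimal sketch of the transfer behaviour described in the docstring above (private API; the URL is a placeholder, and the server may honor more or less than the requested range):

from wfdb.io._url import RangeTransfer

url = "https://example.com/data.bin"  # placeholder URL
with RangeTransfer(url, 0, 4096) as xfer:   # request bytes [0, 4096)
    start, data = xfer.content()            # actual range returned by the server
    print(xfer.is_complete, xfer.file_size, start, len(data))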
147,723 |
MJL85/natlas
|
MJL85_natlas/natlas/config.py
|
natlas.config.natlas_config
|
class natlas_config:
def __init__(self):
self.host_domains = []
self.snmp_creds = []
self.discover_acl = []
self.diagram = natlas_config_diagram()
def load(self, filename):
# load config
json_data = self.__load_json_conf(filename)
if (json_data == None):
return 0
self.host_domains = json_data['domains']
self.snmp_creds = json_data['snmp']
# parse 'discover' block ACL entries
for acl in json_data['discover']:
try:
entry = natlas_discover_acl(acl)
except Exception as e:
print(e)
return 0
self.discover_acl.append(entry)
json_diagram = json_data.get('diagram', None)
if (json_diagram != None):
self.diagram.node_text_size = json_diagram.get('node_text_size', 8)
self.diagram.link_text_size = json_diagram.get('link_text_size', 7)
self.diagram.title_text_size = json_diagram.get('title_text_size', 15)
self.diagram.get_stack_members = json_diagram.get('get_stack_members', False)
self.diagram.get_vss_members = json_diagram.get('get_vss_members', False)
self.diagram.expand_stackwise = json_diagram.get('expand_stackwise', False)
self.diagram.expand_vss = json_diagram.get('expand_vss', False)
self.diagram.expand_lag = json_diagram.get('expand_lag', True)
self.diagram.group_vpc = json_diagram.get('group_vpc', False)
self.diagram.node_text = json_diagram.get('node_text', self.diagram.node_text)
return 1
def __load_json_conf(self, json_file):
json_data = None
fd = open(json_file)
json_data = fd.read()
fd.close()
json_data = json.loads(json_data)
return json_data
def generate_new(self):
return '{\n' \
' "snmp" : [\n' \
' { "community":"private", "ver":2 },\n' \
' { "community":"public", "ver":2 }\n' \
' ],\n' \
' "domains" : [\n' \
' ".company.net",\n' \
' ".company.com"\n' \
' ],\n' \
' "discover" : [\n' \
' "permit ip 10.0.0.0/8",\n' \
' "permit ip 192.168.1.0/24",\n' \
' "permit ip 0.0.0.0/32"\n' \
' ],\n' \
' "diagram" : {\n' \
' "node_text_size" : 10,\n' \
' "link_text_size" : 9,\n' \
' "title_text_size" : 15,\n' \
' "get_stack_members" : 0,\n' \
' "get_vss_members" : 0,\n' \
' "expand_stackwise" : 0,\n' \
' "expand_vss" : 0,\n' \
' "expand_lag" : 1,\n' \
' "group_vpc" : 0\n' \
' }\n' \
'}'
def validate_config(self, filename):
print('Validating config...')
json_data = self.__load_json_conf(filename)
if (json_data == None):
print('Could not load config.')
return 0
ret = 0
ret += self.__validate_config_snmp(json_data)
ret += self.__validate_config_domains(json_data)
ret += self.__validate_config_discover(json_data)
ret += self.__validate_config_diagram(json_data)
if (ret < 4):
print('FAILED')
else:
print('PASSED')
def __validate_config_snmp(self, data):
sys.stdout.write('Checking snmp...')
obj = None
try:
obj = data['snmp']
except:
print('does not exist')
return 0
if (type(obj) != list):
print('not a list')
return 0
for cred in obj:
if (type(cred) != dict):
print('list contains a non-dict (%s)' % type(cred))
return 0
try:
c = cred['community']
if (type(c) != str):
print('community is not a string')
return 0
except KeyError as e:
print('one or more entries does not include %s' % e)
return 0
try:
c = cred['ver']
if (type(c) != int):
print('version is not an int')
return 0
else:
if (c != 2):
print('version for \'%s\' is not supported' % cred['community'])
return 0
except KeyError as e:
print('one or more entries does not include %s' % e)
return 0
print('ok')
return 1
def __validate_config_domains(self, data):
sys.stdout.write('Checking domains...')
obj = None
try:
obj = data['domains']
except:
print('does not exist')
return 0
if (type(obj) != list):
print('not a list')
return 0
for d in obj:
if (type(d) != str):
print('domain is not a string')
return 0
print('ok')
return 1
def __validate_config_discover(self, data):
sys.stdout.write('Checking discover...')
obj = None
try:
obj = data['discover']
except:
print('does not exist')
return 0
if (type(obj) != list):
print('not a list')
return 0
for d in obj:
if (type(d) != str):
print('ACL is not a string')
return 0
ace = d.split(' ')
if (len(ace) < 3):
print('ACE not enough params \'%s\'' % d)
return 0
if (ace[0] not in natlas_discover_acl.all_actions):
print('ACE op \'%s\' not valid' % ace[0])
return 0
if (ace[1] not in natlas_discover_acl.all_types):
print('ACE cond \'%s\' not valid' % ace[1])
return 0
print('ok')
return 1
def __validate_config_diagram(self, data):
sys.stdout.write('Checking diagram...')
obj = None
try:
obj = data['diagram']
except:
print('does not exist')
return 0
if (type(obj) != dict):
print('not a dict')
return 0
for nv in obj:
if (nv not in ['node_text_size',
'link_text_size',
'title_text_size',
'get_stack_members',
'get_vss_members',
'expand_stackwise',
'expand_vss',
'expand_lag',
'group_vpc']):
print('invalid value \'%s\'' % nv)
return 0
print('ok')
return 1
|
class natlas_config:
def __init__(self):
pass
def load(self, filename):
pass
def __load_json_conf(self, json_file):
pass
def generate_new(self):
pass
def validate_config(self, filename):
pass
def __validate_config_snmp(self, data):
pass
def __validate_config_domains(self, data):
pass
def __validate_config_discover(self, data):
pass
def __validate_config_diagram(self, data):
pass
| 10 | 0 | 22 | 2 | 21 | 0 | 4 | 0.01 | 0 | 9 | 2 | 0 | 9 | 4 | 9 | 9 | 212 | 23 | 187 | 34 | 177 | 2 | 152 | 32 | 142 | 10 | 0 | 4 | 39 |
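A usage sketch tying generate_new(), load() and validate_config() together; the file name is illustrative:

from natlas.config import natlas_config

conf = natlas_config()
with open("natlas.conf", "w") as fd:      # illustrative file name
    fd.write(conf.generate_new())         # the sample JSON shown above
if conf.load("natlas.conf"):              # load() returns 1 on success, 0 on failure
    print(conf.host_domains, conf.snmp_creds, len(conf.discover_acl))
conf.validate_config("natlas.conf")       # prints per-section checks and PASSED/FAILED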
147,724 |
MJL85/natlas
|
MJL85_natlas/natlas/config.py
|
natlas.config.natlas_config_diagram
|
class natlas_config_diagram:
node_text_size = 8
link_text_size = 7
title_text_size = 15
get_stack_members = False
get_vss_members = False
expand_stackwise = False
expand_vss = False
expand_lag = True
group_vpc = False
node_text = '<font point-size="10"><b>{node.name}</b></font><br />' \
'{node.ip}<br />' \
'<%if {node.ios}: {node.ios}<br />%>' \
'<%if {node.plat}: {node.plat}<br />%>' \
'<%if ("{node.serial}"!=None)&({node.vss.enabled}==0)&({node.stack.enabled}==0): {node.serial}<br />%>' \
'<%if ({node.stack.enabled}==1)&({config.diagram.expand_stackwise}==1): {stack.serial}<br />%>' \
'<%if {node.vss.enabled}&({config.diagram.expand_vss}==1): {vss.serial}<br />%>' \
'<%if ({node.vss.enabled}==1)&({config.diagram.expand_vss}==0): VSS {node.vss.domain}<br />%>' \
'<%if {node.vss.enabled}&({config.diagram.expand_vss}==0): VSS 0 - {node.vss.members[0].plat} - {node.vss.members[0].serial}<br />VSS 1 - {node.vss.members[1].plat} - {node.vss.members[1].serial}<br />%>' \
'<%if {node.bgp_las}: BGP {node.bgp_las}<br />%>' \
'<%if {node.ospf_id}: OSPF {node.ospf_id}<br />%>' \
'<%if {node.hsrp_pri}: HSRP VIP {node.hsrp_vip}<br />HSRP Pri {node.hsrp_pri}<br />%>' \
'<%if {node.stack.enabled}: Stackwise {node.stack.count}<br />%>' \
'<%stack SW {stack.num} - {stack.plat} {stack.serial} ({stack.role})<br />%>' \
'<%loopback {lo.name} - {lo.ip}<br />%>' \
'<%svi VLAN {svi.vlan} - {svi.ip}<br />%>'
|
class natlas_config_diagram:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0 | 26 | 11 | 25 | 0 | 11 | 11 | 10 | 0 | 0 | 0 | 0 |
147,725 |
MJL85/natlas
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MJL85_natlas/natlas/mac.py
|
natlas.mac.natlas_mac.mac_object
|
class mac_object:
def __init__(self, _host, _ip, _vlan, _mac, _port):
self.node_host = _host
self.node_ip = _ip
self.vlan = int(_vlan)
self.mac = _mac
self.port = _port
def __str__(self):
return ('<node_host="%s", node_ip="%s", vlan="%s", mac="%s", port="%s">'
% (self.node_host, self.node_ip, self.vlan, self.mac, self.port))
def __repr__(self):
return self.__str__()
|
class mac_object:
def __init__(self, _host, _ip, _vlan, _mac, _port):
pass
def __str__(self):
pass
def __repr__(self):
pass
| 4 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 3 | 5 | 3 | 3 | 13 | 1 | 12 | 9 | 8 | 0 | 11 | 9 | 7 | 1 | 0 | 0 | 3 |
147,726 |
MJL85/natlas
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MJL85_natlas/natlas/node.py
|
natlas.node.natlas_node._node_opts
|
class _node_opts:
def __init__(self):
self.reset()
def reset(self, setting=False):
self.get_name = setting
self.get_ip = setting
self.get_plat = setting
self.get_ios = setting
self.get_router = setting
self.get_ospf_id = setting
self.get_bgp_las = setting
self.get_hsrp_pri = setting
self.get_hsrp_vip = setting
self.get_serial = setting
self.get_stack = setting
self.get_stack_details = setting
self.get_vss = setting
self.get_vss_details = setting
self.get_svi = setting
self.get_lo = setting
self.get_bootf = setting
self.get_chassis_info = setting
self.get_vpc = setting
|
class _node_opts:
def __init__(self):
pass
def reset(self, setting=False):
pass
| 3 | 0 | 11 | 0 | 11 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 | 19 | 2 | 2 | 24 | 1 | 23 | 22 | 20 | 0 | 23 | 22 | 20 | 1 | 0 | 0 | 2 |
147,727 |
MJL85/natlas
|
MJL85_natlas/natlas-cli.py
|
natlas-cli.natlas_mod
|
class natlas_mod:
def __init__(self):
self.filename = ''
self.name = ''
self.version = ''
self.author = ''
self.authoremail = ''
self.syntax = None
self.about = None
self.help = None
self.example = None
self.entryfunc = None
self.notimer = 0
self.require_api = None
self.preload_conf = 1
def __str__(self):
return ('<name="%s", version="%s", author="%s">' % (self.name, self.version, self.author))
def __repr__(self):
return self.__str__()
|
class natlas_mod:
def __init__(self):
pass
def __str__(self):
pass
def __repr__(self):
pass
| 4 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 13 | 3 | 3 | 19 | 0 | 19 | 17 | 15 | 0 | 19 | 17 | 15 | 1 | 0 | 0 | 3 |
147,728 |
MJL85/natlas
|
MJL85_natlas/natlas/config.py
|
natlas.config.natlas_discover_acl
|
class natlas_discover_acl:
'''
Define an ACL entry for the 'discover' config block.
Defined in the form:
<action> <type> <str>
Where
<action> = permit, deny, leaf, nop
<type> = ip, host
<str> = string
'''
all_actions = [ ';', 'permit', 'deny', 'leaf', 'include' ]
all_types = [ ';', 'ip', 'host', 'software', 'platform', 'serial' ]
def __init__(self, str):
self.action = "nop"
self.type = "nop"
self.str = "nop"
t = list(filter(None, str.split()))
if (len(t) < 3):
raise Exception('Invalid ACL entry: "%s"' % str)
self.action = t[0]
self.type = t[1]
self.str = t[2]
if (self.action not in self.all_actions):
raise Exception('Invalid ACL entry: "%s"; %s' % (str, self.action))
if (self.type not in self.all_types):
raise Exception('Invalid ACL entry: "%s"; %s' % (str, self.type))
def __repr__(self):
return '<%s %s %s>' % (self.action, self.type, self.str)
|
class natlas_discover_acl:
'''
Define an ACL entry for the 'discover' config block.
Defined in the form:
<action> <type> <str>
Where
<action> = permit, deny, leaf, nop
<type> = ip, host
<str> = string
'''
def __init__(self, str):
pass
def __repr__(self):
pass
| 3 | 1 | 10 | 2 | 8 | 0 | 3 | 0.47 | 0 | 3 | 0 | 0 | 2 | 3 | 2 | 2 | 33 | 5 | 19 | 9 | 16 | 9 | 19 | 9 | 16 | 4 | 0 | 1 | 5 |
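The '<action> <type> <str>' form documented above, in a short sketch:

from natlas.config import natlas_discover_acl

entry = natlas_discover_acl("permit ip 10.0.0.0/8")
print(entry.action, entry.type, entry.str)    # -> permit ip 10.0.0.0/8
# An unknown action or type raises Exception('Invalid ACL entry: ...').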
147,729 |
MJL85/natlas
|
MJL85_natlas/natlas/node.py
|
natlas.node.natlas_node_lo
|
class natlas_node_lo:
def __init__(self, name, ips):
self.name = name.replace('Loopback', 'lo')
self.ips = ips
def __str__(self):
return ('Name = %s\nIPs = %s' % (self.name, self.ips))
def __repr__(self):
return ('<name="%s",ips=%s>' % (self.name, self.ips))
|
class natlas_node_lo:
def __init__(self, name, ips):
pass
def __str__(self):
pass
def __repr__(self):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 2 | 3 | 3 | 8 | 0 | 8 | 6 | 4 | 0 | 8 | 6 | 4 | 1 | 0 | 0 | 3 |
147,730 |
MJL85/natlas
|
MJL85_natlas/natlas/natlas.py
|
natlas.natlas.natlas
|
class natlas:
def __init__(self):
if (sys.version_info < REQUIRES_PYTHON):
raise Exception('Requires Python %i.%i' % (REQUIRES_PYTHON[0], REQUIRES_PYTHON[1]))
return
self.config_file = None
self.config = None
self.network = None
self.diagram = None
self.catalog = None
def __try_snmp(self, node):
if (node == None): return 0
if (node.snmpobj == None): return 0
if (node.snmpobj.success == 1): return 1
if (node.try_snmp_creds(self.config.snmp_creds) == 0):
raise Exception('No valid SNMP credentials for %s' % node.ip)
return 1
def config_generate(self):
return natlas_config().generate_new()
def config_validate(self, conf_file):
return natlas_config().validate_config(conf_file)
def config_load(self, conf_file):
self.config = None
c = natlas_config()
c.load(conf_file)
self.config = c
self.config_file = conf_file
# initialize objects
self.network = natlas_network(self.config)
def snmp_add_credential(self, snmp_ver, snmp_community):
if (self.config == None):
self.config = natlas_config()
if (snmp_ver != 2):
raise ValueError('snmp_ver is not valid')
return
cred = {}
cred['ver'] = snmp_ver
cred['community'] = snmp_community
self.config.snmp_creds.append(cred)
def set_discover_maxdepth(self, depth):
self.network.set_max_depth(int(depth))
def set_verbose(self, verbose):
self.network.set_verbose(verbose)
def discover_network(self, root_ip, details):
self.network.discover(root_ip)
if (details == 1):
self.network.discover_details()
# initialize the output objects
self.diagram = natlas_output_diagram(self.network)
self.catalog = natlas_output_catalog(self.network)
def new_node(self, node_ip):
node = natlas_node(ip=node_ip)
self.__try_snmp(node)
return node
def query_node(self, node, **get_values):
# see natlas_node._node_opts in node.py for what get_values are available
self.__try_snmp(node)
node.opts.reset(False)
for getv in get_values:
setattr(node.opts, getv, get_values[getv])
node.query_node()
return
def write_diagram(self, output_file, diagram_title):
self.diagram.generate(output_file, diagram_title)
def write_catalog(self, output_file):
self.catalog.generate(output_file)
def get_switch_vlans(self, switch_ip):
node = natlas_node(switch_ip)
if (node.try_snmp_creds(self.config.snmp_creds) == 0):
return []
return node.get_vlans()
def get_switch_macs(self, switch_ip=None, node=None, vlan=None, mac=None, port=None, verbose=0):
'''
Get the CAM table from a switch.
Args:
switch_ip IP address of the device
node natlas_node from new_node()
vlan Filter results by VLAN
mac          Filter results by MAC address (regex)
port Filter results by port (regex)
verbose Display progress to stdout
switch_ip or node is required
Return:
Array of natlas_mac objects
'''
if (switch_ip == None):
if (node == None):
raise Exception('get_switch_macs() requires switch_ip or node parameter')
return None
switch_ip = node.get_ipaddr()
mac_obj = natlas_mac(self.config)
if (vlan == None):
# get all MACs
macs = mac_obj.get_macs(switch_ip, verbose)
else:
# get MACs only for one VLAN
macs = mac_obj.get_macs_for_vlan(switch_ip, vlan, verbose)
if ((mac == None) & (port == None)):
return macs if macs else []
# filter results
ret = []
for m in macs:
if (mac != None):
if (re.match(mac, m.mac) == None):
continue
if (port != None):
if (re.match(port, m.port) == None):
continue
ret.append(m)
return ret
def get_discovered_nodes(self):
return self.network.nodes
def get_node_ip(self, node):
return node.get_ipaddr()
def get_arp_table(self, switch_ip, ip=None, mac=None, interf=None, arp_type=None):
'''
Get the ARP table from a switch.
Args:
switch_ip IP address of the device
ip Filter results by IP (regex)
mac Filter results by MAC (regex)
interf Filter results by INTERFACE (regex)
arp_type Filter results by ARP Type
Return:
Array of natlas_arp objects
'''
node = natlas_node(switch_ip)
if (node.try_snmp_creds(self.config.snmp_creds) == 0):
return []
arp = node.get_arp_table()
if (arp == None):
return []
if ((ip == None) & (mac == None) & (interf == None) & (arp_type == None)):
# no filtering
return arp
interf = str(interf) if interf else None
# filter the result table
ret = []
for a in arp:
if (ip != None):
if (re.match(ip, a.ip) == None):
continue
if (mac != None):
if (re.match(mac, a.mac) == None):
continue
if (interf != None):
if (re.match(interf, str(a.interf)) == None):
continue
if (arp_type != None):
if (re.match(arp_type, a.arp_type) == None):
continue
ret.append(a)
return ret
def get_neighbors(self, node):
self.__try_snmp(node)
cdp = node.get_cdp_neighbors()
lldp = node.get_lldp_neighbors()
return cdp+lldp
|
class natlas:
def __init__(self):
pass
def __try_snmp(self, node):
pass
def config_generate(self):
pass
def config_validate(self, conf_file):
pass
def config_load(self, conf_file):
pass
def snmp_add_credential(self, snmp_ver, snmp_community):
pass
def set_discover_maxdepth(self, depth):
pass
def set_verbose(self, verbose):
pass
def discover_network(self, root_ip, details):
pass
def new_node(self, node_ip):
pass
def query_node(self, node, **get_values):
pass
def write_diagram(self, output_file, diagram_title):
pass
def write_catalog(self, output_file):
pass
def get_switch_vlans(self, switch_ip):
pass
def get_switch_macs(self, switch_ip=None, node=None, vlan=None, mac=None, port=None, verbose=0):
'''
Get the CAM table from a switch.
Args:
switch_ip IP address of the device
node natlas_node from new_node()
vlan Filter results by VLAN
MAC Filter results by MAC address (regex)
port Filter results by port (regex)
verbose Display progress to stdout
switch_ip or node is required
Return:
Array of natlas_mac objects
'''
pass
def get_discovered_nodes(self):
pass
def get_node_ip(self, node):
pass
def get_arp_table(self, switch_ip, ip=None, mac=None, interf=None, arp_type=None):
'''
Get the ARP table from a switch.
Args:
switch_ip IP address of the device
ip Filter results by IP (regex)
mac Filter results by MAC (regex)
interf Filter results by INTERFACE (regex)
arp_type Filter results by ARP Type
Return:
Array of natlas_arp objects
'''
pass
def get_neighbors(self, node):
pass
| 20 | 2 | 9 | 1 | 7 | 2 | 3 | 0.25 | 0 | 10 | 6 | 0 | 19 | 5 | 19 | 19 | 190 | 32 | 126 | 40 | 106 | 32 | 128 | 40 | 108 | 14 | 0 | 3 | 52 |
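An end-to-end sketch of the API above; the config file name, IP address and output file are placeholders, and a reachable SNMPv2 device is assumed:

from natlas.natlas import natlas

api = natlas()
api.config_load("natlas.conf")                 # also creates the network object
api.set_discover_maxdepth(2)
api.discover_network("10.0.0.1", 1)            # details=1 also builds diagram/catalog outputs
api.write_diagram("network.svg", "Example Network")
macs = api.get_switch_macs(switch_ip="10.0.0.1", vlan=10)
arps = api.get_arp_table("10.0.0.1")           # optionally filter by ip/mac/interf regex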
147,731 |
MJL85/natlas
|
MJL85_natlas/natlas/util.py
|
natlas.util.util
|
class util:
def get_net_bits_from_mask(netm):
cidr = 0
mt = netm.split('.')
for b in range(0, 4):
v = int(mt[b])
while (v > 0):
if (v & 0x01):
cidr += 1
v = v >> 1
return cidr
#
# Return 1 if IP is in the CIDR range.
#
def is_ipv4_in_cidr(ip, cidr):
t = cidr.split('/')
cidr_ip = t[0]
cidr_m = t[1]
o = cidr_ip.split('.')
cidr_ip = ((int(o[0])<<24) + (int(o[1]) << 16) + (int(o[2]) << 8) + (int(o[3])))
cidr_mb = 0
zeros = 32 - int(cidr_m)
for b in range(0, zeros):
cidr_mb = (cidr_mb << 1) | 0x01
cidr_mb = 0xFFFFFFFF & ~cidr_mb
o = ip.split('.')
ip = ((int(o[0])<<24) + (int(o[1]) << 16) + (int(o[2]) << 8) + (int(o[3])))
return ((cidr_ip & cidr_mb) == (ip & cidr_mb))
#
# Shorten the hostname by removing any defined domain suffixes.
#
def shorten_host_name(_host, domains):
host = _host
if (_host == None):
return 'UNKNOWN'
# some devices (eg Motorola) report as hex strings
if (_host.startswith('0x')):
try:
host = binascii.unhexlify(_host[2:]).decode('utf-8')
except:
# this can fail if the node gives us bad data - revert to original
# ex, lldp can advertise MAC as hostname, and it might not convert
# to ascii
host = _host
# Nexus appends (SERIAL) to hosts
host = re.sub('\([^\(]*\)$', '', host)
for domain in domains:
host = host.replace(domain, '')
# fix some stuff that can break Dot
host = re.sub('-', '_', host)
host = host.rstrip(' \r\n\0')
return host
#
# Return a string representation of an IPv4 address
#
def convert_ip_int_str(iip):
if ((iip != None) & (iip != '')):
ip = int(iip, 0)
ip = '%i.%i.%i.%i' % (((ip >> 24) & 0xFF), ((ip >> 16) & 0xFF), ((ip >> 8) & 0xFF), (ip & 0xFF))
return ip
return 'UNKNOWN'
def get_module_from_interf(port):
try:
s = re.search('[^\d]*(\d*)/\d*/\d*', port)
if (s):
return s.group(1)
except:
pass
return '1'
def strip_slash_masklen(cidr):
try:
s = re.search('^(.*)/[0-9]{1,2}$', cidr)
if (s):
return s.group(1)
except:
pass
return cidr
def expand_path_pattern(str):
try:
match = re.search('{([^\}]*)}', str)
tokens = match[1].split('|')
except:
return [str]
ret = []
for token in tokens:
s = str.replace(match[0], token)
ret.append(s)
return ret
|
class util:
def get_net_bits_from_mask(netm):
pass
def is_ipv4_in_cidr(ip, cidr):
pass
def shorten_host_name(_host, domains):
pass
def convert_ip_int_str(iip):
pass
def get_module_from_interf(port):
pass
def strip_slash_masklen(cidr):
pass
def expand_path_pattern(str):
pass
| 8 | 0 | 13 | 2 | 10 | 1 | 3 | 0.21 | 0 | 2 | 0 | 0 | 7 | 0 | 7 | 7 | 113 | 25 | 73 | 29 | 65 | 15 | 73 | 29 | 65 | 5 | 0 | 3 | 22 |
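These helpers are pure functions; a few worked calls (input values chosen for illustration):

from natlas.util import util

print(util.get_net_bits_from_mask("255.255.255.0"))             # 24
print(util.is_ipv4_in_cidr("10.1.2.3", "10.0.0.0/8"))           # True
print(util.shorten_host_name("sw1.company.net(FOC1234)", [".company.net"]))  # sw1
print(util.expand_path_pattern("out.{png|svg}"))                # ['out.png', 'out.svg']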
147,732 |
MJL85/natlas
|
MJL85_natlas/natlas/snmp.py
|
natlas.snmp.natlas_snmp
|
class natlas_snmp:
def __init__(self, ip='0.0.0.0'):
self.success = 0
self.ver = 0
self.v2_community = None
self._ip = ip
#
# Try to find valid SNMP credentials in the provided list.
# Returns 1 if success, 0 if failed.
#
def get_cred(self, snmp_creds):
for cred in snmp_creds:
# we don't currently support anything other than SNMPv2
if (cred['ver'] != 2):
continue
community = cred['community']
cmdGen = cmdgen.CommandGenerator()
errIndication, errStatus, errIndex, varBinds = cmdGen.getCmd(
cmdgen.CommunityData(community),
cmdgen.UdpTransportTarget((self._ip, SNMP_PORT)),
'1.3.6.1.2.1.1.5.0',
lookupNames = False, lookupValues = False
)
if errIndication:
continue
else:
self.ver = 2
self.success = 1
self.v2_community = community
return 1
return 0
#
# Get single SNMP value at OID.
#
def get_val(self, oid):
cmdGen = cmdgen.CommandGenerator()
errIndication, errStatus, errIndex, varBinds = cmdGen.getCmd(
cmdgen.CommunityData(self.v2_community),
cmdgen.UdpTransportTarget((self._ip, SNMP_PORT), retries=2),
oid, lookupNames = False, lookupValues = False
)
if errIndication:
print('[E] get_snmp_val(%s): %s' % (self.v2_community, errIndication))
else:
r = varBinds[0][1].prettyPrint()
if ((r == OID_ERR) | (r == OID_ERR_INST)):
return None
return r
return None
#
# Get bulk SNMP value at OID.
#
# Returns 1 on success, 0 on failure.
#
def get_bulk(self, oid):
cmdGen = cmdgen.CommandGenerator()
errIndication, errStatus, errIndex, varBindTable = cmdGen.bulkCmd(
cmdgen.CommunityData(self.v2_community),
cmdgen.UdpTransportTarget((self._ip, SNMP_PORT), timeout=30, retries=2),
0, 50,
oid,
lookupNames = False, lookupValues = False
)
if errIndication:
print('[E] get_snmp_bulk(%s): %s' % (self.v2_community, errIndication))
else:
ret = []
for r in varBindTable:
for n, v in r:
n = str(n)
if (n.startswith(oid) == 0):
return ret
ret.append(r)
return ret
return None
#
# Lookup a value from the return table of get_bulk()
#
def cache_lookup(self, varBindTable, name):
if (varBindTable == None):
return None
for r in varBindTable:
for n, v in r:
n = str(n)
if (n == name):
return v.prettyPrint()
return None
#
# Given an OID 1.2.3.4...x.y.z return z
#
def get_last_oid_token(oid):
_oid = oid.getOid()
ts = len(_oid)
return _oid[ts-1]
|
class natlas_snmp:
def __init__(self, ip='0.0.0.0'):
pass
def get_cred(self, snmp_creds):
pass
def get_val(self, oid):
pass
def get_bulk(self, oid):
pass
def cache_lookup(self, varBindTable, name):
pass
def get_last_oid_token(oid):
pass
| 7 | 0 | 14 | 2 | 12 | 0 | 3 | 0.25 | 0 | 1 | 0 | 0 | 6 | 4 | 6 | 6 | 111 | 17 | 75 | 27 | 68 | 19 | 57 | 27 | 50 | 5 | 0 | 4 | 19 |
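A credential-probing sketch for the class above; it requires pysnmp and a reachable device, and the address/community values are placeholders:

from natlas.snmp import natlas_snmp

snmpobj = natlas_snmp("10.0.0.1")                            # placeholder address
if snmpobj.get_cred([{"ver": 2, "community": "public"}]):    # SNMPv2 only
    print(snmpobj.get_val("1.3.6.1.2.1.1.5.0"))              # sysName.0, as in get_cred()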
147,733 |
MJL85/natlas
|
MJL85_natlas/natlas/output_diagram.py
|
natlas.output_diagram.natlas_output_diagram
|
class natlas_output_diagram:
def __init__(self, network):
natlas_output.__init__(self)
self.network = network
self.config = network.config
def generate(self, dot_file, title):
self.network.reset_discovered()
title_text_size = self.config.diagram.title_text_size
credits = '<table border="0">' \
'<tr>' \
'<td balign="right">' \
'<font point-size="%i"><b>$title$</b></font><br />' \
'<font point-size="%i">$date$</font><br />' \
'<font point-size="7">' \
'Generated by natlas $ver$<br />' \
'Michael Laforest</font><br />' \
'</td>' \
'</tr>' \
'</table>' % (title_text_size, title_text_size-2)
today = datetime.datetime.now()
today = today.strftime('%Y-%m-%d %H:%M')
credits = credits.replace('$ver$', __version__)
credits = credits.replace('$date$', today)
credits = credits.replace('$title$', title)
node_text_size = self.config.diagram.node_text_size
link_text_size = self.config.diagram.link_text_size
diagram = pydot.Dot(
graph_type = 'graph',
labelloc = 'b',
labeljust = 'r',
fontsize = node_text_size,
label = '<%s>' % credits
)
diagram.set_node_defaults(
fontsize = link_text_size
)
diagram.set_edge_defaults(
fontsize = link_text_size,
labeljust = 'l'
)
# add all of the nodes and links
self.__generate(diagram, self.network.root_node)
# expand output string
files = util.expand_path_pattern(dot_file)
for f in files:
# get file extension
file_name, file_ext = os.path.splitext(f)
output_func = getattr(diagram, 'write_' + file_ext.lstrip('.'), None)
if (output_func == None):
print('Error: Output type "%s" does not exist.' % file_ext)
else:
output_func(f)
print('Created diagram: %s' % f)
def __generate(self, diagram, node):
if (node == None):
return (0, 0)
if (node.discovered > 0):
return (0, 0)
node.discovered = 1
dot_node = self.__get_node(diagram, node)
if (dot_node.ntype == 'single'):
diagram.add_node(
pydot.Node(
name = node.name,
label = '<%s>' % dot_node.label,
style = dot_node.style,
shape = dot_node.shape,
peripheries = dot_node.peripheries
)
)
elif (dot_node.ntype == 'vss'):
cluster = pydot.Cluster(
graph_name = node.name,
suppress_disconnected = False,
labelloc = 't',
labeljust = 'c',
fontsize = self.config.diagram.node_text_size,
label = '<<br /><b>VSS %s</b>>' % node.vss.domain
)
for i in range(0, 2):
# {vss.} vars
nlabel = dot_node.label.format(vss=node.vss.members[i])
cluster.add_node(
pydot.Node(
name = '%s[natlasVSS%i]' % (node.name, i+1),
label = '<%s>' % nlabel,
style = dot_node.style,
shape = dot_node.shape,
peripheries = dot_node.peripheries
)
)
diagram.add_subgraph(cluster)
elif (dot_node.ntype == 'vpc'):
cluster = pydot.Cluster(
graph_name = node.name,
suppress_disconnected = False,
labelloc = 't',
labeljust = 'c',
fontsize = self.config.diagram.node_text_size,
label = '<<br /><b>VPC %s</b>>' % node.vpc_domain
)
cluster.add_node(
pydot.Node(
name = node.name,
label = '<%s>' % dot_node.label,
style = dot_node.style,
shape = dot_node.shape,
peripheries = dot_node.peripheries
)
)
if (node.vpc_peerlink_node != None):
node2 = node.vpc_peerlink_node
node2.discovered = 1
dot_node2 = self.__get_node(diagram, node2)
cluster.add_node(
pydot.Node(
name = node2.name,
label = '<%s>' % dot_node2.label,
style = dot_node2.style,
shape = dot_node2.shape,
peripheries = dot_node2.peripheries
)
)
diagram.add_subgraph(cluster)
elif (dot_node.ntype == 'stackwise'):
cluster = pydot.Cluster(
graph_name = node.name,
suppress_disconnected = False,
labelloc = 't',
labeljust = 'c',
fontsize = self.config.diagram.node_text_size,
label = '<<br /><b>Stackwise</b>>'
)
for i in range(0, node.stack.count):
# {stack.} vars
if (len(node.stack.members) == 0):
nlabel = dot_node.label
else:
nlabel = dot_node.label.format(stack=node.stack.members[i])
cluster.add_node(
pydot.Node(
name = '%s[natlasSW%i]' % (node.name, i+1),
label = '<%s>' % nlabel,
style = dot_node.style,
shape = dot_node.shape,
peripheries = dot_node.peripheries
)
)
diagram.add_subgraph(cluster)
lags = []
for link in node.links:
self.__generate(diagram, link.node)
# determine if this link should be broken out or not
expand_lag = 0
if (self.config.diagram.expand_lag == 1):
expand_lag = 1
elif (link.local_lag == 'UNKNOWN'):
expand_lag = 1
elif (self.__does_lag_span_devs(link.local_lag, node.links) > 1):
# a LAG could span different devices, eg Nexus.
# in this case we should always break it out, otherwise we could
# get an unlinked node in the diagram.
expand_lag = 1
if (expand_lag == 1):
self.__create_link(diagram, node, link, 0)
else:
found = 0
for lag in lags:
if (link.local_lag == lag):
found = 1
break
if (found == 0):
lags.append(link.local_lag)
self.__create_link(diagram, node, link, 1)
def __get_node(self, diagram, node):
dot_node = natlas_diagram_dot_node()
dot_node.ntype = 'single'
dot_node.shape = 'ellipse'
dot_node.style = 'solid'
dot_node.peripheries = 1
dot_node.label = ''
# get the node text
dot_node.label = self.__get_node_text(diagram, node, self.config.diagram.node_text)
# set the node properties
if (node.vss.enabled == 1):
if (self.config.diagram.expand_vss == 1):
dot_node.ntype = 'vss'
else:
# group VSS into one diagram node
dot_node.peripheries = 2
if (node.stack.count > 0):
if (self.config.diagram.expand_stackwise == 1):
dot_node.ntype = 'stackwise'
else:
# group Stackwise into one diagram node
dot_node.peripheries = node.stack.count
if (node.vpc_domain != None):
if (self.config.diagram.group_vpc == 1):
dot_node.ntype = 'vpc'
if (node.router == 1):
dot_node.shape = 'diamond'
return dot_node
def __create_link(self, diagram, node, link, draw_as_lag):
link_color = 'black'
link_style = 'solid'
link_label = ''
if ((link.local_port == node.vpc_peerlink_if) | (link.local_lag == node.vpc_peerlink_if)):
link_label += 'VPC '
if (draw_as_lag):
link_label += 'LAG'
members = 0
for l in node.links:
if (l.local_lag == link.local_lag):
members += 1
link_label += '\n%i Members' % members
else:
link_label += 'P:%s\nC:%s' % (link.local_port, link.remote_port)
is_lag = 1 if (link.local_lag != 'UNKNOWN') else 0
if (draw_as_lag == 0):
# LAG as member
if (is_lag):
local_lag_ip = ''
remote_lag_ip = ''
if (len(link.local_lag_ips)):
local_lag_ip = ' - %s' % link.local_lag_ips[0]
if (len(link.remote_lag_ips)):
remote_lag_ip = ' - %s' % link.remote_lag_ips[0]
link_label += '\nLAG Member'
if ((local_lag_ip == '') & (remote_lag_ip == '')):
link_label += '\nP:%s | C:%s' % (link.local_lag, link.remote_lag)
else:
link_label += '\nP:%s%s' % (link.local_lag, local_lag_ip)
link_label += '\nC:%s%s' % (link.remote_lag, remote_lag_ip)
# IP Addresses
if ((link.local_if_ip != 'UNKNOWN') & (link.local_if_ip != None)):
link_label += '\nP:%s' % link.local_if_ip
if ((link.remote_if_ip != 'UNKNOWN') & (link.remote_if_ip != None)):
link_label += '\nC:%s' % link.remote_if_ip
else:
# LAG as grouping
for l in node.links:
if (l.local_lag == link.local_lag):
link_label += '\nP:%s | C:%s' % (l.local_port, l.remote_port)
local_lag_ip = ''
remote_lag_ip = ''
if (len(link.local_lag_ips)):
local_lag_ip = ' - %s' % link.local_lag_ips[0]
if (len(link.remote_lag_ips)):
remote_lag_ip = ' - %s' % link.remote_lag_ips[0]
if ((local_lag_ip == '') & (remote_lag_ip == '')):
link_label += '\nP:%s | C:%s' % (link.local_lag, link.remote_lag)
else:
link_label += '\nP:%s%s' % (link.local_lag, local_lag_ip)
link_label += '\nC:%s%s' % (link.remote_lag, remote_lag_ip)
if (link.link_type == '1'):
# Trunk = Bold/Blue
link_color = 'blue'
link_style = 'bold'
if ((link.local_native_vlan == link.remote_native_vlan) | (link.remote_native_vlan == None)):
link_label += '\nNative %s' % link.local_native_vlan
else:
link_label += '\nNative P:%s C:%s' % (link.local_native_vlan, link.remote_native_vlan)
if (link.local_allowed_vlans == link.remote_allowed_vlans):
link_label += '\nAllowed %s' % link.local_allowed_vlans
else:
link_label += '\nAllowed P:%s' % link.local_allowed_vlans
if (link.remote_allowed_vlans != None):
link_label += '\nAllowed C:%s' % link.remote_allowed_vlans
elif (link.link_type is None):
# Routed = Bold/Red
link_color = 'red'
link_style = 'bold'
else:
# Switched access, include VLAN ID in label
if (link.vlan != None):
link_label += '\nVLAN %s' % link.vlan
edge_src = node.name
edge_dst = link.node.name
lmod = util.get_module_from_interf(link.local_port)
rmod = util.get_module_from_interf(link.remote_port)
if (self.config.diagram.expand_vss == 1):
if (node.vss.enabled == 1):
edge_src = '%s[natlasVSS%s]' % (node.name, lmod)
if (link.node.vss.enabled == 1):
edge_dst = '%s[natlasVSS%s]' % (link.node.name, rmod)
if (self.config.diagram.expand_stackwise == 1):
if (node.stack.count > 0):
edge_src = '%s[natlasSW%s]' % (node.name, lmod)
if (link.node.stack.count > 0):
edge_dst = '%s[natlasSW%s]' % (link.node.name, rmod)
edge = pydot.Edge(
edge_src, edge_dst,
dir = 'forward',
label = link_label,
color = link_color,
style = link_style
)
diagram.add_edge(edge)
def __does_lag_span_devs(self, lag_name, links):
if (lag_name == None):
return 0
devs = []
for link in links:
if (link.local_lag == lag_name):
if (link.node.name not in devs):
devs.append(link.node.name)
return len(devs)
def __eval_if_block(self, if_cond, node):
# evaluate condition
if_cond_eval = if_cond.format(node=node, config=self.config).strip()
try:
if eval(if_cond_eval):
return 1
except:
if ((if_cond_eval != '0') & (if_cond_eval != 'None') & (if_cond_eval != '')):
return 1
else:
return 0
return 0
def __get_node_text(self, diagram, node, fmt):
'''
Generate the node text given the format string 'fmt'
'''
fmt_proc = fmt
# IF blocks
while (1):
if_block = re.search('<%if ([^%]*): ([^%]*)%>', fmt_proc)
if (if_block == None):
break
# evaluate condition
if_cond = if_block[1]
if_val = if_block[2]
if (self.__eval_if_block(if_cond, node) == 0):
if_val = ''
fmt_proc = fmt_proc[:if_block.span()[0]] + if_val + fmt_proc[if_block.span()[1]:]
# {node.ip} = best IP
ip = node.get_ipaddr()
fmt_proc = fmt_proc.replace('{node.ip}', ip)
# stackwise
stack_block = re.search('<%stack ([^%]*)%>', fmt_proc)
if (stack_block != None):
if (node.stack.count == 0):
# no stackwise, remove this
fmt_proc = fmt_proc[:stack_block.span()[0]] + fmt_proc[stack_block.span()[1]:]
else:
val = ''
if (self.config.diagram.expand_stackwise == 0):
if (self.config.diagram.get_stack_members):
for smem in node.stack.members:
nval = stack_block[1]
nval = nval.replace('{stack.num}', str(smem.num))
nval = nval.replace('{stack.plat}', smem.plat)
nval = nval.replace('{stack.serial}', smem.serial)
nval = nval.replace('{stack.role}', smem.role)
val += nval
fmt_proc = fmt_proc[:stack_block.span()[0]] + val + fmt_proc[stack_block.span()[1]:]
# loopbacks
loopback_block = re.search('<%loopback ([^%]*)%>', fmt_proc)
if (loopback_block != None):
val = ''
for lo in node.loopbacks:
for lo_ip in lo.ips:
nval = loopback_block[1]
nval = nval.replace('{lo.name}', lo.name)
nval = nval.replace('{lo.ip}', lo_ip)
val += nval
fmt_proc = fmt_proc[:loopback_block.span()[0]] + val + fmt_proc[loopback_block.span()[1]:]
# SVIs
svi_block = re.search('<%svi ([^%]*)%>', fmt_proc)
if (svi_block != None):
val = ''
for svi in node.svis:
for svi_ip in svi.ip:
nval = svi_block[1]
nval = nval.replace('{svi.vlan}', svi.vlan)
nval = nval.replace('{svi.ip}', svi_ip)
val += nval
fmt_proc = fmt_proc[:svi_block.span()[0]] + val + fmt_proc[svi_block.span()[1]:]
# replace {stack.} with magic
fmt_proc = re.sub('{stack\.(([a-zA-Z])*)}', '$stack2354$\g<1>$stack2354$', fmt_proc)
fmt_proc = re.sub('{vss\.(([a-zA-Z])*)}', '$vss2354$\g<1>$vss2354$', fmt_proc)
# {node.} variables
fmt_proc = fmt_proc.format(node=node)
# replace magics
fmt_proc = re.sub('\$stack2354\$(([a-zA-Z])*)\$stack2354\$', '{stack.\g<1>}', fmt_proc)
fmt_proc = re.sub('\$vss2354\$(([a-zA-Z])*)\$vss2354\$', '{vss.\g<1>}', fmt_proc)
return fmt_proc
|
class natlas_output_diagram:
def __init__(self, network):
pass
def generate(self, dot_file, title):
pass
def __generate(self, diagram, node):
pass
def __get_node(self, diagram, node):
pass
def __create_link(self, diagram, node, link, draw_as_lag):
pass
def __does_lag_span_devs(self, lag_name, links):
pass
def __eval_if_block(self, if_cond, node):
pass
def __get_node_text(self, diagram, node, fmt):
'''
Generate the node text given the format string 'fmt'
'''
pass
| 9 | 1 | 55 | 6 | 44 | 4 | 11 | 0.09 | 0 | 6 | 3 | 0 | 8 | 2 | 8 | 8 | 451 | 64 | 354 | 64 | 345 | 33 | 246 | 64 | 237 | 30 | 0 | 5 | 85 |
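The __get_node_text() method in the natlas_output_diagram code above defers some template placeholders through a str.format() pass by swapping them for sentinel tokens and restoring them afterwards. Below is a minimal standalone sketch of that trick, not the natlas implementation: the sentinel name, defer_format() and FakeNode are made up for illustration, and the original additionally protects {vss.*} tokens and uses the sentinel $stack2354$.

import re

def defer_format(fmt, node):
    # protect {stack.*} placeholders so the format() call below does not try to expand them
    fmt = re.sub(r'\{stack\.([a-zA-Z]*)\}', r'$stack$\1$stack$', fmt)
    # expand only the {node.*} attributes now
    fmt = fmt.format(node=node)
    # restore the protected placeholders for a later formatting pass
    return re.sub(r'\$stack\$([a-zA-Z]*)\$stack\$', r'{stack.\1}', fmt)

class FakeNode:
    name = 'sw1'

print(defer_format('{node.name} / {stack.serial}', FakeNode()))  # sw1 / {stack.serial}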
147,734 |
MJL85/natlas
|
MJL85_natlas/natlas/output_diagram.py
|
natlas.output_diagram.natlas_diagram_dot_node
|
class natlas_diagram_dot_node:
def __init__(self):
self.ntype = 'single'
self.shape = 'ellipse'
self.style = 'solid'
self.peripheries = 1
self.label = ''
self.vss_label = ''
|
class natlas_diagram_dot_node:
def __init__(self):
pass
| 2 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 6 | 1 | 1 | 8 | 0 | 8 | 8 | 6 | 0 | 8 | 8 | 6 | 1 | 0 | 0 | 1 |
147,735 |
MJL85/natlas
|
MJL85_natlas/natlas/output.py
|
natlas.output.natlas_output
|
class natlas_output:
def __init__(self):
self.type = 'base'
def generate(self):
raise Exception('natlas_output.generate() called direct')
|
class natlas_output:
def __init__(self):
pass
def generate(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 2 | 1 | 2 | 2 | 7 | 2 | 5 | 4 | 2 | 0 | 5 | 4 | 2 | 1 | 0 | 0 | 2 |
147,736 |
MJL85/natlas
|
MJL85_natlas/natlas/node_vss.py
|
natlas.node_vss.natlas_node_vss_member
|
class natlas_node_vss_member:
def __init__(self):
self.opts = None
self.ios = None
self.serial = None
self.plat = None
def __str__(self):
return ('<serial=%s,plat=%s>' % (self.serial, self.plat))
def __repr__(self):
return self.__str__()
|
class natlas_node_vss_member:
def __init__(self):
pass
def __str__(self):
pass
def __repr__(self):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 4 | 3 | 3 | 11 | 1 | 10 | 8 | 6 | 0 | 10 | 8 | 6 | 1 | 0 | 0 | 3 |
147,737 |
MJL85/natlas
|
MJL85_natlas/natlas/mac.py
|
natlas.mac.natlas_mac
|
class natlas_mac:
class mac_object:
def __init__(self, _host, _ip, _vlan, _mac, _port):
self.node_host = _host
self.node_ip = _ip
self.vlan = int(_vlan)
self.mac = _mac
self.port = _port
def __str__(self):
return ('<node_host="%s", node_ip="%s", vlan="%s", mac="%s", port="%s">'
% (self.node_host, self.node_ip, self.vlan, self.mac, self.port))
def __repr__(self):
return self.__str__()
def __init__(self, conf):
self.config = conf
def __str__(self):
return ('<macs=%i>' % len(self.macs))
def __repr__(self):
return self.__str__()
def get_macs(self, ip, display_progress):
'''
Return array of MAC addresses from single node at IP
'''
if (ip == '0.0.0.0'):
return None
ret_macs = []
snmpobj = natlas_snmp(ip)
# find valid credentials for this node
if (snmpobj.get_cred(self.config.snmp_creds) == 0):
return None
system_name = util.shorten_host_name(snmpobj.get_val(OID_SYSNAME), self.config.host_domains)
# cache some common MIB trees
vlan_vbtbl = snmpobj.get_bulk(OID_VLANS)
ifname_vbtbl = snmpobj.get_bulk(OID_IFNAME)
for vlan_row in vlan_vbtbl:
for vlan_n, vlan_v in vlan_row:
# get VLAN ID from OID
vlan = natlas_snmp.get_last_oid_token(vlan_n)
if (vlan >= 1002):
continue
vmacs = self.get_macs_for_vlan(ip, vlan, display_progress, snmpobj, system_name, ifname_vbtbl)
if (vmacs != None):
ret_macs.extend(vmacs)
if (display_progress == 1):
print('')
return ret_macs
def get_macs_for_vlan(self, ip, vlan, display_progress=0, snmpobj=None, system_name=None, ifname_vbtbl=None):
'''
Return array of MAC addresses for a single VLAN from a single node at an IP
'''
ret_macs = []
if (snmpobj == None):
snmpobj = natlas_snmp(ip)
if (snmpobj.get_cred(self.config.snmp_creds) == 0):
return None
if (ifname_vbtbl == None):
ifname_vbtbl = snmpobj.get_bulk(OID_IFNAME)
if (system_name == None):
system_name = util.shorten_host_name(snmpobj.get_val(OID_SYSNAME), self.config.host_domains)
# change our SNMP credentials
old_cred = snmpobj.v2_community
snmpobj.v2_community = old_cred + '@' + str(vlan)
if (display_progress == 1):
sys.stdout.write(str(vlan)) # found VLAN
sys.stdout.flush()
# get CAM table for this VLAN
cam_vbtbl = snmpobj.get_bulk(OID_VLAN_CAM)
portnum_vbtbl = snmpobj.get_bulk(OID_BRIDGE_PORTNUMS)
ifindex_vbtbl = snmpobj.get_bulk(OID_IFINDEX)
cam_match = None
if (cam_vbtbl == None):
# error getting CAM for VLAN
return None
for cam_row in cam_vbtbl:
for cam_n, cam_v in cam_row:
cam_entry = natlas_mac.mac_format_ascii(cam_v, 0)
# find the interface index
p = cam_n.getOid()
portnum_oid = '%s.%i.%i.%i.%i.%i.%i' % (OID_BRIDGE_PORTNUMS, p[11], p[12], p[13], p[14], p[15], p[16])
bridge_portnum = snmpobj.cache_lookup(portnum_vbtbl, portnum_oid)
# get the interface index and description
try:
ifidx = snmpobj.cache_lookup(ifindex_vbtbl, OID_IFINDEX + '.' + bridge_portnum)
port = snmpobj.cache_lookup(ifname_vbtbl, OID_IFNAME + '.' + ifidx)
except TypeError:
port = 'None'
mac_addr = natlas_mac.mac_format_ascii(cam_v, 1)
if (display_progress == 1):
sys.stdout.write('.') # found CAM entry
sys.stdout.flush()
entry = natlas_mac.mac_object(system_name, ip, vlan, mac_addr, port)
ret_macs.append(entry)
# restore SNMP credentials
snmpobj.v2_community = old_cred
return ret_macs
#
# Parse an ASCII MAC address string to a hex string.
#
def mac_ascii_to_hex(mac_str):
mac_str = re.sub('[\.:]', '', mac_str)
if (len(mac_str) != 12):
return None
mac_hex = ''
for i in range(0, len(mac_str), 2):
mac_hex += chr(int(mac_str[i:i+2], 16))
return mac_hex
def mac_format_ascii(mac_hex, inc_dots):
v = mac_hex.prettyPrint()
return natlas_mac.mac_hex_to_ascii(v, inc_dots)
def mac_hex_to_ascii(mac_hex, inc_dots):
'''
Format a hex MAC string to ASCII
Args:
mac_hex: Value from SNMP
inc_dots: 1 to format as aabb.ccdd.eeff, 0 to format aabbccddeeff
Returns:
String representation of the mac_hex
'''
v = mac_hex[2:]
ret = ''
for i in range(0, len(v), 4):
ret += v[i:i+4]
if ((inc_dots) & ((i+4) < len(v))):
ret += '.'
return ret
|
class natlas_mac:
class mac_object:
def __init__(self, _host, _ip, _vlan, _mac, _port):
pass
def __str__(self):
pass
def __repr__(self):
pass
def __init__(self, conf):
pass
def __str__(self):
pass
def __repr__(self):
pass
def get_macs(self, ip, display_progress):
'''
Return array of MAC addresses from single node at IP
'''
pass
def get_macs_for_vlan(self, ip, vlan, display_progress=0, snmpobj=None, system_name=None, ifname_vbtbl=None):
'''
Return array of MAC addresses for a single VLAN from a single node at an IP
'''
pass
def mac_ascii_to_hex(mac_str):
pass
def mac_format_ascii(mac_hex, inc_dots):
pass
def mac_hex_to_ascii(mac_hex, inc_dots):
'''
Format a hex MAC string to ASCII
Args:
mac_hex: Value from SNMP
inc_dots: 1 to format as aabb.ccdd.eeff, 0 to format aabbccddeeff
Returns:
String representation of the mac_hex
'''
pass
| 13 | 3 | 13 | 2 | 9 | 2 | 3 | 0.28 | 0 | 7 | 3 | 0 | 8 | 1 | 8 | 8 | 161 | 36 | 99 | 50 | 86 | 28 | 98 | 50 | 85 | 11 | 0 | 3 | 32 |
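A minimal standalone sketch of the grouping logic in natlas_mac.mac_hex_to_ascii() above, reimplemented here for illustration only (this is not the natlas code): the leading '0x' that SNMP values carry is stripped and the remaining hex digits are emitted in groups of four, dot-separated when inc_dots is set.

def mac_hex_to_ascii(mac_hex, inc_dots):
    v = mac_hex[2:]  # drop the '0x' prefix
    groups = [v[i:i + 4] for i in range(0, len(v), 4)]
    return ('.' if inc_dots else '').join(groups)

assert mac_hex_to_ascii('0xaabbccddeeff', 1) == 'aabb.ccdd.eeff'
assert mac_hex_to_ascii('0xaabbccddeeff', 0) == 'aabbccddeeff'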
147,738 |
MJL85/natlas
|
MJL85_natlas/natlas/node_vss.py
|
natlas.node_vss.natlas_node_vss
|
class natlas_node_vss:
def __init__(self, snmpobj = None, opts = None):
self.members = [ natlas_node_vss_member(), natlas_node_vss_member() ]
self.enabled = 0
self.domain = None
self.opts = opts
if (snmpobj != None):
self.get_members(snmpobj)
def __str__(self):
return ('<enabled=%s,domain=%s,members=%s>' % (self.enabled, self.domain, self.members))
def __repr__(self):
return self.__str__()
def get_members(self, snmpobj):
# check if VSS is enabled
self.enabled = 1 if (snmpobj.get_val(OID_VSS_MODE) == '2') else 0
if (self.enabled == 0):
return
if (self.opts == None):
return
self.domain = snmpobj.get_val(OID_VSS_DOMAIN)
if (self.opts.get_vss_details == 0):
return
# pull some VSS-related info
module_vbtbl = snmpobj.get_bulk(OID_VSS_MODULES)
if (self.opts.get_ios): ios_vbtbl = snmpobj.get_bulk(OID_ENTPHYENTRY_SOFTWARE)
if (self.opts.get_serial): serial_vbtbl = snmpobj.get_bulk(OID_ENTPHYENTRY_SERIAL)
if (self.opts.get_plat): plat_vbtbl = snmpobj.get_bulk(OID_ENTPHYENTRY_PLAT)
chassis = 0
# enumerate VSS modules and find chassis info
for row in module_vbtbl:
for n,v in row:
if (v == 1):
modidx = str(n).split('.')[14]
# we want only chassis - line card modules have no software
ios = snmpobj.cache_lookup(ios_vbtbl, OID_ENTPHYENTRY_SOFTWARE + '.' + modidx)
if (ios != ''):
if (self.opts.get_ios): self.members[chassis].ios = ios
if (self.opts.get_plat): self.members[chassis].plat = snmpobj.cache_lookup(plat_vbtbl, OID_ENTPHYENTRY_PLAT + '.' + modidx)
if (self.opts.get_serial): self.members[chassis].serial = snmpobj.cache_lookup(serial_vbtbl, OID_ENTPHYENTRY_SERIAL + '.' + modidx)
chassis += 1
if (chassis > 1):
return
|
class natlas_node_vss:
def __init__(self, snmpobj = None, opts = None):
pass
def __str__(self):
pass
def __repr__(self):
pass
def get_members(self, snmpobj):
pass
| 5 | 0 | 13 | 3 | 9 | 1 | 5 | 0.11 | 0 | 2 | 1 | 0 | 4 | 4 | 4 | 4 | 54 | 12 | 38 | 18 | 33 | 4 | 44 | 18 | 39 | 16 | 0 | 5 | 20 |
147,739 |
MJL85/natlas
|
MJL85_natlas/natlas/output_catalog.py
|
natlas.output_catalog.natlas_output_catalog
|
class natlas_output_catalog:
def __init__(self, network):
natlas_output.__init__(self)
self.network = network
self.config = network.config
def generate(self, filename):
try:
f = open(filename, 'w')
except:
print('Unable to open catalog file "%s"' % filename)
return
for n in self.network.nodes:
# get info that we may not have yet
n.opts.get_serial = True
n.opts.get_plat = True
n.opts.get_bootf = True
n.query_node()
if (n.stack.count > 0):
# stackwise
for smem in n.stack.members:
serial = smem.serial or 'NOT CONFIGURED TO POLL'
plat = smem.plat or 'NOT CONFIGURED TO POLL'
f.write('"%s","%s","%s","%s","%s","STACK","%s"\n' % (n.name, n.ip[0], plat, n.ios, serial, n.bootfile))
elif (n.vss.enabled != 0):
#vss
for i in range(0, 2):
serial = n.vss.members[i].serial
plat = n.vss.members[i].plat
ios = n.vss.members[i].ios
f.write('"%s","%s","%s","%s","%s","VSS","%s"\n' % (n.name, n.ip[0], plat, ios, serial, n.bootfile))
else:
# stand alone
f.write('"%s","%s","%s","%s","%s","","%s"\n' % (n.name, n.ip[0], n.plat, n.ios, n.serial, n.bootfile))
f.close()
|
class natlas_output_catalog:
def __init__(self, network):
pass
def generate(self, filename):
pass
| 3 | 0 | 18 | 2 | 15 | 2 | 4 | 0.13 | 0 | 2 | 1 | 0 | 2 | 2 | 2 | 2 | 39 | 5 | 30 | 12 | 27 | 4 | 28 | 12 | 25 | 7 | 0 | 3 | 8 |
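As a design note on natlas_output_catalog.generate() above, which builds its quoted rows by hand with '"%s","%s",...' formatting: the standard csv module produces the same always-quoted layout and also escapes embedded quotes. A minimal sketch follows; the row values are made-up example data, not output from natlas.

import csv, io

buf = io.StringIO()
writer = csv.writer(buf, quoting=csv.QUOTE_ALL, lineterminator='\n')
# hypothetical example row: name, ip, platform, ios, serial, stack/vss flag, bootfile
writer.writerow(['sw1', '10.0.0.1', 'WS-C3850', '16.9.5', 'FOC1234X0AB', '', 'flash:packages.conf'])
print(buf.getvalue(), end='')
# "sw1","10.0.0.1","WS-C3850","16.9.5","FOC1234X0AB","","flash:packages.conf"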
147,740 |
MJL85/natlas
|
MJL85_natlas/natlas/node_stack.py
|
natlas.node_stack.natlas_node_stack
|
class natlas_node_stack:
def __init__(self, snmpobj = None, opts = None):
self.members = []
self.count = 0
self.enabled = 0
self.opts = opts
if (snmpobj != None):
self.get_members(snmpobj)
def __str__(self):
return ('<enabled=%s,count=%s,members=%s>' % (self.enabled, self.count, self.members))
def __repr__(self):
return self.__str__()
def get_members(self, snmpobj):
if (self.opts == None):
return
vbtbl = snmpobj.get_bulk(OID_STACK)
if (vbtbl == None):
return None
if (self.opts.get_stack_details == 0):
self.count = 0
for row in vbtbl:
for n, v in row:
n = str(n)
if (n.startswith(OID_STACK_NUM + '.')):
self.count += 1
if (self.count == 1):
self.count = 0
return
if (self.opts.get_serial): serial_vbtbl = snmpobj.get_bulk(OID_ENTPHYENTRY_SERIAL)
if (self.opts.get_plat): platf_vbtbl = snmpobj.get_bulk(OID_ENTPHYENTRY_PLAT)
for row in vbtbl:
for n, v in row:
n = str(n)
if (n.startswith(OID_STACK_NUM + '.')):
# Get info on this stack member and add to the list
m = natlas_node_stack_member()
t = n.split('.')
idx = t[14]
m.num = v
m.role = snmpobj.cache_lookup(vbtbl, OID_STACK_ROLE + '.' + idx)
m.pri = snmpobj.cache_lookup(vbtbl, OID_STACK_PRI + '.' + idx)
m.mac = snmpobj.cache_lookup(vbtbl, OID_STACK_MAC + '.' + idx)
m.img = snmpobj.cache_lookup(vbtbl, OID_STACK_IMG + '.' + idx)
if (self.opts.get_serial): m.serial = snmpobj.cache_lookup(serial_vbtbl, OID_ENTPHYENTRY_SERIAL + '.' + idx)
if (self.opts.get_plat): m.plat = snmpobj.cache_lookup(platf_vbtbl, OID_ENTPHYENTRY_PLAT + '.' + idx)
if (m.role == '1'):
m.role = 'master'
elif (m.role == '2'):
m.role = 'member'
elif (m.role == '3'):
m.role = 'notMember'
elif (m.role == '4'):
m.role = 'standby'
mac_seg = [m.mac[x:x+4] for x in range(2, len(m.mac), 4)]
m.mac = '.'.join(mac_seg)
self.members.append(m)
self.count = len(self.members)
if (self.count == 1):
self.count = 0
if (self.count > 0):
self.enabled = 1
|
class natlas_node_stack:
def __init__(self, snmpobj = None, opts = None):
pass
def __str__(self):
pass
def __repr__(self):
pass
def get_members(self, snmpobj):
pass
| 5 | 0 | 18 | 3 | 15 | 0 | 6 | 0.02 | 0 | 3 | 1 | 0 | 4 | 4 | 4 | 4 | 77 | 16 | 60 | 18 | 55 | 1 | 61 | 18 | 56 | 21 | 0 | 4 | 25 |
147,741 |
MJL85/natlas
|
MJL85_natlas/natlas/node.py
|
natlas.node.natlas_vlan
|
class natlas_vlan:
def __init__(self, vid, name):
self.id = vid
self.name = name
def __str__(self):
return ('<vid=%s,name="%s">' % (self.id, self.name))
def __repr__(self):
return self.__str__()
|
class natlas_vlan:
def __init__(self, vid, name):
pass
def __str__(self):
pass
def __repr__(self):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 2 | 3 | 3 | 8 | 0 | 8 | 6 | 4 | 0 | 8 | 6 | 4 | 1 | 0 | 0 | 3 |
147,742 |
MJL85/natlas
|
MJL85_natlas/natlas/node.py
|
natlas.node.natlas_node_svi
|
class natlas_node_svi:
def __init__(self, vlan):
self.vlan = vlan
self.ip = []
def __str__(self):
return ('VLAN = %s\nIP = %s' % (self.vlan, self.ip))
def __repr__(self):
return ('<vlan=%s,ip="%s">' % (self.vlan, self.ip))
|
class natlas_node_svi:
def __init__(self, vlan):
pass
def __str__(self):
pass
def __repr__(self):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 2 | 3 | 3 | 8 | 0 | 8 | 6 | 4 | 0 | 8 | 6 | 4 | 1 | 0 | 0 | 3 |
147,743 |
MJL85/natlas
|
MJL85_natlas/natlas/node.py
|
natlas.node.natlas_node_link
|
class natlas_node_link:
'''
Generic link to another node.
CDP and LLDP neighbors are discovered
and returned as natlas_node_link objects.
'''
def __init__(self):
# the linked node
self.node = None
# details about the link
self.link_type = None
self.remote_ip = None
self.remote_name = None
self.vlan = None
self.local_native_vlan = None
self.local_allowed_vlans = None
self.remote_native_vlan = None
self.remote_allowed_vlans = None
self.local_port = None
self.remote_port = None
self.local_lag = None
self.remote_lag = None
self.local_lag_ips = None
self.remote_lag_ips = None
self.local_if_ip = None
self.remote_if_ip = None
self.remote_platform = None
self.remote_ios = None
self.remote_mac = None
self.discovered_proto = None
def __str__(self):
return (
'link_type = %s\n' \
'remote_ip = %s\n' \
'remote_name = %s\n' \
'vlan = %s\n' \
'local_native_vlan = %s\n' \
'local_allowed_vlans = %s\n' \
'remote_native_vlan = %s\n' \
'remote_allowed_vlans = %s\n' \
'local_port = %s\n' \
'remote_port = %s\n' \
'local_lag = %s\n' \
'remote_lag = %s\n' \
'local_lag_ips = %s\n' \
'remote_lag_ips = %s\n' \
'local_if_ip = %s\n' \
'remote_if_ip = %s\n' \
'remote_platform = %s\n' \
'remote_ios = %s\n' \
'remote_mac = %s\n' \
'discovered_proto = %s\n' \
% (self.link_type, self.remote_ip, self.remote_name, self.vlan, self.local_native_vlan,
self.local_allowed_vlans, self.remote_native_vlan, self.remote_allowed_vlans,
self.local_port, self.remote_port, self.local_lag, self.remote_lag, self.local_lag_ips,
self.remote_lag_ips, self.local_if_ip, self.remote_if_ip, self.remote_platform, self.remote_ios,
self.remote_mac, self.discovered_proto))
def __repr__(self):
return ('<local_port="%s",remote_name="%s",remote_port="%s">' % (self.local_port, self.remote_name, self.remote_port))
|
class natlas_node_link:
'''
Generic link to another node.
CDP and LLDP neighbors are discovered
and returned as natlas_node_link objects.
'''
def __init__(self):
pass
def __str__(self):
pass
def __repr__(self):
pass
| 4 | 1 | 18 | 0 | 17 | 1 | 1 | 0.13 | 0 | 0 | 0 | 0 | 3 | 21 | 3 | 3 | 62 | 3 | 52 | 25 | 48 | 7 | 27 | 25 | 23 | 1 | 0 | 0 | 3 |
147,744 |
MJL85/natlas
|
MJL85_natlas/natlas/node.py
|
natlas.node.natlas_node
|
class natlas_node:
class _node_opts:
def __init__(self):
self.reset()
def reset(self, setting=False):
self.get_name = setting
self.get_ip = setting
self.get_plat = setting
self.get_ios = setting
self.get_router = setting
self.get_ospf_id = setting
self.get_bgp_las = setting
self.get_hsrp_pri = setting
self.get_hsrp_vip = setting
self.get_serial = setting
self.get_stack = setting
self.get_stack_details = setting
self.get_vss = setting
self.get_vss_details = setting
self.get_svi = setting
self.get_lo = setting
self.get_bootf = setting
self.get_chassis_info = setting
self.get_vpc = setting
def __init__(self, ip=None):
self.opts = natlas_node._node_opts()
self.snmpobj = natlas_snmp()
self.links = []
self.discovered = 0
self.name = None
self.ip = [ip]
self.plat = None
self.ios = None
self.router = None
self.ospf_id = None
self.bgp_las = None
self.hsrp_pri = None
self.hsrp_vip = None
self.serial = None
self.bootfile = None
self.svis = []
self.loopbacks = []
self.vpc_peerlink_if = None
self.vpc_peerlink_node = None
self.vpc_domain = None
self.stack = natlas_node_stack()
self.vss = natlas_node_vss()
self.cdp_vbtbl = None
self.ldp_vbtbl = None
self.link_type_vbtbl = None
self.lag_vbtbl = None
self.vlan_vbtbl = None
self.ifname_vbtbl = None
self.ifip_vbtbl = None
self.svi_vbtbl = None
self.ethif_vbtbl = None
self.trk_allowed_vbtbl = None
self.trk_native_vbtbl = None
self.vpc_vbtbl = None
self.vlan_vbtbl = None
self.vlandesc_vbtbl = None
self.arp_vbtbl = None
def __str__(self):
return (
'Name = %s\n' \
'IP = %s\n' \
'Platform = %s\n' \
'IOS = %s\n' \
'Router = %s\n' \
'OSPF_ID = %s\n' \
'BGP_LAS = %s\n' \
'HSRP_PRI = %s\n' \
'HSRP_VIP = %s\n' \
'Serials = %s\n' \
'Bootfile = %s\n' \
'SVIs = %s\n' \
'Loopbacks = %s\n' \
'VPC_Peerlink_If = %s\n' \
'VPC_Peerlink_Node = %s\n' \
'VPC_Domain = %s\n' \
'Stack = %s\n' \
'VSS = %s\n'
'Links = %s\n'
% (self.name, self.ip, self.plat, self.ios, self.router,
self.ospf_id, self.bgp_las, self.hsrp_pri, self.hsrp_vip,
self.serial, self.bootfile, self.svis, self.loopbacks,
self.vpc_peerlink_if, self.vpc_peerlink_node, self.vpc_domain,
self.stack, self.vss, self.links)
)
def __repr__(self):
return ('<name=%s, ip=%s, plat=%s, ios=%s, serial=%s, router=%s, vss=%s, stack=%s>' %
(self.name, self.ip, self.plat, self.ios, self.serial, self.router, self.vss, self.stack))
def add_link(self, link):
self.links.append(link)
# find valid credentials for this node.
# try each known IP until one works
def try_snmp_creds(self, snmp_creds):
if (self.snmpobj.success == 0):
for ipaddr in self.ip:
if ((ipaddr == '0.0.0.0') | (ipaddr == 'UNKNOWN') | (ipaddr == '')):
continue
self.snmpobj._ip = ipaddr
if (self.snmpobj.get_cred(snmp_creds) == 1):
return 1
return 0
# Query this node.
# Set .opts and .snmp_creds before calling.
def query_node(self):
if (self.snmpobj.ver == 0):
# either try_snmp_creds() was not called first, or it failed to find working creds
return 0
snmpobj = self.snmpobj
if (self.opts.get_name == True):
self.name = self.get_system_name([])
# router
if (self.opts.get_router == True):
if (self.router == None):
self.router = 1 if (snmpobj.get_val(OID_IP_ROUTING) == '1') else 0
if (self.router == 1):
# OSPF
if (self.opts.get_ospf_id == True):
self.ospf_id = snmpobj.get_val(OID_OSPF)
if (self.ospf_id != None):
self.ospf_id = snmpobj.get_val(OID_OSPF_ID)
# BGP
if (self.opts.get_bgp_las == True):
self.bgp_las = snmpobj.get_val(OID_BGP_LAS)
if (self.bgp_las == '0'): # 4500x reports 0 when BGP is disabled
self.bgp_las = None
# HSRP
if (self.opts.get_hsrp_pri == True):
self.hsrp_pri = snmpobj.get_val(OID_HSRP_PRI)
if (self.hsrp_pri != None):
self.hsrp_vip = snmpobj.get_val(OID_HSRP_VIP)
# stack
if (self.opts.get_stack):
self.stack = natlas_node_stack(snmpobj, self.opts)
# vss
if (self.opts.get_vss):
self.vss = natlas_node_vss(snmpobj, self.opts)
# serial
if ((self.opts.get_serial == 1) & (self.stack.count == 0) & (self.vss.enabled == 0)):
self.serial = snmpobj.get_val(OID_SYS_SERIAL)
# SVI
if (self.opts.get_svi == True):
if (self.svi_vbtbl == None):
self.svi_vbtbl = snmpobj.get_bulk(OID_SVI_VLANIF)
if (self.ifip_vbtbl == None):
self.ifip_vbtbl = snmpobj.get_bulk(OID_IF_IP)
for row in self.svi_vbtbl:
for n, v in row:
n = str(n)
vlan = n.split('.')[14]
svi = natlas_node_svi(vlan)
svi_ips = self.__get_cidrs_from_ifidx(v)
svi.ip.extend(svi_ips)
self.svis.append(svi)
# loopback
if (self.opts.get_lo == True):
self.ethif_vbtbl = snmpobj.get_bulk(OID_ETH_IF)
if (self.ifip_vbtbl == None):
self.ifip_vbtbl = snmpobj.get_bulk(OID_IF_IP)
for row in self.ethif_vbtbl:
for n, v in row:
n = str(n)
if (n.startswith(OID_ETH_IF_TYPE) & (v == 24)):
ifidx = n.split('.')[10]
lo_name = snmpobj.cache_lookup(self.ethif_vbtbl, OID_ETH_IF_DESC + '.' + ifidx)
lo_ips = self.__get_cidrs_from_ifidx(ifidx)
lo = natlas_node_lo(lo_name, lo_ips)
self.loopbacks.append(lo)
# bootfile
if (self.opts.get_bootf):
self.bootfile = snmpobj.get_val(OID_SYS_BOOT)
# chassis info (serial, IOS, platform)
if (self.opts.get_chassis_info):
self.__get_chassis_info()
# VPC peerlink
if (self.opts.get_vpc):
self.vpc_domain, self.vpc_peerlink_if = self.__get_vpc_info(self.ethif_vbtbl)
# reset the get options
self.opts.reset()
return 1
def __get_cidrs_from_ifidx(self, ifidx):
ips = []
for ifrow in self.ifip_vbtbl:
for ifn, ifv in ifrow:
ifn = str(ifn)
if (ifn.startswith(OID_IF_IP_ADDR)):
if (str(ifv) == str(ifidx)):
t = ifn.split('.')
ip = ".".join(t[10:])
mask = self.snmpobj.cache_lookup(self.ifip_vbtbl, OID_IF_IP_NETM + ip)
nbits = util.get_net_bits_from_mask(mask)
cidr = '%s/%i' % (ip, nbits)
ips.append(cidr)
return ips
def __cache_common_mibs(self):
if (self.link_type_vbtbl == None):
self.link_type_vbtbl = self.snmpobj.get_bulk(OID_TRUNK_VTP)
if (self.lag_vbtbl == None):
self.lag_vbtbl = self.snmpobj.get_bulk(OID_LAG_LACP)
if (self.vlan_vbtbl == None):
self.vlan_vbtbl = self.snmpobj.get_bulk(OID_IF_VLAN)
if (self.ifname_vbtbl == None):
self.ifname_vbtbl = self.snmpobj.get_bulk(OID_IFNAME)
if (self.trk_allowed_vbtbl == None):
self.trk_allowed_vbtbl = self.snmpobj.get_bulk(OID_TRUNK_ALLOW)
if (self.trk_native_vbtbl == None):
self.trk_native_vbtbl = self.snmpobj.get_bulk(OID_TRUNK_NATIVE)
if (self.ifip_vbtbl == None):
self.ifip_vbtbl = self.snmpobj.get_bulk(OID_IF_IP)
#
# Get a list of CDP neighbors.
# Returns a list of natlas_node_link's.
# Will always return an array.
#
def get_cdp_neighbors(self):
neighbors = []
snmpobj = self.snmpobj
# get list of CDP neighbors
self.cdp_vbtbl = snmpobj.get_bulk(OID_CDP)
if (self.cdp_vbtbl == None):
print('No CDP Neighbors Found.')
return []
# cache some common MIB trees
self.__cache_common_mibs()
for row in self.cdp_vbtbl:
for name, val in row:
name = str(name)
# process only if this row is a CDP_DEVID
if (name.startswith(OID_CDP_DEVID) == 0):
continue
t = name.split('.')
ifidx = t[14]
ifidx2 = t[15]
# get remote IP
rip = snmpobj.cache_lookup(self.cdp_vbtbl, OID_CDP_IPADDR + '.' + ifidx + '.' + ifidx2)
rip = util.convert_ip_int_str(rip)
# get local port
lport = self.__get_ifname(ifidx)
# get remote port
rport = snmpobj.cache_lookup(self.cdp_vbtbl, OID_CDP_DEVPORT + '.' + ifidx + '.' + ifidx2)
rport = self.shorten_port_name(rport)
# get remote platform
rplat = snmpobj.cache_lookup(self.cdp_vbtbl, OID_CDP_DEVPLAT + '.' + ifidx + '.' + ifidx2)
# get IOS version
rios = snmpobj.cache_lookup(self.cdp_vbtbl, OID_CDP_IOS + '.' + ifidx + '.' + ifidx2)
if (rios != None):
try:
rios = binascii.unhexlify(rios[2:])
except:
pass
rios = self.__format_ios_ver(rios)
link = self.__get_node_link_info(ifidx, ifidx2)
link.remote_name = val.prettyPrint()
link.remote_ip = rip
link.discovered_proto = 'cdp'
link.local_port = lport
link.remote_port = rport
link.remote_plat = rplat
link.remote_ios = rios
neighbors.append(link)
return neighbors
#
# Get a list of LLDP neighbors.
# Returns a list of natlas_node_link's
# Will always return an array.
#
def get_lldp_neighbors(self):
neighbors = []
snmpobj = self.snmpobj
self.lldp_vbtbl = snmpobj.get_bulk(OID_LLDP)
if (self.lldp_vbtbl == None):
print('No LLDP Neighbors Found.')
return []
self.__cache_common_mibs()
for row in self.lldp_vbtbl:
for name, val in row:
name = str(name)
if (name.startswith(OID_LLDP_TYPE) == 0):
continue
t = name.split('.')
ifidx = t[12]
ifidx2 = t[13]
rip = ''
for r in self.lldp_vbtbl:
for n, v in r:
n = str(n)
if (n.startswith(OID_LLDP_DEVADDR + '.' + ifidx + '.' + ifidx2)):
t2 = n.split('.')
rip = '.'.join(t2[16:])
lport = self.__get_ifname(ifidx)
rport = snmpobj.cache_lookup(self.lldp_vbtbl, OID_LLDP_DEVPORT + '.' + ifidx + '.' + ifidx2)
rport = self.shorten_port_name(rport)
devid = snmpobj.cache_lookup(self.lldp_vbtbl, OID_LLDP_DEVID + '.' + ifidx + '.' + ifidx2)
try:
mac_seg = [devid[x:x+4] for x in range(2, len(devid), 4)]
devid = '.'.join(mac_seg)
except:
pass
rimg = snmpobj.cache_lookup(self.lldp_vbtbl, OID_LLDP_DEVDESC + '.' + ifidx + '.' + ifidx2)
if (rimg != None):
try:
rimg = binascii.unhexlify(rimg[2:])
except:
pass
rimg = self.__format_ios_ver(rimg)
name = snmpobj.cache_lookup(self.lldp_vbtbl, OID_LLDP_DEVNAME + '.' + ifidx + '.' + ifidx2)
if ((name == None) | (name == '')):
name = devid
link = self.__get_node_link_info(ifidx, ifidx2)
link.remote_ip = rip
link.remote_name = name
link.discovered_proto = 'lldp'
link.local_port = lport
link.remote_port = rport
link.remote_plat = None
link.remote_ios = rimg
link.remote_mac = devid
neighbors.append(link)
return neighbors
def __get_node_link_info(self, ifidx, ifidx2):
snmpobj = self.snmpobj
# get link type (trunk ?)
link_type = snmpobj.cache_lookup(self.link_type_vbtbl, OID_TRUNK_VTP + '.' + ifidx)
native_vlan = None
allowed_vlans = 'All'
if (link_type == '1'):
native_vlan = snmpobj.cache_lookup(self.trk_native_vbtbl, OID_TRUNK_NATIVE + '.' + ifidx)
allowed_vlans = snmpobj.cache_lookup(self.trk_allowed_vbtbl, OID_TRUNK_ALLOW + '.' + ifidx)
allowed_vlans = self.__parse_allowed_vlans(allowed_vlans)
# get LAG membership
lag = snmpobj.cache_lookup(self.lag_vbtbl, OID_LAG_LACP + '.' + ifidx)
lag_ifname = self.__get_ifname(lag)
lag_ips = self.__get_cidrs_from_ifidx(lag)
# get VLAN info
vlan = snmpobj.cache_lookup(self.vlan_vbtbl, OID_IF_VLAN + '.' + ifidx)
# get IP address
lifips = self.__get_cidrs_from_ifidx(ifidx)
link = natlas_node_link()
link.link_type = link_type
link.vlan = vlan
link.local_native_vlan = native_vlan
link.local_allowed_vlans = allowed_vlans
link.local_lag = lag_ifname
link.local_lag_ips = lag_ips
link.remote_lag_ips = []
link.local_if_ip = lifips[0] if len(lifips) else None
return link
def __parse_allowed_vlans(self, allowed_vlans):
if (allowed_vlans.startswith('0x') == False):
return 'All'
ret = ''
group = 0
op = 0
for i in range(2, len(allowed_vlans)):
v = int(allowed_vlans[i], 16)
for b in range(0, 4):
a = v & (0x1 << (3 - b))
vlan = ((i-2)*4)+b
if (a):
if (op == 1):
group += 1
else:
if (len(ret)):
if (group > 1):
ret += '-'
ret += str(vlan - 1) if vlan else '1'
else:
ret += ',%i' % vlan
else:
ret += str(vlan)
group = 0
op = 1
else:
if (op == 1):
if (len(ret)):
if (group > 1):
ret += '-%i' % (vlan - 1)
op = 0
group = 0
if (op):
if (ret == '1'):
return 'All'
if (group):
ret += '-1001'
else:
ret += ',1001'
return ret if len(ret) else 'All'
def __get_chassis_info(self):
# Get:
# Serial number
# Platform
# IOS
# Slow but reliable method by using SNMP directly.
# Usually we will get this via CDP.
snmpobj = self.snmpobj
if ((self.stack.count > 0) | (self.vss.enabled == 1)):
# Use opts.get_stack_details
# or opts.get_vss_details
# for this.
return
class_vbtbl = snmpobj.get_bulk(OID_ENTPHYENTRY_CLASS)
if (self.opts.get_serial): serial_vbtbl = snmpobj.get_bulk(OID_ENTPHYENTRY_SERIAL)
if (self.opts.get_plat): platf_vbtbl = snmpobj.get_bulk(OID_ENTPHYENTRY_PLAT)
if (self.opts.get_ios): ios_vbtbl = snmpobj.get_bulk(OID_ENTPHYENTRY_SOFTWARE)
if (class_vbtbl == None):
return
for row in class_vbtbl:
for n, v in row:
n = str(n)
if (v != ENTPHYCLASS_CHASSIS):
continue
t = n.split('.')
idx = t[12]
if (self.opts.get_serial): self.serial = snmpobj.cache_lookup(serial_vbtbl, OID_ENTPHYENTRY_SERIAL + '.' + idx)
if (self.opts.get_plat): self.plat = snmpobj.cache_lookup(platf_vbtbl, OID_ENTPHYENTRY_PLAT + '.' + idx)
if (self.opts.get_ios): self.ios = snmpobj.cache_lookup(ios_vbtbl, OID_ENTPHYENTRY_SOFTWARE + '.' + idx)
if (self.opts.get_ios):
# modular switches might have IOS on a module rather than chassis
if (self.ios == ''):
for row in class_vbtbl:
for n, v in row:
n = str(n)
if (v != ENTPHYCLASS_MODULE):
continue
t = n.split('.')
idx = t[12]
self.ios = snmpobj.cache_lookup(ios_vbtbl, OID_ENTPHYENTRY_SOFTWARE + '.' + idx)
if (self.ios != ''):
break
if (self.ios != ''):
break
self.ios = self.__format_ios_ver(self.ios)
return
#
# Lookup and format an interface name from a cache table of indexes.
#
def __get_ifname(self, ifidx):
if ((ifidx == None) | (ifidx == OID_ERR)):
return 'UNKNOWN'
if (self.ifname_vbtbl == None):
self.ifname_vbtbl = self.snmpobj.get_bulk(OID_IFNAME)
str = self.snmpobj.cache_lookup(self.ifname_vbtbl, OID_IFNAME + '.' + ifidx)
str = self.shorten_port_name(str)
return str or 'UNKNOWN'
def get_system_name(self, domains):
return util.shorten_host_name(self.snmpobj.get_val(OID_SYSNAME), domains)
#
# Normalize a reported software version string.
#
def __format_ios_ver(self, img):
x = img
if (type(img) == bytes):
x = img.decode("utf-8")
try:
img_s = re.search('(Version:? |CCM:)([^ ,$]*)', x)
except:
return img
if (img_s):
if (img_s.group(1) == 'CCM:'):
return 'CCM %s' % img_s.group(2)
return img_s.group(2)
return img
def get_ipaddr(self):
'''
Return the best IP address for this device.
Returns the first matching IP:
- Lowest Loopback interface
- Lowest SVI address/known IP
'''
# Loopbacks - first interface
if (len(self.loopbacks)):
ips = self.loopbacks[0].ips
if (len(ips)):
ips.sort()
return util.strip_slash_masklen(ips[0])
# SVIs + all known - lowest address
ips = []
for svi in self.svis:
ips.extend(svi.ip)
ips.extend(self.ip)
if (len(ips)):
ips.sort()
return util.strip_slash_masklen(ips[0])
return ''
def __get_vpc_info(self, ifarr):
'''
If VPC is enabled,
Return the VPC domain and interface name of the VPC peerlink.
'''
if (self.vpc_vbtbl == None):
self.vpc_vbtbl = self.snmpobj.get_bulk(OID_VPC_PEERLINK_IF)
if ((self.vpc_vbtbl == None) | (len(self.vpc_vbtbl) == 0)):
return (None, None)
domain = natlas_snmp.get_last_oid_token(self.vpc_vbtbl[0][0][0])
ifidx = str(self.vpc_vbtbl[0][0][1])
ifname = self.snmpobj.cache_lookup(ifarr, OID_ETH_IF_DESC + '.' + ifidx)
ifname = self.shorten_port_name(ifname)
return (domain, ifname)
def get_vlans(self):
# use cache if possible
if (self.vlan_vbtbl == None):
self.vlan_vbtbl = self.snmpobj.get_bulk(OID_VLANS)
if (self.vlandesc_vbtbl == None):
self.vlandesc_vbtbl = self.snmpobj.get_bulk(OID_VLAN_DESC)
arr = []
i = 0
for vlan_row in self.vlan_vbtbl:
for vlan_n, vlan_v in vlan_row:
# get VLAN ID from OID
vlan = natlas_snmp.get_last_oid_token(vlan_n)
if (vlan >= 1002):
continue
arr.append(natlas_vlan(vlan, str(self.vlandesc_vbtbl[i][0][1])))
i = i + 1
return arr
def get_arp_table(self):
# use cache if possible
if (self.arp_vbtbl == None):
self.arp_vbtbl = self.snmpobj.get_bulk(OID_ARP)
arr = []
for r in self.arp_vbtbl:
for n, v in r:
n = str(n)
if (n.startswith(OID_ARP_VLAN)):
tok = n.split('.')
ip = '.'.join(tok[11:])
interf = self.__get_ifname(str(v))
mach = self.snmpobj.cache_lookup(self.arp_vbtbl, OID_ARP_MAC+'.'+str(v)+'.'+ip)
mac = natlas_mac.mac_hex_to_ascii(mach, 1)
atype = self.snmpobj.cache_lookup(self.arp_vbtbl, OID_ARP_TYPE+'.'+str(v)+'.'+ip)
atype = int(atype)
type_str = 'unknown'
if (atype == ARP_TYPE_OTHER): type_str = 'other'
elif (atype == ARP_TYPE_INVALID): type_str = 'invalid'
elif (atype == ARP_TYPE_DYNAMIC): type_str = 'dynamic'
elif (atype == ARP_TYPE_STATIC): type_str = 'static'
arr.append(natlas_arp(ip, mac, interf, type_str))
return arr if arr else []
def shorten_port_name(self, port):
if (port == OID_ERR):
return 'UNKNOWN'
if (port != None):
port = port.replace('TenGigabitEthernet', 'te')
port = port.replace('GigabitEthernet', 'gi')
port = port.replace('FastEthernet', 'fa')
port = port.replace('port-channel', 'po')
port = port.replace('Te', 'te')
port = port.replace('Gi', 'gi')
port = port.replace('Fa', 'fa')
port = port.replace('Po', 'po')
return port
|
class natlas_node:
class _node_opts:
def __init__(self):
pass
def reset(self, setting=False):
pass
def __init__(self, ip=None):
pass
def __str__(self):
pass
def __repr__(self):
pass
def add_link(self, link):
pass
def try_snmp_creds(self, snmp_creds):
pass
def query_node(self):
pass
def __get_cidrs_from_ifidx(self, ifidx):
pass
def __cache_common_mibs(self):
pass
def get_cdp_neighbors(self):
pass
def get_lldp_neighbors(self):
pass
def __get_node_link_info(self, ifidx, ifidx2):
pass
def __parse_allowed_vlans(self, allowed_vlans):
pass
def __get_chassis_info(self):
pass
def __get_ifname(self, ifidx):
pass
def get_system_name(self, domains):
pass
def __format_ios_ver(self, img):
pass
def get_ipaddr(self):
'''
Return the best IP address for this device.
Returns the first matching IP:
- Lowest Loopback interface
- Lowest SVI address/known IP
'''
pass
def __get_vpc_info(self, ifarr):
'''
If VPC is enabled,
Return the VPC domain and interface name of the VPC peerlink.
'''
pass
def get_vlans(self):
pass
def get_arp_table(self):
pass
def shorten_port_name(self, port):
pass
| 25 | 2 | 27 | 4 | 21 | 2 | 6 | 0.15 | 0 | 16 | 11 | 0 | 21 | 37 | 21 | 21 | 675 | 121 | 483 | 180 | 458 | 72 | 459 | 180 | 434 | 29 | 0 | 6 | 146 |
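natlas_node.__parse_allowed_vlans() above turns a trunk's allowed-VLAN hex bitmap into a compact range string. The sketch below shows only the underlying bitmap decoding and omits the range compression; allowed_vlans_from_bitmap() is a made-up name for illustration, not part of natlas.

def allowed_vlans_from_bitmap(bitmap_hex):
    # bit N of the bitmap, counted from the most significant bit, marks VLAN N as allowed
    if not bitmap_hex.startswith('0x'):
        return []
    vlans = []
    for i, digit in enumerate(bitmap_hex[2:]):
        nibble = int(digit, 16)
        for b in range(4):
            if nibble & (0x1 << (3 - b)):
                vlans.append(i * 4 + b)
    return vlans

# 0x6 = 0110b in the first nibble, so VLANs 1 and 2 are allowed
assert allowed_vlans_from_bitmap('0x6000') == [1, 2]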
147,745 |
MJL85/natlas
|
MJL85_natlas/natlas/node.py
|
natlas.node.natlas_arp
|
class natlas_arp:
def __init__(self, ip, mac, interf, arp_type):
self.ip = ip
self.mac = mac
self.interf = interf
self.arp_type = arp_type
def __str__(self):
return ('<ip="%s",mac="%s",interf="%s",arp_type="%s">' % (self.ip, self.mac, self.interf, self.arp_type))
def __repr__(self):
return self.__str__()
|
class natlas_arp:
def __init__(self, ip, mac, interf, arp_type):
pass
def __str__(self):
pass
def __repr__(self):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 4 | 3 | 3 | 10 | 0 | 10 | 8 | 6 | 0 | 10 | 8 | 6 | 1 | 0 | 0 | 3 |
147,746 |
MJL85/natlas
|
MJL85_natlas/natlas/network.py
|
natlas.network.natlas_network
|
class natlas_network:
def __init__(self, conf):
self.root_node = None
self.nodes = []
self.max_depth = 0
self.config = conf
self.verbose = 1
def __str__(self):
return ('<root_node="%s", num_nodes=%i>' % (self.root_node.name, len(self.nodes)))
def __repr__(self):
return self.__str__()
def set_max_depth(self, depth):
self.max_depth = depth
def reset_discovered(self):
for n in self.nodes:
n.discovered = 0
def set_verbose(self, level):
'''
Set the verbose output level for discovery output.
Args:
Level 0 = no output
1 = normal output
'''
self.verbose = level
def discover(self, ip):
'''
Discover the network starting at the defined root node IP.
Recursively enumerate the network tree up to self.max_depth.
Populates self.nodes[] as a list of discovered nodes in the
network with self.root_node being the root.
This function will discover the network with minimal information.
It is enough to define the structure of the network but will not
include much data on each node. Call discover_details() after this
to update the self.nodes[] array with more info.
'''
if (self.verbose > 0):
print('Discovery codes:\n' \
' . depth %s connection error\n' \
' %s discovering node %s numerating adjacencies\n' \
' %s include node %s leaf node\n' %
(DCODE_ERR_SNMP_STR,
DCODE_DISCOVERED_STR, DCODE_STEP_INTO_STR,
DCODE_INCLUDE_STR, DCODE_LEAF_STR)
)
print('Discovering network...')
# Start the process of querying this node and recursing adjacencies.
node, new_node = self.__query_node(ip, 'UNKNOWN')
self.root_node = node
if (node != None):
self.nodes.append(node)
self.__print_step(node.ip[0], node.name, 0, DCODE_ROOT|DCODE_DISCOVERED)
self.__discover_node(node, 0)
else:
return
# we may have missed chassis info
for n in self.nodes:
if ((n.serial == None) | (n.plat == None) | (n.ios == None)):
n.opts.get_chassis_info = True
if (n.serial == None):
n.opts.get_serial = True
if (n.ios == None):
n.opts.get_ios = True
if (n.plat == None):
n.opts.get_plat = True
n.query_node()
def discover_details(self):
'''
Enumerate the discovered nodes from discover() and update the
nodes in the array with additional info.
'''
if (self.root_node == None):
return
if (self.verbose > 0):
print('\nCollecting node details...')
ni = 0
for n in self.nodes:
ni = ni + 1
indicator = '+'
if (n.snmpobj.success == 0):
indicator = '!'
if (self.verbose > 0):
sys.stdout.write('[%i/%i]%s %s (%s)' % (ni, len(self.nodes), indicator, n.name, n.snmpobj._ip))
sys.stdout.flush()
# set what details to discover for this node
n.opts.get_router = True
n.opts.get_ospf_id = True
n.opts.get_bgp_las = True
n.opts.get_hsrp_pri = True
n.opts.get_hsrp_vip = True
n.opts.get_serial = True
n.opts.get_stack = True
n.opts.get_stack_details = self.config.diagram.get_stack_members
n.opts.get_vss = True
n.opts.get_vss_details = self.config.diagram.get_vss_members
n.opts.get_svi = True
n.opts.get_lo = True
n.opts.get_vpc = True
n.opts.get_ios = True
n.opts.get_plat = True
start = timer()
n.query_node()
end = timer()
if (self.verbose > 0):
print(' %.2f sec' % (end - start))
# There is some back fill information we can populate now that
# we know all there is to know.
if (self.verbose > 0):
print('\nBack filling node details...')
for n in self.nodes:
# Find and link VPC nodes together for easy reference later
if ((n.vpc_domain != None) & (n.vpc_peerlink_node == None)):
for link in n.links:
if ((link.local_port == n.vpc_peerlink_if) | (link.local_lag == n.vpc_peerlink_if)):
n.vpc_peerlink_node = link.node
link.node.vpc_peerlink_node = n
break
def __print_step(self, ip, name, depth, dcodes):
if (self.verbose == 0):
return
if (dcodes & DCODE_DISCOVERED):
sys.stdout.write('%-3i' % len(self.nodes))
else:
sys.stdout.write(' ')
if (dcodes & DCODE_INCLUDE):
# flip this off cause we didn't even try
dcodes = dcodes & ~DCODE_ERR_SNMP
if (dcodes & DCODE_ROOT): sys.stdout.write( DCODE_ROOT_STR )
elif (dcodes & DCODE_CDP): sys.stdout.write( DCODE_CDP_STR )
elif (dcodes & DCODE_LLDP): sys.stdout.write( DCODE_LLDP_STR )
else: sys.stdout.write(' ')
status = ''
if (dcodes & DCODE_ERR_SNMP): status += DCODE_ERR_SNMP_STR
if (dcodes & DCODE_LEAF): status += DCODE_LEAF_STR
elif (dcodes & DCODE_INCLUDE): status += DCODE_INCLUDE_STR
if (dcodes & DCODE_DISCOVERED): status += DCODE_DISCOVERED_STR
elif (dcodes & DCODE_STEP_INTO): status += DCODE_STEP_INTO_STR
sys.stdout.write('%3s' % status)
for i in range(0, depth):
sys.stdout.write('.')
name = util.shorten_host_name(name, self.config.host_domains)
if (self.verbose > 0):
print('%s (%s)' % (name, ip))
def __query_node(self, ip, host):
'''
Query this node.
Return node details and if we already knew about it or if this is a new node.
Don't save the node to the known list, just return info about it.
Args:
ip: IP Address of the node.
host: Hostname of this node (if known from CDP/LLDP)
Returns:
natlas_node: Node of this object
int: NODE_NEW = Newly discovered node
NODE_NEWIP = Already knew about this node but not by this IP
NODE_KNOWN = Already knew about this node
'''
host = util.shorten_host_name(host, self.config.host_domains)
node, node_updated = self.__get_known_node(ip, host)
if (node == None):
# new node
node = natlas_node()
node.name = host
node.ip = [ip]
state = NODE_NEW
else:
# existing node
if (node.snmpobj.success == 1):
# we already queried this node successfully - return it
return (node, NODE_KNOWN)
# existing node but we couldn't connect before
if (node_updated == 1):
state = NODE_NEWIP
else:
state = NODE_KNOWN
node.name = host
if (ip == 'UNKNOWN'):
return (node, state)
# vmware ESX reports the IP as 0.0.0.0
# LLDP can return an empty string for IPs.
if ((ip == '0.0.0.0') | (ip == '')):
return (node, state)
# find valid credentials for this node
if (node.try_snmp_creds(self.config.snmp_creds) == 0):
return (node, state)
node.name = node.get_system_name(self.config.host_domains)
if (node.name != host):
# the hostname changed (cdp/lldp vs snmp)!
# double check we don't already know about this node
if (state == NODE_NEW):
node2, node_updated2 = self.__get_known_node(ip, host)
if ((node2 != None) & (node_updated2 == 0)):
return (node, NODE_KNOWN)
if (node_updated2 == 1):
state = NODE_NEWIP
# Finally, if we still don't have a name, use the IP.
# e.g. Maybe CDP/LLDP was empty and we don't have good credentials
# for this device. A blank name can break Dot.
if ((node.name == None) | (node.name == '')):
node.name = node.get_ipaddr()
node.opts.get_serial = True # CDP/LLDP does not report, need for extended ACL
node.query_node()
return (node, state)
def __get_known_node(self, ip, host):
'''
Look for known nodes by IP and HOST.
If found by HOST, add the IP if not already known.
Return:
node: Node, if found. Otherwise None.
updated: 1=updated, 0=not updated
'''
# already known by IP ?
for ex in self.nodes:
for exip in ex.ip:
if (exip == '0.0.0.0'):
continue
if (exip == ip):
return (ex, 0)
# already known by HOST ?
node = self.__get_known_node_by_host(host)
if (node != None):
# node already known
if (ip not in node.ip):
node.ip.append(ip)
return (node, 1)
return (node, 0)
return (None, 0)
def __discover_node(self, node, depth):
'''
Given a node, recursively enumerate its adjacencies
until we reach the specified depth (>0).
Args:
node: natlas_node object to enumerate.
depth: The depth left that we can go further away from the root.
'''
if (node == None):
return
if (depth >= self.max_depth):
return
if (node.discovered > 0):
return
node.discovered = 1
# vmware ESX can report IP as 0.0.0.0
# If we are allowing 0.0.0.0/32 in the config,
# then we added it as a leaf, but don't discover it
if (node.ip[0] == '0.0.0.0'):
return
# may be a leaf we couldn't connect to previously
if (node.snmpobj.success == 0):
return
# print some info to stdout
dcodes = DCODE_STEP_INTO
if (depth == 0):
dcodes |= DCODE_ROOT
self.__print_step(node.ip[0], node.name, depth, dcodes)
# get the cached snmp credentials
snmpobj = node.snmpobj
# list of valid neighbors to discover next
valid_neighbors = []
# get list of neighbors
cdp_neighbors = node.get_cdp_neighbors()
lldp_neighbors = node.get_lldp_neighbors()
neighbors = cdp_neighbors + lldp_neighbors
if (len(neighbors) == 0):
return
for n in neighbors:
# some neighbors may not advertise IP addresses - default them to 0.0.0.0
if (n.remote_ip == None):
n.remote_ip = '0.0.0.0'
# check the ACL
acl_action = self.__match_node_acl(n.remote_ip, n.remote_name)
if (acl_action == 'deny'):
# deny inclusion of this node
continue
dcodes = DCODE_DISCOVERED
child = None
if (acl_action == 'include'):
# include this node but do not discover it
child = natlas_node()
child.ip = [n.remote_ip]
dcodes |= DCODE_INCLUDE
else:
# discover this node
child, query_result = self.__query_node(n.remote_ip, n.remote_name)
# if we couldn't pull info from SNMP fill in what we know
if (child.snmpobj.success == 0):
child.name = util.shorten_host_name(n.remote_name, self.config.host_domains)
dcodes |= DCODE_ERR_SNMP
# need to check the ACL again for extended ops (we have more info)
acl_action = self.__match_node_acl(n.remote_ip, n.remote_name, n.remote_plat, n.remote_ios, child.serial)
if (acl_action == 'deny'):
continue
if (query_result == NODE_NEW):
self.nodes.append(child)
if (acl_action == 'leaf'): dcodes |= DCODE_LEAF
if (n.discovered_proto == 'cdp'): dcodes |= DCODE_CDP
if (n.discovered_proto == 'lldp'): dcodes |= DCODE_LLDP
self.__print_step(n.remote_ip, n.remote_name, depth+1, dcodes)
# CDP/LLDP advertises the platform
child.plat = n.remote_plat
child.ios = n.remote_ios
# add the discovered node to the link object and link to the parent
n.node = child
self.__add_link(node, n)
# if we need to discover this node then add it to the list
if ((query_result == NODE_NEW) & (acl_action != 'leaf') & (acl_action != 'include')):
valid_neighbors.append(child)
# discover the valid neighbors
for n in valid_neighbors:
self.__discover_node(n, depth+1)
def __match_node_acl(self, ip, host, platform=None, software=None, serial=None):
for acl in self.config.discover_acl:
if (acl.type == 'ip'):
if (self.__match_ip(ip, acl.str)):
return acl.action
elif (acl.type == 'host'):
if (self.__match_strpattern(host, acl.str)):
return acl.action
elif (acl.type == 'platform'):
if ((platform != None) and self.__match_strpattern(platform, acl.str)):
return acl.action
elif (acl.type == 'software'):
if ((software != None) and self.__match_strpattern(software, acl.str)):
return acl.action
elif (acl.type == 'serial'):
if ((serial != None) and self.__match_strpattern(serial, acl.str)):
return acl.action
return 'deny'
def __match_ip(self, ip, cidr):
if (cidr == 'any'):
return 1
validate = re.match('^([0-2]?[0-9]?[0-9]\.){3}[0-2]?[0-9]?[0-9]$', ip)
if (validate == None):
return 0
if (USE_NETADDR):
if (ip in IPNetwork(cidr)):
return 1
else:
if (util.is_ipv4_in_cidr(ip, cidr)):
return 1
return 0
def __match_strpattern(self, str, pattern):
if (str == '*'):
return 1
if (re.search(pattern, str)):
return 1
return 0
#
# Add or update a link.
# Return
# 0 - Found an existing link and updated it
# 1 - Added as a new link
#
def __add_link(self, node, link):
if (link.node.discovered == 1):
# both nodes have been discovered,
# so try to update existing reverse link info
# instead of adding a new link
for n in self.nodes:
# find the child, which was the original parent
if (n.name == link.node.name):
# find the existing link
for ex_link in n.links:
if ((ex_link.node.name == node.name) & (ex_link.local_port == link.remote_port)):
if ((link.local_if_ip != 'UNKNOWN') & (ex_link.remote_if_ip == None)):
ex_link.remote_if_ip = link.local_if_ip
if ((link.local_lag != 'UNKNOWN') & (ex_link.remote_lag == None)):
ex_link.remote_lag = link.local_lag
if ((len(link.local_lag_ips) == 0) & len(ex_link.remote_lag_ips)):
ex_link.remote_lag_ips = link.local_lag_ips
if ((link.local_native_vlan != None) & (ex_link.remote_native_vlan == None)):
ex_link.remote_native_vlan = link.local_native_vlan
if ((link.local_allowed_vlans != None) & (ex_link.remote_allowed_vlans == None)):
ex_link.remote_allowed_vlans = link.local_allowed_vlans
return 0
else:
for ex_link in node.links:
if ((ex_link.node.name == link.node.name) & (ex_link.local_port == link.local_port)):
# haven't discovered yet but somehow we have this link twice.
# maybe from different discovery processes?
return 0
node.add_link(link)
return 1
def __get_known_node_by_host(self, hostname):
'''
Determine if the node is already known by hostname.
If it is, return it.
'''
for n in self.nodes:
if (n.name == hostname):
return n
return None
|
class natlas_network:
def __init__(self, conf):
pass
def __str__(self):
pass
def __repr__(self):
pass
def set_max_depth(self, depth):
pass
def reset_discovered(self):
pass
def set_verbose(self, level):
'''
Set the verbose output level for discovery output.
Args:
Level 0 = no output
1 = normal output
'''
pass
def discover(self, ip):
'''
Discover the network starting at the defined root node IP.
Recursively enumerate the network tree up to self.max_depth.
Populates self.nodes[] as a list of discovered nodes in the
network with self.root_node being the root.
This function will discover the network with minimal information.
It is enough to define the structure of the network but will not
include much data on each node. Call discover_details() after this
to update the self.nodes[] array with more info.
'''
pass
def discover_details(self):
'''
Enumerate the discovered nodes from discover() and update the
nodes in the array with additional info.
'''
pass
def __print_step(self, ip, name, depth, dcodes):
pass
def __query_node(self, ip, host):
'''
Query this node.
Return node details and if we already knew about it or if this is a new node.
Don't save the node to the known list, just return info about it.
Args:
ip: IP Address of the node.
host: Hostname of this node (if known from CDP/LLDP)
Returns:
natlas_node: Node of this object
int: NODE_NEW = Newly discovered node
NODE_NEWIP = Already knew about this node but not by this IP
NODE_KNOWN = Already knew about this node
'''
pass
def __get_known_node(self, ip, host):
'''
Look for known nodes by IP and HOST.
If found by HOST, add the IP if not already known.
Return:
node: Node, if found. Otherwise None.
updated: 1=updated, 0=not updated
'''
pass
def __discover_node(self, node, depth):
'''
Given a node, recursively enumerate its adjacencies
until we reach the specified depth (>0).
Args:
node: natlas_node object to enumerate.
depth: The depth left that we can go further away from the root.
'''
pass
def __match_node_acl(self, ip, host, platform=None, software=None, serial=None):
pass
def __match_ip(self, ip, cidr):
pass
def __match_strpattern(self, str, pattern):
pass
def __add_link(self, node, link):
pass
def __get_known_node_by_host(self, hostname):
'''
Determine if the node is already known by hostname.
If it is, return it.
'''
pass
| 18 | 7 | 26 | 4 | 17 | 6 | 7 | 0.37 | 0 | 3 | 2 | 0 | 17 | 5 | 17 | 17 | 476 | 85 | 286 | 55 | 268 | 106 | 275 | 55 | 257 | 20 | 0 | 6 | 117 |
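natlas_network.__discover_node() above walks adjacencies recursively, marking each node as discovered and stopping at max_depth. Below is a minimal standalone sketch of that recursion pattern using plain dicts and a toy topology; it is an illustration of the idea, not the natlas API.

def discover(node, depth, max_depth, get_neighbors):
    # stop at the depth limit and never revisit a node, so loops in the topology terminate
    if depth >= max_depth or node.get('discovered'):
        return
    node['discovered'] = True
    for neighbor in get_neighbors(node):
        discover(neighbor, depth + 1, max_depth, get_neighbors)

# toy topology: a triangle of three switches
a, b, c = {'name': 'a'}, {'name': 'b'}, {'name': 'c'}
adj = {'a': [b, c], 'b': [a, c], 'c': [a, b]}
discover(a, 0, 8, lambda n: adj[n['name']])
assert all(n.get('discovered') for n in (a, b, c))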
147,747 |
MJL85/natlas
|
MJL85_natlas/natlas/node_stack.py
|
natlas.node_stack.natlas_node_stack_member
|
class natlas_node_stack_member:
def __init__(self):
self.opts = None
self.num = 0
self.role = 0
self.pri = 0
self.mac = None
self.img = None
self.serial = None
self.plat = None
def __str__(self):
return ('<num=%s,role=%s,serial=%s>' % (self.num, self.role, self.serial))
def __repr__(self):
return self.__str__()
|
class natlas_node_stack_member:
def __init__(self):
pass
def __str__(self):
pass
def __repr__(self):
pass
| 4 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 8 | 3 | 3 | 16 | 2 | 14 | 12 | 10 | 0 | 14 | 12 | 10 | 1 | 0 | 0 | 3 |