id
int64 0
843k
| repository_name
stringlengths 7
55
| file_path
stringlengths 9
332
| class_name
stringlengths 3
290
| human_written_code
stringlengths 12
4.36M
| class_skeleton
stringlengths 19
2.2M
| total_program_units
int64 1
9.57k
| total_doc_str
int64 0
4.2k
| AvgCountLine
float64 0
7.89k
| AvgCountLineBlank
float64 0
300
| AvgCountLineCode
float64 0
7.89k
| AvgCountLineComment
float64 0
7.89k
| AvgCyclomatic
float64 0
130
| CommentToCodeRatio
float64 0
176
| CountClassBase
float64 0
48
| CountClassCoupled
float64 0
589
| CountClassCoupledModified
float64 0
581
| CountClassDerived
float64 0
5.37k
| CountDeclInstanceMethod
float64 0
4.2k
| CountDeclInstanceVariable
float64 0
299
| CountDeclMethod
float64 0
4.2k
| CountDeclMethodAll
float64 0
4.2k
| CountLine
float64 1
115k
| CountLineBlank
float64 0
9.01k
| CountLineCode
float64 0
94.4k
| CountLineCodeDecl
float64 0
46.1k
| CountLineCodeExe
float64 0
91.3k
| CountLineComment
float64 0
27k
| CountStmt
float64 1
93.2k
| CountStmtDecl
float64 0
46.1k
| CountStmtExe
float64 0
90.2k
| MaxCyclomatic
float64 0
759
| MaxInheritanceTree
float64 0
16
| MaxNesting
float64 0
34
| SumCyclomatic
float64 0
6k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
147,948 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz.tzrange
|
class tzrange(tzrangebase):
"""
The ``tzrange`` object is a time zone specified by a set of offsets and
abbreviations, equivalent to the way the ``TZ`` variable can be specified
in POSIX-like systems, but using Python delta objects to specify DST
start, end and offsets.
:param stdabbr:
The abbreviation for standard time (e.g. ``'EST'``).
:param stdoffset:
An integer or :class:`datetime.timedelta` object or equivalent
specifying the base offset from UTC.
If unspecified, +00:00 is used.
:param dstabbr:
The abbreviation for DST / "Summer" time (e.g. ``'EDT'``).
If specified, with no other DST information, DST is assumed to occur
and the default behavior or ``dstoffset``, ``start`` and ``end`` is
used. If unspecified and no other DST information is specified, it
is assumed that this zone has no DST.
If this is unspecified and other DST information is *is* specified,
DST occurs in the zone but the time zone abbreviation is left
unchanged.
:param dstoffset:
A an integer or :class:`datetime.timedelta` object or equivalent
specifying the UTC offset during DST. If unspecified and any other DST
information is specified, it is assumed to be the STD offset +1 hour.
:param start:
A :class:`relativedelta.relativedelta` object or equivalent specifying
the time and time of year that daylight savings time starts. To specify,
for example, that DST starts at 2AM on the 2nd Sunday in March, pass:
``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``
If unspecified and any other DST information is specified, the default
value is 2 AM on the first Sunday in April.
:param end:
A :class:`relativedelta.relativedelta` object or equivalent representing
the time and time of year that daylight savings time ends, with the
same specification method as in ``start``. One note is that this should
point to the first time in the *standard* zone, so if a transition
occurs at 2AM in the DST zone and the clocks are set back 1 hour to 1AM,
set the `hours` parameter to +1.
**Examples:**
.. testsetup:: tzrange
from dateutil.tz import tzrange, tzstr
.. doctest:: tzrange
>>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT")
True
>>> from dateutil.relativedelta import *
>>> range1 = tzrange("EST", -18000, "EDT")
>>> range2 = tzrange("EST", -18000, "EDT", -14400,
... relativedelta(hours=+2, month=4, day=1,
... weekday=SU(+1)),
... relativedelta(hours=+1, month=10, day=31,
... weekday=SU(-1)))
>>> tzstr('EST5EDT') == range1 == range2
True
"""
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
global relativedelta
from dateutil import relativedelta
self._std_abbr = stdabbr
self._dst_abbr = dstabbr
try:
stdoffset = _total_seconds(stdoffset)
except (TypeError, AttributeError):
pass
try:
dstoffset = _total_seconds(dstoffset)
except (TypeError, AttributeError):
pass
if stdoffset is not None:
self._std_offset = datetime.timedelta(seconds=stdoffset)
else:
self._std_offset = ZERO
if dstoffset is not None:
self._dst_offset = datetime.timedelta(seconds=dstoffset)
elif dstabbr and stdoffset is not None:
self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
else:
self._dst_offset = ZERO
if dstabbr and start is None:
self._start_delta = relativedelta.relativedelta(
hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
else:
self._start_delta = start
if dstabbr and end is None:
self._end_delta = relativedelta.relativedelta(
hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
else:
self._end_delta = end
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = bool(self._start_delta)
def transitions(self, year):
"""
For a given year, get the DST on and off transition times, expressed
always on the standard time side. For zones with no transitions, this
function returns ``None``.
:param year:
The year whose transitions you would like to query.
:return:
Returns a :class:`tuple` of :class:`datetime.datetime` objects,
``(dston, dstoff)`` for zones with an annual DST transition, or
``None`` for fixed offset zones.
"""
if not self.hasdst:
return None
base_year = datetime.datetime(year, 1, 1)
start = base_year + self._start_delta
end = base_year + self._end_delta
return (start, end)
def __eq__(self, other):
if not isinstance(other, tzrange):
return NotImplemented
return (self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr and
self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._start_delta == other._start_delta and
self._end_delta == other._end_delta)
@property
def _dst_base_offset(self):
return self._dst_base_offset_
|
class tzrange(tzrangebase):
'''
The ``tzrange`` object is a time zone specified by a set of offsets and
abbreviations, equivalent to the way the ``TZ`` variable can be specified
in POSIX-like systems, but using Python delta objects to specify DST
start, end and offsets.
:param stdabbr:
The abbreviation for standard time (e.g. ``'EST'``).
:param stdoffset:
An integer or :class:`datetime.timedelta` object or equivalent
specifying the base offset from UTC.
If unspecified, +00:00 is used.
:param dstabbr:
The abbreviation for DST / "Summer" time (e.g. ``'EDT'``).
If specified, with no other DST information, DST is assumed to occur
and the default behavior or ``dstoffset``, ``start`` and ``end`` is
used. If unspecified and no other DST information is specified, it
is assumed that this zone has no DST.
If this is unspecified and other DST information is *is* specified,
DST occurs in the zone but the time zone abbreviation is left
unchanged.
:param dstoffset:
A an integer or :class:`datetime.timedelta` object or equivalent
specifying the UTC offset during DST. If unspecified and any other DST
information is specified, it is assumed to be the STD offset +1 hour.
:param start:
A :class:`relativedelta.relativedelta` object or equivalent specifying
the time and time of year that daylight savings time starts. To specify,
for example, that DST starts at 2AM on the 2nd Sunday in March, pass:
``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``
If unspecified and any other DST information is specified, the default
value is 2 AM on the first Sunday in April.
:param end:
A :class:`relativedelta.relativedelta` object or equivalent representing
the time and time of year that daylight savings time ends, with the
same specification method as in ``start``. One note is that this should
point to the first time in the *standard* zone, so if a transition
occurs at 2AM in the DST zone and the clocks are set back 1 hour to 1AM,
set the `hours` parameter to +1.
**Examples:**
.. testsetup:: tzrange
from dateutil.tz import tzrange, tzstr
.. doctest:: tzrange
>>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT")
True
>>> from dateutil.relativedelta import *
>>> range1 = tzrange("EST", -18000, "EDT")
>>> range2 = tzrange("EST", -18000, "EDT", -14400,
... relativedelta(hours=+2, month=4, day=1,
... weekday=SU(+1)),
... relativedelta(hours=+1, month=10, day=31,
... weekday=SU(-1)))
>>> tzstr('EST5EDT') == range1 == range2
True
'''
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
pass
def transitions(self, year):
'''
For a given year, get the DST on and off transition times, expressed
always on the standard time side. For zones with no transitions, this
function returns ``None``.
:param year:
The year whose transitions you would like to query.
:return:
Returns a :class:`tuple` of :class:`datetime.datetime` objects,
``(dston, dstoff)`` for zones with an annual DST transition, or
``None`` for fixed offset zones.
'''
pass
def __eq__(self, other):
pass
@property
def _dst_base_offset(self):
pass
| 6 | 2 | 20 | 4 | 14 | 3 | 3 | 1.14 | 1 | 5 | 0 | 1 | 4 | 8 | 4 | 25 | 160 | 38 | 57 | 21 | 47 | 65 | 42 | 18 | 35 | 8 | 3 | 1 | 13 |
147,949 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz.tzstr
|
class tzstr(tzrange):
"""
``tzstr`` objects are time zone objects specified by a time-zone string as
it would be passed to a ``TZ`` variable on POSIX-style systems (see
the `GNU C Library: TZ Variable`_ for more details).
There is one notable exception, which is that POSIX-style time zones use an
inverted offset format, so normally ``GMT+3`` would be parsed as an offset
3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
behavior, pass a ``True`` value to ``posix_offset``.
The :class:`tzrange` object provides the same functionality, but is
specified using :class:`relativedelta.relativedelta` objects. rather than
strings.
:param s:
A time zone string in ``TZ`` variable format. This can be a
:class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: :class:`unicode`)
or a stream emitting unicode characters (e.g. :class:`StringIO`).
:param posix_offset:
Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
POSIX standard.
.. _`GNU C Library: TZ Variable`:
https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
"""
def __init__(self, s, posix_offset=False):
global parser
from dateutil import parser
self._s = s
res = parser._parsetz(s)
if res is None:
raise ValueError("unknown string format")
# Here we break the compatibility with the TZ variable handling.
# GMT-3 actually *means* the timezone -3.
if res.stdabbr in ("GMT", "UTC") and not posix_offset:
res.stdoffset *= -1
# We must initialize it first, since _delta() needs
# _std_offset and _dst_offset set. Use False in start/end
# to avoid building it two times.
tzrange.__init__(self, res.stdabbr, res.stdoffset,
res.dstabbr, res.dstoffset,
start=False, end=False)
if not res.dstabbr:
self._start_delta = None
self._end_delta = None
else:
self._start_delta = self._delta(res.start)
if self._start_delta:
self._end_delta = self._delta(res.end, isend=1)
self.hasdst = bool(self._start_delta)
def _delta(self, x, isend=0):
from dateutil import relativedelta
kwargs = {}
if x.month is not None:
kwargs["month"] = x.month
if x.weekday is not None:
kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
if x.week > 0:
kwargs["day"] = 1
else:
kwargs["day"] = 31
elif x.day:
kwargs["day"] = x.day
elif x.yday is not None:
kwargs["yearday"] = x.yday
elif x.jyday is not None:
kwargs["nlyearday"] = x.jyday
if not kwargs:
# Default is to start on first sunday of april, and end
# on last sunday of october.
if not isend:
kwargs["month"] = 4
kwargs["day"] = 1
kwargs["weekday"] = relativedelta.SU(+1)
else:
kwargs["month"] = 10
kwargs["day"] = 31
kwargs["weekday"] = relativedelta.SU(-1)
if x.time is not None:
kwargs["seconds"] = x.time
else:
# Default is 2AM.
kwargs["seconds"] = 7200
if isend:
# Convert to standard time, to follow the documented way
# of working with the extra hour. See the documentation
# of the tzinfo class.
delta = self._dst_offset - self._std_offset
kwargs["seconds"] -= delta.seconds + delta.days * 86400
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
|
class tzstr(tzrange):
'''
``tzstr`` objects are time zone objects specified by a time-zone string as
it would be passed to a ``TZ`` variable on POSIX-style systems (see
the `GNU C Library: TZ Variable`_ for more details).
There is one notable exception, which is that POSIX-style time zones use an
inverted offset format, so normally ``GMT+3`` would be parsed as an offset
3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
behavior, pass a ``True`` value to ``posix_offset``.
The :class:`tzrange` object provides the same functionality, but is
specified using :class:`relativedelta.relativedelta` objects. rather than
strings.
:param s:
A time zone string in ``TZ`` variable format. This can be a
:class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: :class:`unicode`)
or a stream emitting unicode characters (e.g. :class:`StringIO`).
:param posix_offset:
Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
POSIX standard.
.. _`GNU C Library: TZ Variable`:
https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
'''
def __init__(self, s, posix_offset=False):
pass
def _delta(self, x, isend=0):
pass
def __repr__(self):
pass
| 4 | 1 | 24 | 2 | 19 | 4 | 6 | 0.6 | 1 | 4 | 0 | 0 | 3 | 4 | 3 | 28 | 105 | 14 | 57 | 14 | 50 | 34 | 48 | 14 | 41 | 11 | 4 | 3 | 17 |
147,950 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz.tzfile
|
class tzfile(_tzinfo):
"""
This is a ``tzinfo`` subclass thant allows one to use the ``tzfile(5)``
format timezone files to extract current and historical zone information.
:param fileobj:
This can be an opened file stream or a file name that the time zone
information can be read from.
:param filename:
This is an optional parameter specifying the source of the time zone
information in the event that ``fileobj`` is a file object. If omitted
and ``fileobj`` is a file stream, this parameter will be set either to
``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.
See `Sources for Time Zone and Daylight Saving Time Data
<http://www.twinsun.com/tz/tz-link.htm>`_ for more information. Time zone
files can be compiled from the `IANA Time Zone database files
<https://www.iana.org/time-zones>`_ with the `zic time zone compiler
<https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_
"""
def __init__(self, fileobj, filename=None):
super(tzfile, self).__init__()
file_opened_here = False
if isinstance(fileobj, string_types):
self._filename = fileobj
fileobj = open(fileobj, 'rb')
file_opened_here = True
elif filename is not None:
self._filename = filename
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = repr(fileobj)
if fileobj is not None:
if not file_opened_here:
fileobj = _ContextWrapper(fileobj)
with fileobj as file_stream:
tzobj = self._read_tzfile(file_stream)
self._set_tzdata(tzobj)
def _set_tzdata(self, tzobj):
""" Set the time zone data of this object from a _tzfile object """
# Copy the relevant attributes over as private attributes
for attr in _tzfile.attrs:
setattr(self, '_' + attr, getattr(tzobj, attr))
def _read_tzfile(self, fileobj):
out = _tzfile()
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4).decode() != "TZif":
raise ValueError("magic not found")
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
out.trans_list_utc = list(struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt * 4)))
else:
out.trans_list_utc = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
out.trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
out.trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt).decode()
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now (but seek for correct file position)
if leapcnt:
fileobj.seek(leapcnt * 8, os.SEEK_CUR)
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# Build ttinfo list
out.ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
# Round to full-minutes if that's not the case. Python's
# datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 for some information.
gmtoff = 60 * ((gmtoff + 30) // 60)
tti = _ttinfo()
tti.offset = gmtoff
tti.dstoffset = datetime.timedelta(0)
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
out.ttinfo_list.append(tti)
# Replace ttinfo indexes for ttinfo objects.
out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
out.ttinfo_std = None
out.ttinfo_dst = None
out.ttinfo_before = None
if out.ttinfo_list:
if not out.trans_list_utc:
out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
else:
for i in range(timecnt - 1, -1, -1):
tti = out.trans_idx[i]
if not out.ttinfo_std and not tti.isdst:
out.ttinfo_std = tti
elif not out.ttinfo_dst and tti.isdst:
out.ttinfo_dst = tti
if out.ttinfo_std and out.ttinfo_dst:
break
else:
if out.ttinfo_dst and not out.ttinfo_std:
out.ttinfo_std = out.ttinfo_dst
for tti in out.ttinfo_list:
if not tti.isdst:
out.ttinfo_before = tti
break
else:
out.ttinfo_before = out.ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
# is setup to wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
laststdoffset = None
out.trans_list = []
for i, tti in enumerate(out.trans_idx):
if not tti.isdst:
offset = tti.offset
laststdoffset = offset
else:
if laststdoffset is not None:
# Store the DST offset as well and update it in the list
tti.dstoffset = tti.offset - laststdoffset
out.trans_idx[i] = tti
offset = laststdoffset or 0
out.trans_list.append(out.trans_list_utc[i] + offset)
# In case we missed any DST offsets on the way in for some reason, make
# a second pass over the list, looking for the /next/ DST offset.
laststdoffset = None
for i in reversed(range(len(out.trans_idx))):
tti = out.trans_idx[i]
if tti.isdst:
if not (tti.dstoffset or laststdoffset is None):
tti.dstoffset = tti.offset - laststdoffset
else:
laststdoffset = tti.offset
if not isinstance(tti.dstoffset, datetime.timedelta):
tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset)
out.trans_idx[i] = tti
out.trans_idx = tuple(out.trans_idx)
out.trans_list = tuple(out.trans_list)
out.trans_list_utc = tuple(out.trans_list_utc)
return out
def _find_last_transition(self, dt, in_utc=False):
# If there's no list, there are no transitions to find
if not self._trans_list:
return None
timestamp = _datetime_to_timestamp(dt)
# Find where the timestamp fits in the transition list - if the
# timestamp is a transition time, it's part of the "after" period.
trans_list = self._trans_list_utc if in_utc else self._trans_list
idx = bisect.bisect_right(trans_list, timestamp)
# We want to know when the previous transition was, so subtract off 1
return idx - 1
def _get_ttinfo(self, idx):
# For no list or after the last transition, default to _ttinfo_std
if idx is None or (idx + 1) >= len(self._trans_list):
return self._ttinfo_std
# If there is a list and the time is before it, return _ttinfo_before
if idx < 0:
return self._ttinfo_before
return self._trans_idx[idx]
def _find_ttinfo(self, dt):
idx = self._resolve_ambiguous_time(dt)
return self._get_ttinfo(idx)
def fromutc(self, dt):
"""
The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`.
:param dt:
A :py:class:`datetime.datetime` object.
:raises TypeError:
Raised if ``dt`` is not a :py:class:`datetime.datetime` object.
:raises ValueError:
Raised if this is called with a ``dt`` which does not have this
``tzinfo`` attached.
:return:
Returns a :py:class:`datetime.datetime` object representing the
wall time in ``self``'s time zone.
"""
# These isinstance checks are in datetime.tzinfo, so we'll preserve
# them, even if we don't care about duck typing.
if not isinstance(dt, datetime.datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
# First treat UTC as wall time and get the transition we're in.
idx = self._find_last_transition(dt, in_utc=True)
tti = self._get_ttinfo(idx)
dt_out = dt + datetime.timedelta(seconds=tti.offset)
fold = self.is_ambiguous(dt_out, idx=idx)
return enfold(dt_out, fold=int(fold))
def is_ambiguous(self, dt, idx=None):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
if idx is None:
idx = self._find_last_transition(dt)
# Calculate the difference in offsets from current to previous
timestamp = _datetime_to_timestamp(dt)
tti = self._get_ttinfo(idx)
if idx is None or idx <= 0:
return False
od = self._get_ttinfo(idx - 1).offset - tti.offset
tt = self._trans_list[idx] # Transition time
return timestamp < tt + od
def _resolve_ambiguous_time(self, dt):
idx = self._find_last_transition(dt)
# If we have no transitions, return the index
_fold = self._fold(dt)
if idx is None or idx == 0:
return idx
# If it's ambiguous and we're in a fold, shift to a different index.
idx_offset = int(not _fold and self.is_ambiguous(dt, idx))
return idx - idx_offset
def utcoffset(self, dt):
if dt is None:
return None
if not self._ttinfo_std:
return ZERO
return self._find_ttinfo(dt).delta
def dst(self, dt):
if dt is None:
return None
if not self._ttinfo_dst:
return ZERO
tti = self._find_ttinfo(dt)
if not tti.isdst:
return ZERO
# The documentation says that utcoffset()-dst() must
# be constant for every dt.
return tti.dstoffset
@tzname_in_python2
def tzname(self, dt):
if not self._ttinfo_std or dt is None:
return None
return self._find_ttinfo(dt).abbr
def __eq__(self, other):
if not isinstance(other, tzfile):
return NotImplemented
return (self._trans_list == other._trans_list and
self._trans_idx == other._trans_idx and
self._ttinfo_list == other._ttinfo_list)
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
def __reduce__(self):
return self.__reduce_ex__(None)
def __reduce_ex__(self, protocol):
return (self.__class__, (None, self._filename), self.__dict__)
|
class tzfile(_tzinfo):
'''
This is a ``tzinfo`` subclass thant allows one to use the ``tzfile(5)``
format timezone files to extract current and historical zone information.
:param fileobj:
This can be an opened file stream or a file name that the time zone
information can be read from.
:param filename:
This is an optional parameter specifying the source of the time zone
information in the event that ``fileobj`` is a file object. If omitted
and ``fileobj`` is a file stream, this parameter will be set either to
``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.
See `Sources for Time Zone and Daylight Saving Time Data
<http://www.twinsun.com/tz/tz-link.htm>`_ for more information. Time zone
files can be compiled from the `IANA Time Zone database files
<https://www.iana.org/time-zones>`_ with the `zic time zone compiler
<https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_
'''
def __init__(self, fileobj, filename=None):
pass
def _set_tzdata(self, tzobj):
''' Set the time zone data of this object from a _tzfile object '''
pass
def _read_tzfile(self, fileobj):
pass
def _find_last_transition(self, dt, in_utc=False):
pass
def _get_ttinfo(self, idx):
pass
def _find_ttinfo(self, dt):
pass
def fromutc(self, dt):
'''
The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`.
:param dt:
A :py:class:`datetime.datetime` object.
:raises TypeError:
Raised if ``dt`` is not a :py:class:`datetime.datetime` object.
:raises ValueError:
Raised if this is called with a ``dt`` which does not have this
``tzinfo`` attached.
:return:
Returns a :py:class:`datetime.datetime` object representing the
wall time in ``self``'s time zone.
'''
pass
def is_ambiguous(self, dt, idx=None):
'''
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
'''
pass
def _resolve_ambiguous_time(self, dt):
pass
def utcoffset(self, dt):
pass
def dst(self, dt):
pass
@tzname_in_python2
def tzname(self, dt):
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
def __repr__(self):
pass
def __reduce__(self):
pass
def __reduce_ex__(self, protocol):
pass
| 19 | 4 | 23 | 4 | 12 | 7 | 4 | 0.69 | 1 | 14 | 3 | 1 | 17 | 1 | 17 | 27 | 432 | 93 | 201 | 57 | 182 | 139 | 178 | 50 | 160 | 24 | 2 | 4 | 62 |
147,951 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz._tzicalvtzcomp
|
class _tzicalvtzcomp(object):
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
|
class _tzicalvtzcomp(object):
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 6 | 1 | 1 | 9 | 0 | 9 | 9 | 6 | 0 | 8 | 8 | 6 | 1 | 1 | 0 | 1 |
147,952 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz._tzicalvtz
|
class _tzicalvtz(_tzinfo):
def __init__(self, tzid, comps=[]):
super(_tzicalvtz, self).__init__()
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index((dt, self._fold(dt)))]
except ValueError:
pass
lastcompdt = None
lastcomp = None
for comp in self._comps:
compdt = self._find_compdt(comp, dt)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
lastcomp = comp[0]
self._cachedate.insert(0, (dt, self._fold(dt)))
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def _find_compdt(self, comp, dt):
if comp.tzoffsetdiff < ZERO and self._fold(dt):
dt -= comp.tzoffsetdiff
compdt = comp.rrule.before(dt, inc=True)
return compdt
def utcoffset(self, dt):
if dt is None:
return None
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % repr(self._tzid)
__reduce__ = object.__reduce__
|
class _tzicalvtz(_tzinfo):
def __init__(self, tzid, comps=[]):
pass
def _find_comp(self, dt):
pass
def _find_compdt(self, comp, dt):
pass
def utcoffset(self, dt):
pass
def dst(self, dt):
pass
@tzname_in_python2
def tzname(self, dt):
pass
def __repr__(self):
pass
| 9 | 0 | 10 | 2 | 8 | 1 | 2 | 0.07 | 1 | 2 | 0 | 0 | 7 | 4 | 7 | 17 | 80 | 20 | 56 | 20 | 47 | 4 | 54 | 19 | 46 | 8 | 2 | 3 | 17 |
147,953 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz._tzfile
|
class _tzfile(object):
"""
Lightweight class for holding the relevant transition and time zone
information read from binary tzfiles.
"""
attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list',
'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']
def __init__(self, **kwargs):
for attr in self.attrs:
setattr(self, attr, kwargs.get(attr, None))
|
class _tzfile(object):
'''
Lightweight class for holding the relevant transition and time zone
information read from binary tzfiles.
'''
def __init__(self, **kwargs):
pass
| 2 | 1 | 3 | 0 | 3 | 0 | 2 | 0.67 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 11 | 1 | 6 | 4 | 4 | 4 | 5 | 4 | 3 | 2 | 1 | 1 | 2 |
147,954 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz._ttinfo
|
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr",
"isstd", "isgmt", "dstoffset"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return NotImplemented
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt and
self.dstoffset == other.dstoffset)
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
|
class _ttinfo(object):
def __init__(self):
pass
def __repr__(self):
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
def __getstate__(self):
pass
def __setstate__(self, state):
pass
| 7 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 6 | 0 | 6 | 6 | 43 | 8 | 35 | 16 | 28 | 0 | 28 | 16 | 21 | 3 | 1 | 2 | 13 |
147,955 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz._ContextWrapper
|
class _ContextWrapper(object):
"""
Class for wrapping contexts so that they are passed through in a
with statement.
"""
def __init__(self, context):
self.context = context
def __enter__(self):
return self.context
def __exit__(*args, **kwargs):
pass
|
class _ContextWrapper(object):
'''
Class for wrapping contexts so that they are passed through in a
with statement.
'''
def __init__(self, context):
pass
def __enter__(self):
pass
def __exit__(*args, **kwargs):
pass
| 4 | 1 | 2 | 0 | 2 | 0 | 1 | 0.57 | 1 | 0 | 0 | 0 | 3 | 1 | 3 | 3 | 14 | 3 | 7 | 5 | 3 | 4 | 7 | 5 | 3 | 1 | 1 | 0 | 3 |
147,956 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/_common.py
|
constant2.pkg.superjson.pkg.dateutil.tz._common._tzinfo
|
class _tzinfo(tzinfo):
"""
Base class for all ``dateutil`` ``tzinfo`` objects.
"""
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
dt = dt.replace(tzinfo=self)
wall_0 = enfold(dt, fold=0)
wall_1 = enfold(dt, fold=1)
same_offset = wall_0.utcoffset() == wall_1.utcoffset()
same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
return same_dt and not same_offset
def _fold_status(self, dt_utc, dt_wall):
"""
Determine the fold status of a "wall" datetime, given a representation
of the same datetime as a (naive) UTC datetime. This is calculated based
on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
datetimes, and that this offset is the actual number of hours separating
``dt_utc`` and ``dt_wall``.
:param dt_utc:
Representation of the datetime as UTC
:param dt_wall:
Representation of the datetime as "wall time". This parameter must
either have a `fold` attribute or have a fold-naive
:class:`datetime.tzinfo` attached, otherwise the calculation may
fail.
"""
if self.is_ambiguous(dt_wall):
delta_wall = dt_wall - dt_utc
_fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
else:
_fold = 0
return _fold
def _fold(self, dt):
return getattr(dt, 'fold', 0)
def _fromutc(self, dt):
"""
Given a timezone-aware datetime in a given timezone, calculates a
timezone-aware datetime in a new timezone.
Since this is the one time that we *know* we have an unambiguous
datetime object, we take this opportunity to determine whether the
datetime is ambiguous and in a "fold" state (e.g. if it's the first
occurence, chronologically, of the ambiguous datetime).
:param dt:
A timezone-aware :class:`datetime.datetime` object.
"""
# Re-implement the algorithm from Python's datetime.py
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# The original datetime.py code assumes that `dst()` defaults to
# zero during ambiguous times. PEP 495 inverts this presumption, so
# for pre-PEP 495 versions of python, we need to tweak the algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
dt += delta
# Set fold=1 so we can default to being in the fold for
# ambiguous dates.
dtdst = enfold(dt, fold=1).dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst
@_validate_fromutc_inputs
def fromutc(self, dt):
"""
Given a timezone-aware datetime in a given timezone, calculates a
timezone-aware datetime in a new timezone.
Since this is the one time that we *know* we have an unambiguous
datetime object, we take this opportunity to determine whether the
datetime is ambiguous and in a "fold" state (e.g. if it's the first
occurance, chronologically, of the ambiguous datetime).
:param dt:
A timezone-aware :class:`datetime.datetime` object.
"""
dt_wall = self._fromutc(dt)
# Calculate the fold status given the two datetimes.
_fold = self._fold_status(dt, dt_wall)
# Set the default fold value for ambiguous dates
return enfold(dt_wall, fold=_fold)
|
class _tzinfo(tzinfo):
'''
Base class for all ``dateutil`` ``tzinfo`` objects.
'''
def is_ambiguous(self, dt):
'''
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
'''
pass
def _fold_status(self, dt_utc, dt_wall):
'''
Determine the fold status of a "wall" datetime, given a representation
of the same datetime as a (naive) UTC datetime. This is calculated based
on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
datetimes, and that this offset is the actual number of hours separating
``dt_utc`` and ``dt_wall``.
:param dt_utc:
Representation of the datetime as UTC
:param dt_wall:
Representation of the datetime as "wall time". This parameter must
either have a `fold` attribute or have a fold-naive
:class:`datetime.tzinfo` attached, otherwise the calculation may
fail.
'''
pass
def _fold_status(self, dt_utc, dt_wall):
pass
def _fromutc(self, dt):
'''
Given a timezone-aware datetime in a given timezone, calculates a
timezone-aware datetime in a new timezone.
Since this is the one time that we *know* we have an unambiguous
datetime object, we take this opportunity to determine whether the
datetime is ambiguous and in a "fold" state (e.g. if it's the first
occurence, chronologically, of the ambiguous datetime).
:param dt:
A timezone-aware :class:`datetime.datetime` object.
'''
pass
@_validate_fromutc_inputs
def fromutc(self, dt):
'''
Given a timezone-aware datetime in a given timezone, calculates a
timezone-aware datetime in a new timezone.
Since this is the one time that we *know* we have an unambiguous
datetime object, we take this opportunity to determine whether the
datetime is ambiguous and in a "fold" state (e.g. if it's the first
occurance, chronologically, of the ambiguous datetime).
:param dt:
A timezone-aware :class:`datetime.datetime` object.
'''
pass
| 7 | 5 | 21 | 4 | 7 | 10 | 2 | 1.46 | 1 | 2 | 0 | 4 | 5 | 0 | 5 | 10 | 116 | 25 | 37 | 18 | 30 | 54 | 33 | 17 | 27 | 4 | 1 | 1 | 9 |
147,957 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/rrule.py
|
constant2.pkg.superjson.pkg.dateutil.rrule.weekday
|
class weekday(weekdaybase):
"""
This version of weekday does not allow n = 0.
"""
def __init__(self, wkday, n=None):
if n == 0:
raise ValueError("Can't create weekday with n==0")
super(weekday, self).__init__(wkday, n)
|
class weekday(weekdaybase):
'''
This version of weekday does not allow n = 0.
'''
def __init__(self, wkday, n=None):
pass
| 2 | 1 | 5 | 1 | 4 | 0 | 2 | 0.6 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 5 | 10 | 2 | 5 | 2 | 3 | 3 | 5 | 2 | 3 | 2 | 2 | 1 | 2 |
147,958 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/rrule.py
|
constant2.pkg.superjson.pkg.dateutil.rrule.rruleset
|
class rruleset(rrulebase):
""" The rruleset type allows more complex recurrence setups, mixing
multiple rules, dates, exclusion rules, and exclusion dates. The type
constructor takes the following keyword arguments:
:param cache: If True, caching of results will be enabled, improving
performance of multiple queries considerably. """
class _genitem(object):
def __init__(self, genlist, gen):
try:
self.dt = advance_iterator(gen)
genlist.append(self)
except StopIteration:
pass
self.genlist = genlist
self.gen = gen
def __next__(self):
try:
self.dt = advance_iterator(self.gen)
except StopIteration:
if self.genlist[0] is self:
heapq.heappop(self.genlist)
else:
self.genlist.remove(self)
heapq.heapify(self.genlist)
next = __next__
def __lt__(self, other):
return self.dt < other.dt
def __gt__(self, other):
return self.dt > other.dt
def __eq__(self, other):
return self.dt == other.dt
def __ne__(self, other):
return self.dt != other.dt
def __init__(self, cache=False):
super(rruleset, self).__init__(cache)
self._rrule = []
self._rdate = []
self._exrule = []
self._exdate = []
@_invalidates_cache
def rrule(self, rrule):
""" Include the given :py:class:`rrule` instance in the recurrence set
generation. """
self._rrule.append(rrule)
@_invalidates_cache
def rdate(self, rdate):
""" Include the given :py:class:`datetime` instance in the recurrence
set generation. """
self._rdate.append(rdate)
@_invalidates_cache
def exrule(self, exrule):
""" Include the given rrule instance in the recurrence set exclusion
list. Dates which are part of the given recurrence rules will not
be generated, even if some inclusive rrule or rdate matches them.
"""
self._exrule.append(exrule)
@_invalidates_cache
def exdate(self, exdate):
""" Include the given datetime instance in the recurrence set
exclusion list. Dates included that way will not be generated,
even if some inclusive rrule or rdate matches them. """
self._exdate.append(exdate)
def _iter(self):
rlist = []
self._rdate.sort()
self._genitem(rlist, iter(self._rdate))
for gen in [iter(x) for x in self._rrule]:
self._genitem(rlist, gen)
exlist = []
self._exdate.sort()
self._genitem(exlist, iter(self._exdate))
for gen in [iter(x) for x in self._exrule]:
self._genitem(exlist, gen)
lastdt = None
total = 0
heapq.heapify(rlist)
heapq.heapify(exlist)
while rlist:
ritem = rlist[0]
if not lastdt or lastdt != ritem.dt:
while exlist and exlist[0] < ritem:
exitem = exlist[0]
advance_iterator(exitem)
if exlist and exlist[0] is exitem:
heapq.heapreplace(exlist, exitem)
if not exlist or ritem != exlist[0]:
total += 1
yield ritem.dt
lastdt = ritem.dt
advance_iterator(ritem)
if rlist and rlist[0] is ritem:
heapq.heapreplace(rlist, ritem)
self._len = total
|
class rruleset(rrulebase):
''' The rruleset type allows more complex recurrence setups, mixing
multiple rules, dates, exclusion rules, and exclusion dates. The type
constructor takes the following keyword arguments:
:param cache: If True, caching of results will be enabled, improving
performance of multiple queries considerably. '''
class _genitem(object):
def __init__(self, genlist, gen):
pass
def __next__(self):
pass
def __lt__(self, other):
pass
def __gt__(self, other):
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
def __init__(self, genlist, gen):
pass
@_invalidates_cache
def rrule(self, rrule):
''' Include the given :py:class:`rrule` instance in the recurrence set
generation. '''
pass
@_invalidates_cache
def rdate(self, rdate):
''' Include the given :py:class:`datetime` instance in the recurrence
set generation. '''
pass
@_invalidates_cache
def exrule(self, exrule):
''' Include the given rrule instance in the recurrence set exclusion
list. Dates which are part of the given recurrence rules will not
be generated, even if some inclusive rrule or rdate matches them.
'''
pass
@_invalidates_cache
def exdate(self, exdate):
''' Include the given datetime instance in the recurrence set
exclusion list. Dates included that way will not be generated,
even if some inclusive rrule or rdate matches them. '''
pass
def _iter(self):
pass
| 18 | 5 | 7 | 0 | 6 | 1 | 2 | 0.21 | 1 | 2 | 1 | 0 | 6 | 5 | 6 | 17 | 107 | 14 | 77 | 33 | 59 | 16 | 72 | 29 | 58 | 9 | 2 | 4 | 23 |
147,959 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/rrule.py
|
constant2.pkg.superjson.pkg.dateutil.rrule.rrulebase
|
class rrulebase(object):
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = _thread.allocate_lock()
self._invalidate_cache()
else:
self._cache = None
self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _invalidate_cache(self):
if self._cache is not None:
self._cache = []
self._cache_complete = False
self._cache_gen = self._iter()
if self._cache_lock.locked():
self._cache_lock.release()
self._len = None
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(advance_iterator(gen))
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxsize,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item + 1):
res = advance_iterator(gen)
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
# __len__() introduces a large performance penality.
def count(self):
""" Returns the number of recurrences in this set. It will have go
trough the whole recurrence, if this hasn't been done before. """
if self._len is None:
for x in self:
pass
return self._len
def before(self, dt, inc=False):
""" Returns the last recurrence before the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
""" Returns the first recurrence after the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def xafter(self, dt, count=None, inc=False):
"""
Generator which yields up to `count` recurrences after the given
datetime instance, equivalent to `after`.
:param dt:
The datetime at which to start generating recurrences.
:param count:
The maximum number of recurrences to generate. If `None` (default),
dates are generated until the recurrence rule is exhausted.
:param inc:
If `dt` is an instance of the rule and `inc` is `True`, it is
included in the output.
:yields: Yields a sequence of `datetime` objects.
"""
if self._cache_complete:
gen = self._cache
else:
gen = self
# Select the comparison function
if inc:
def comp(dc, dtc): return dc >= dtc
else:
def comp(dc, dtc): return dc > dtc
# Generate dates
n = 0
for d in gen:
if comp(d, dt):
if count is not None:
n += 1
if n > count:
break
yield d
def between(self, after, before, inc=False, count=1):
""" Returns all the occurrences of the rrule between after and before.
The inc keyword defines what happens if after and/or before are
themselves occurrences. With inc=True, they will be included in the
list, if they are found in the recurrence set. """
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
|
class rrulebase(object):
def __init__(self, cache=False):
pass
def __iter__(self):
pass
def _invalidate_cache(self):
pass
def _iter_cached(self):
pass
def __getitem__(self, item):
pass
def __contains__(self, item):
pass
def count(self):
''' Returns the number of recurrences in this set. It will have go
trough the whole recurrence, if this hasn't been done before. '''
pass
def before(self, dt, inc=False):
''' Returns the last recurrence before the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. '''
pass
def after(self, dt, inc=False):
''' Returns the first recurrence after the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. '''
pass
def xafter(self, dt, count=None, inc=False):
'''
Generator which yields up to `count` recurrences after the given
datetime instance, equivalent to `after`.
:param dt:
The datetime at which to start generating recurrences.
:param count:
The maximum number of recurrences to generate. If `None` (default),
dates are generated until the recurrence rule is exhausted.
:param inc:
If `dt` is an instance of the rule and `inc` is `True`, it is
included in the output.
:yields: Yields a sequence of `datetime` objects.
'''
pass
def comp(dc, dtc):
pass
def comp(dc, dtc):
pass
def between(self, after, before, inc=False, count=1):
''' Returns all the occurrences of the rrule between after and before.
The inc keyword defines what happens if after and/or before are
themselves occurrences. With inc=True, they will be included in the
list, if they are found in the recurrence set. '''
pass
| 14 | 5 | 15 | 1 | 12 | 2 | 5 | 0.17 | 1 | 6 | 0 | 2 | 11 | 5 | 11 | 11 | 209 | 20 | 161 | 42 | 149 | 28 | 139 | 42 | 125 | 11 | 1 | 4 | 64 |
147,960 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/rrule.py
|
constant2.pkg.superjson.pkg.dateutil.rrule.rrule
|
class rrule(rrulebase):
"""
That's the base of the rrule operation. It accepts all the keywords
defined in the RFC as its constructor parameters (except byday,
which was renamed to byweekday) and more. The constructor prototype is::
rrule(freq)
Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
or SECONDLY.
.. note::
Per RFC section 3.3.10, recurrence instances falling on invalid dates
and times are ignored rather than coerced:
Recurrence rules may generate recurrence instances with an invalid
date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
on a day where the local time is moved forward by an hour at 1:00
AM). Such recurrence instances MUST be ignored and MUST NOT be
counted as part of the recurrence set.
This can lead to possibly surprising behavior when, for example, the
start date occurs at the end of the month:
>>> from dateutil.rrule import rrule, MONTHLY
>>> from datetime import datetime
>>> start_date = datetime(2014, 12, 31)
>>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
... # doctest: +NORMALIZE_WHITESPACE
[datetime.datetime(2014, 12, 31, 0, 0),
datetime.datetime(2015, 1, 31, 0, 0),
datetime.datetime(2015, 3, 31, 0, 0),
datetime.datetime(2015, 5, 31, 0, 0)]
Additionally, it supports the following keyword arguments:
:param cache:
If given, it must be a boolean value specifying to enable or disable
caching of results. If you will use the same rrule instance multiple
times, enabling caching will improve the performance considerably.
:param dtstart:
The recurrence start. Besides being the base for the recurrence,
missing parameters in the final recurrence instances will also be
extracted from this date. If not given, datetime.now() will be used
instead.
:param interval:
The interval between each freq iteration. For example, when using
YEARLY, an interval of 2 means once every two years, but with HOURLY,
it means once every two hours. The default interval is 1.
:param wkst:
The week start day. Must be one of the MO, TU, WE constants, or an
integer, specifying the first day of the week. This will affect
recurrences based on weekly periods. The default week start is got
from calendar.firstweekday(), and may be modified by
calendar.setfirstweekday().
:param count:
How many occurrences will be generated.
.. note::
As of version 2.5.0, the use of the ``until`` keyword together
with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
:param until:
If given, this must be a datetime instance, that will specify the
limit of the recurrence. The last recurrence in the rule is the greatest
datetime that is less than or equal to the value specified in the
``until`` parameter.
.. note::
As of version 2.5.0, the use of the ``until`` keyword together
with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
:param bysetpos:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each given integer will specify an occurrence
number, corresponding to the nth occurrence of the rule inside the
frequency period. For example, a bysetpos of -1 if combined with a
MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
result in the last work day of every month.
:param bymonth:
If given, it must be either an integer, or a sequence of integers,
meaning the months to apply the recurrence to.
:param bymonthday:
If given, it must be either an integer, or a sequence of integers,
meaning the month days to apply the recurrence to.
:param byyearday:
If given, it must be either an integer, or a sequence of integers,
meaning the year days to apply the recurrence to.
:param byweekno:
If given, it must be either an integer, or a sequence of integers,
meaning the week numbers to apply the recurrence to. Week numbers
have the meaning described in ISO8601, that is, the first week of
the year is that containing at least four days of the new year.
:param byweekday:
If given, it must be either an integer (0 == MO), a sequence of
integers, one of the weekday constants (MO, TU, etc), or a sequence
of these constants. When given, these variables will define the
weekdays where the recurrence will be applied. It's also possible to
use an argument n for the weekday instances, which will mean the nth
occurrence of this weekday in the period. For example, with MONTHLY,
or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
first friday of the month where the recurrence happens. Notice that in
the RFC documentation, this is specified as BYDAY, but was renamed to
avoid the ambiguity of that keyword.
:param byhour:
If given, it must be either an integer, or a sequence of integers,
meaning the hours to apply the recurrence to.
:param byminute:
If given, it must be either an integer, or a sequence of integers,
meaning the minutes to apply the recurrence to.
:param bysecond:
If given, it must be either an integer, or a sequence of integers,
meaning the seconds to apply the recurrence to.
:param byeaster:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each integer will define an offset from the
Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
Sunday itself. This is an extension to the RFC specification.
"""
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
super(rrule, self).__init__(cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
# Cache the original byxxx rules, if they are provided, as the _byxxx
# attributes do not necessarily map to the inputs, and this can be
# a problem in generating the strings. Only store things if they've
# been supplied (the string retrieval will just use .get())
self._original_rule = {}
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if count is not None and until:
warn("Using both 'count' and 'until' is inconsistent with RFC 2445"
" and has been deprecated in dateutil. Future versions will "
"raise an error.", DeprecationWarning)
if wkst is None:
self._wkst = calendar.firstweekday()
elif isinstance(wkst, integer_types):
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif isinstance(bysetpos, integer_types):
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if self._bysetpos:
self._original_rule['bysetpos'] = self._bysetpos
if (byweekno is None and byyearday is None and bymonthday is None and
byweekday is None and byeaster is None):
if freq == YEARLY:
if bymonth is None:
bymonth = dtstart.month
self._original_rule['bymonth'] = None
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == MONTHLY:
bymonthday = dtstart.day
self._original_rule['bymonthday'] = None
elif freq == WEEKLY:
byweekday = dtstart.weekday()
self._original_rule['byweekday'] = None
# bymonth
if bymonth is None:
self._bymonth = None
else:
if isinstance(bymonth, integer_types):
bymonth = (bymonth,)
self._bymonth = tuple(sorted(set(bymonth)))
if 'bymonth' not in self._original_rule:
self._original_rule['bymonth'] = self._bymonth
# byyearday
if byyearday is None:
self._byyearday = None
else:
if isinstance(byyearday, integer_types):
byyearday = (byyearday,)
self._byyearday = tuple(sorted(set(byyearday)))
self._original_rule['byyearday'] = self._byyearday
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if isinstance(byeaster, integer_types):
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(sorted(byeaster))
self._original_rule['byeaster'] = self._byeaster
else:
self._byeaster = None
# bymonthday
if bymonthday is None:
self._bymonthday = ()
self._bynmonthday = ()
else:
if isinstance(bymonthday, integer_types):
bymonthday = (bymonthday,)
bymonthday = set(bymonthday) # Ensure it's unique
self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0))
self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0))
# Storing positive numbers first, then negative numbers
if 'bymonthday' not in self._original_rule:
self._original_rule['bymonthday'] = tuple(
itertools.chain(self._bymonthday, self._bynmonthday))
# byweekno
if byweekno is None:
self._byweekno = None
else:
if isinstance(byweekno, integer_types):
byweekno = (byweekno,)
self._byweekno = tuple(sorted(set(byweekno)))
self._original_rule['byweekno'] = self._byweekno
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
else:
# If it's one of the valid non-sequence types, convert to a
# single-element sequence before the iterator that builds the
# byweekday set.
if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
byweekday = (byweekday,)
self._byweekday = set()
self._bynweekday = set()
for wday in byweekday:
if isinstance(wday, integer_types):
self._byweekday.add(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.add(wday.weekday)
else:
self._bynweekday.add((wday.weekday, wday.n))
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
if self._byweekday is not None:
self._byweekday = tuple(sorted(self._byweekday))
orig_byweekday = [weekday(x) for x in self._byweekday]
else:
orig_byweekday = tuple()
if self._bynweekday is not None:
self._bynweekday = tuple(sorted(self._bynweekday))
orig_bynweekday = [weekday(*x) for x in self._bynweekday]
else:
orig_bynweekday = tuple()
if 'byweekday' not in self._original_rule:
self._original_rule['byweekday'] = tuple(itertools.chain(
orig_byweekday, orig_bynweekday))
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = set((dtstart.hour,))
else:
self._byhour = None
else:
if isinstance(byhour, integer_types):
byhour = (byhour,)
if freq == HOURLY:
self._byhour = self.__construct_byset(start=dtstart.hour,
byxxx=byhour,
base=24)
else:
self._byhour = set(byhour)
self._byhour = tuple(sorted(self._byhour))
self._original_rule['byhour'] = self._byhour
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = set((dtstart.minute,))
else:
self._byminute = None
else:
if isinstance(byminute, integer_types):
byminute = (byminute,)
if freq == MINUTELY:
self._byminute = self.__construct_byset(start=dtstart.minute,
byxxx=byminute,
base=60)
else:
self._byminute = set(byminute)
self._byminute = tuple(sorted(self._byminute))
self._original_rule['byminute'] = self._byminute
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = ((dtstart.second,))
else:
self._bysecond = None
else:
if isinstance(bysecond, integer_types):
bysecond = (bysecond,)
self._bysecond = set(bysecond)
if freq == SECONDLY:
self._bysecond = self.__construct_byset(start=dtstart.second,
byxxx=bysecond,
base=60)
else:
self._bysecond = set(bysecond)
self._bysecond = tuple(sorted(self._bysecond))
self._original_rule['bysecond'] = self._bysecond
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def __str__(self):
"""
Output a string that would generate this RRULE if passed to rrulestr.
This is mostly compatible with RFC2445, except for the
dateutil-specific extension BYEASTER.
"""
output = []
h, m, s = [None] * 3
if self._dtstart:
output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
h, m, s = self._dtstart.timetuple()[3:6]
parts = ['FREQ=' + FREQNAMES[self._freq]]
if self._interval != 1:
parts.append('INTERVAL=' + str(self._interval))
if self._wkst:
parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
if self._count is not None:
parts.append('COUNT=' + str(self._count))
if self._until:
parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S'))
if self._original_rule.get('byweekday') is not None:
# The str() method on weekday objects doesn't generate
# RFC2445-compliant strings, so we should modify that.
original_rule = dict(self._original_rule)
wday_strings = []
for wday in original_rule['byweekday']:
if wday.n:
wday_strings.append('{n:+d}{wday}'.format(
n=wday.n,
wday=repr(wday)[0:2]))
else:
wday_strings.append(repr(wday))
original_rule['byweekday'] = wday_strings
else:
original_rule = self._original_rule
partfmt = '{name}={vals}'
for name, key in [('BYSETPOS', 'bysetpos'),
('BYMONTH', 'bymonth'),
('BYMONTHDAY', 'bymonthday'),
('BYYEARDAY', 'byyearday'),
('BYWEEKNO', 'byweekno'),
('BYDAY', 'byweekday'),
('BYHOUR', 'byhour'),
('BYMINUTE', 'byminute'),
('BYSECOND', 'bysecond'),
('BYEASTER', 'byeaster')]:
value = original_rule.get(key)
if value:
parts.append(partfmt.format(name=name, vals=(','.join(str(v)
for v in value))))
output.append(';'.join(parts))
return '\n'.join(output)
def replace(self, **kwargs):
"""Return new rrule with same attributes except for those attributes given new
values by whichever keyword arguments are specified."""
new_kwargs = {"interval": self._interval,
"count": self._count,
"dtstart": self._dtstart,
"freq": self._freq,
"until": self._until,
"wkst": self._wkst,
"cache": False if self._cache is None else True}
new_kwargs.update(self._original_rule)
new_kwargs.update(kwargs)
return rrule(**new_kwargs)
    def _iter(self):
        """
        Core generator producing this rule's occurrences in ascending order.

        Walks period by period at the rule's frequency, filters candidate
        days through the BYXXX masks cached in ``_iterinfo``, combines the
        surviving days with the precomputed time set, and yields datetimes.
        ``self._len`` is set to the total yielded when the iteration is
        exhausted by COUNT or UNTIL.
        """
        year, month, day, hour, minute, second, weekday, yearday, _ = \
            self._dtstart.timetuple()

        # Some local variables to speed things up a bit
        freq = self._freq
        interval = self._interval
        wkst = self._wkst
        until = self._until
        bymonth = self._bymonth
        byweekno = self._byweekno
        byyearday = self._byyearday
        byweekday = self._byweekday
        byeaster = self._byeaster
        bymonthday = self._bymonthday
        bynmonthday = self._bynmonthday
        bysetpos = self._bysetpos
        byhour = self._byhour
        byminute = self._byminute
        bysecond = self._bysecond

        # _iterinfo caches the per-(year, month) day/weekday masks consulted
        # by the filtering loop below.
        ii = _iterinfo(self)
        ii.rebuild(year, month)

        getdayset = {YEARLY: ii.ydayset,
                     MONTHLY: ii.mdayset,
                     WEEKLY: ii.wdayset,
                     DAILY: ii.ddayset,
                     HOURLY: ii.ddayset,
                     MINUTELY: ii.ddayset,
                     SECONDLY: ii.ddayset}[freq]

        if freq < HOURLY:
            timeset = self._timeset
        else:
            gettimeset = {HOURLY: ii.htimeset,
                          MINUTELY: ii.mtimeset,
                          SECONDLY: ii.stimeset}[freq]
            if ((freq >= HOURLY and
                 self._byhour and hour not in self._byhour) or
                (freq >= MINUTELY and
                 self._byminute and minute not in self._byminute) or
                (freq >= SECONDLY and
                 self._bysecond and second not in self._bysecond)):
                timeset = ()
            else:
                timeset = gettimeset(hour, minute, second)

        # total counts yielded datetimes; mirrored into self._len on return.
        total = 0
        count = self._count
        while True:
            # Get dayset with the right frequency
            dayset, start, end = getdayset(year, month, day)

            # Do the "hard" work ;-)
            filtered = False
            for i in dayset[start:end]:
                if ((bymonth and ii.mmask[i] not in bymonth) or
                    (byweekno and not ii.wnomask[i]) or
                    (byweekday and ii.wdaymask[i] not in byweekday) or
                    (ii.nwdaymask and not ii.nwdaymask[i]) or
                    (byeaster and not ii.eastermask[i]) or
                    ((bymonthday or bynmonthday) and
                     ii.mdaymask[i] not in bymonthday and
                     ii.nmdaymask[i] not in bynmonthday) or
                    (byyearday and
                     ((i < ii.yearlen and i + 1 not in byyearday and
                      -ii.yearlen + i not in byyearday) or
                      (i >= ii.yearlen and i + 1 - ii.yearlen not in byyearday and
                      -ii.nextyearlen + i - ii.yearlen not in byyearday)))):
                    dayset[i] = None
                    filtered = True

            # Output results
            if bysetpos and timeset:
                # BYSETPOS indexes into the flattened (day, time) candidate
                # list for this period; negative positions count from the end.
                poslist = []
                for pos in bysetpos:
                    if pos < 0:
                        daypos, timepos = divmod(pos, len(timeset))
                    else:
                        daypos, timepos = divmod(pos - 1, len(timeset))
                    try:
                        i = [x for x in dayset[start:end]
                             if x is not None][daypos]
                        time = timeset[timepos]
                    except IndexError:
                        pass
                    else:
                        date = datetime.date.fromordinal(ii.yearordinal + i)
                        res = datetime.datetime.combine(date, time)
                        if res not in poslist:
                            poslist.append(res)
                poslist.sort()
                for res in poslist:
                    if until and res > until:
                        self._len = total
                        return
                    elif res >= self._dtstart:
                        if count is not None:
                            count -= 1
                            if count < 0:
                                self._len = total
                                return
                        total += 1
                        yield res
            else:
                for i in dayset[start:end]:
                    if i is not None:
                        date = datetime.date.fromordinal(ii.yearordinal + i)
                        for time in timeset:
                            res = datetime.datetime.combine(date, time)
                            if until and res > until:
                                self._len = total
                                return
                            elif res >= self._dtstart:
                                if count is not None:
                                    count -= 1
                                    if count < 0:
                                        self._len = total
                                        return
                                total += 1
                                yield res

            # Handle frequency and interval
            fixday = False
            if freq == YEARLY:
                year += interval
                if year > datetime.MAXYEAR:
                    self._len = total
                    return
                ii.rebuild(year, month)
            elif freq == MONTHLY:
                month += interval
                if month > 12:
                    div, mod = divmod(month, 12)
                    month = mod
                    year += div
                    if month == 0:
                        month = 12
                        year -= 1
                    if year > datetime.MAXYEAR:
                        self._len = total
                        return
                ii.rebuild(year, month)
            elif freq == WEEKLY:
                if wkst > weekday:
                    day += -(weekday + 1 + (6 - wkst)) + self._interval * 7
                else:
                    day += -(weekday - wkst) + self._interval * 7
                weekday = wkst
                fixday = True
            elif freq == DAILY:
                day += interval
                fixday = True
            elif freq == HOURLY:
                if filtered:
                    # Jump to one iteration before next day
                    hour += ((23 - hour) // interval) * interval

                if byhour:
                    ndays, hour = self.__mod_distance(value=hour,
                                                      byxxx=self._byhour,
                                                      base=24)
                else:
                    ndays, hour = divmod(hour + interval, 24)

                if ndays:
                    day += ndays
                    fixday = True

                timeset = gettimeset(hour, minute, second)
            elif freq == MINUTELY:
                if filtered:
                    # Jump to one iteration before next day
                    minute += ((1439 - (hour * 60 + minute)) //
                               interval) * interval

                valid = False
                rep_rate = (24 * 60)
                for j in range(rep_rate // gcd(interval, rep_rate)):
                    if byminute:
                        nhours, minute = \
                            self.__mod_distance(value=minute,
                                                byxxx=self._byminute,
                                                base=60)
                    else:
                        nhours, minute = divmod(minute + interval, 60)

                    div, hour = divmod(hour + nhours, 24)
                    if div:
                        day += div
                        fixday = True
                        filtered = False

                    if not byhour or hour in byhour:
                        valid = True
                        break

                if not valid:
                    raise ValueError('Invalid combination of interval and ' +
                                     'byhour resulting in empty rule.')

                timeset = gettimeset(hour, minute, second)
            elif freq == SECONDLY:
                if filtered:
                    # Jump to one iteration before next day
                    second += (((86399 - (hour * 3600 + minute * 60 + second))
                                // interval) * interval)

                rep_rate = (24 * 3600)
                valid = False
                for j in range(0, rep_rate // gcd(interval, rep_rate)):
                    if bysecond:
                        nminutes, second = \
                            self.__mod_distance(value=second,
                                                byxxx=self._bysecond,
                                                base=60)
                    else:
                        nminutes, second = divmod(second + interval, 60)

                    div, minute = divmod(minute + nminutes, 60)
                    if div:
                        hour += div
                        div, hour = divmod(hour, 24)
                        if div:
                            day += div
                            fixday = True

                    if ((not byhour or hour in byhour) and
                        (not byminute or minute in byminute) and
                        (not bysecond or second in bysecond)):
                        valid = True
                        break

                if not valid:
                    raise ValueError('Invalid combination of interval, ' +
                                     'byhour and byminute resulting in empty' +
                                     ' rule.')

                timeset = gettimeset(hour, minute, second)

            # Month/day increments above may have pushed ``day`` past the end
            # of the month; normalize it forward month by month.
            if fixday and day > 28:
                daysinmonth = calendar.monthrange(year, month)[1]
                if day > daysinmonth:
                    while day > daysinmonth:
                        day -= daysinmonth
                        month += 1
                        if month == 13:
                            month = 1
                            year += 1
                            if year > datetime.MAXYEAR:
                                self._len = total
                                return
                        daysinmonth = calendar.monthrange(year, month)[1]
                    ii.rebuild(year, month)
def __construct_byset(self, start, byxxx, base):
"""
If a `BYXXX` sequence is passed to the constructor at the same level as
`FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
specifications which cannot be reached given some starting conditions.
This occurs whenever the interval is not coprime with the base of a
given unit and the difference between the starting position and the
ending position is not coprime with the greatest common denominator
between the interval and the base. For example, with a FREQ of hourly
starting at 17:00 and an interval of 4, the only valid values for
BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
coprime.
:param start:
Specifies the starting position.
:param byxxx:
An iterable containing the list of allowed values.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
This does not preserve the type of the iterable, returning a set, since
the values should be unique and the order is irrelevant, this will
speed up later lookups.
In the event of an empty set, raises a :exception:`ValueError`, as this
results in an empty rrule.
"""
cset = set()
# Support a single byxxx value.
if isinstance(byxxx, integer_types):
byxxx = (byxxx, )
for num in byxxx:
i_gcd = gcd(self._interval, base)
# Use divmod rather than % because we need to wrap negative nums.
if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
cset.add(num)
if len(cset) == 0:
raise ValueError("Invalid rrule byxxx generates an empty set.")
return cset
def __mod_distance(self, value, byxxx, base):
"""
Calculates the next value in a sequence where the `FREQ` parameter is
specified along with a `BYXXX` parameter at the same "level"
(e.g. `HOURLY` specified with `BYHOUR`).
:param value:
The old value of the component.
:param byxxx:
The `BYXXX` set, which should have been generated by
`rrule._construct_byset`, or something else which checks that a
valid rule is present.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
If a valid value is not found after `base` iterations (the maximum
number before the sequence would start to repeat), this raises a
:exception:`ValueError`, as no valid values were found.
This returns a tuple of `divmod(n*interval, base)`, where `n` is the
smallest number of `interval` repetitions until the next specified
value in `byxxx` is found.
"""
accumulator = 0
for ii in range(1, base + 1):
# Using divmod() over % to account for negative intervals
div, value = divmod(value + self._interval, base)
accumulator += div
if value in byxxx:
return (accumulator, value)
|
class rrule(rrulebase):
'''
That's the base of the rrule operation. It accepts all the keywords
defined in the RFC as its constructor parameters (except byday,
which was renamed to byweekday) and more. The constructor prototype is::
rrule(freq)
Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
or SECONDLY.
.. note::
Per RFC section 3.3.10, recurrence instances falling on invalid dates
and times are ignored rather than coerced:
Recurrence rules may generate recurrence instances with an invalid
date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
on a day where the local time is moved forward by an hour at 1:00
AM). Such recurrence instances MUST be ignored and MUST NOT be
counted as part of the recurrence set.
This can lead to possibly surprising behavior when, for example, the
start date occurs at the end of the month:
>>> from dateutil.rrule import rrule, MONTHLY
>>> from datetime import datetime
>>> start_date = datetime(2014, 12, 31)
>>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
... # doctest: +NORMALIZE_WHITESPACE
[datetime.datetime(2014, 12, 31, 0, 0),
datetime.datetime(2015, 1, 31, 0, 0),
datetime.datetime(2015, 3, 31, 0, 0),
datetime.datetime(2015, 5, 31, 0, 0)]
Additionally, it supports the following keyword arguments:
:param cache:
If given, it must be a boolean value specifying to enable or disable
caching of results. If you will use the same rrule instance multiple
times, enabling caching will improve the performance considerably.
:param dtstart:
The recurrence start. Besides being the base for the recurrence,
missing parameters in the final recurrence instances will also be
extracted from this date. If not given, datetime.now() will be used
instead.
:param interval:
The interval between each freq iteration. For example, when using
YEARLY, an interval of 2 means once every two years, but with HOURLY,
it means once every two hours. The default interval is 1.
:param wkst:
The week start day. Must be one of the MO, TU, WE constants, or an
integer, specifying the first day of the week. This will affect
recurrences based on weekly periods. The default week start is got
from calendar.firstweekday(), and may be modified by
calendar.setfirstweekday().
:param count:
How many occurrences will be generated.
.. note::
As of version 2.5.0, the use of the ``until`` keyword together
with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
:param until:
If given, this must be a datetime instance, that will specify the
limit of the recurrence. The last recurrence in the rule is the greatest
datetime that is less than or equal to the value specified in the
``until`` parameter.
.. note::
As of version 2.5.0, the use of the ``until`` keyword together
with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
:param bysetpos:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each given integer will specify an occurrence
number, corresponding to the nth occurrence of the rule inside the
frequency period. For example, a bysetpos of -1 if combined with a
MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
result in the last work day of every month.
:param bymonth:
If given, it must be either an integer, or a sequence of integers,
meaning the months to apply the recurrence to.
:param bymonthday:
If given, it must be either an integer, or a sequence of integers,
meaning the month days to apply the recurrence to.
:param byyearday:
If given, it must be either an integer, or a sequence of integers,
meaning the year days to apply the recurrence to.
:param byweekno:
If given, it must be either an integer, or a sequence of integers,
meaning the week numbers to apply the recurrence to. Week numbers
have the meaning described in ISO8601, that is, the first week of
the year is that containing at least four days of the new year.
:param byweekday:
If given, it must be either an integer (0 == MO), a sequence of
integers, one of the weekday constants (MO, TU, etc), or a sequence
of these constants. When given, these variables will define the
weekdays where the recurrence will be applied. It's also possible to
use an argument n for the weekday instances, which will mean the nth
occurrence of this weekday in the period. For example, with MONTHLY,
or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
first friday of the month where the recurrence happens. Notice that in
the RFC documentation, this is specified as BYDAY, but was renamed to
avoid the ambiguity of that keyword.
:param byhour:
If given, it must be either an integer, or a sequence of integers,
meaning the hours to apply the recurrence to.
:param byminute:
If given, it must be either an integer, or a sequence of integers,
meaning the minutes to apply the recurrence to.
:param bysecond:
If given, it must be either an integer, or a sequence of integers,
meaning the seconds to apply the recurrence to.
:param byeaster:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each integer will define an offset from the
Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
Sunday itself. This is an extension to the RFC specification.
'''
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
pass
def __str__(self):
'''
Output a string that would generate this RRULE if passed to rrulestr.
This is mostly compatible with RFC2445, except for the
dateutil-specific extension BYEASTER.
'''
pass
def replace(self, **kwargs):
'''Return new rrule with same attributes except for those attributes given new
values by whichever keyword arguments are specified.'''
pass
def _iter(self):
pass
def __construct_byset(self, start, byxxx, base):
'''
If a `BYXXX` sequence is passed to the constructor at the same level as
`FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
specifications which cannot be reached given some starting conditions.
This occurs whenever the interval is not coprime with the base of a
given unit and the difference between the starting position and the
ending position is not coprime with the greatest common denominator
between the interval and the base. For example, with a FREQ of hourly
starting at 17:00 and an interval of 4, the only valid values for
BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
coprime.
:param start:
Specifies the starting position.
:param byxxx:
An iterable containing the list of allowed values.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
This does not preserve the type of the iterable, returning a set, since
the values should be unique and the order is irrelevant, this will
speed up later lookups.
In the event of an empty set, raises a :exception:`ValueError`, as this
results in an empty rrule.
'''
pass
def __mod_distance(self, value, byxxx, base):
'''
Calculates the next value in a sequence where the `FREQ` parameter is
specified along with a `BYXXX` parameter at the same "level"
(e.g. `HOURLY` specified with `BYHOUR`).
:param value:
The old value of the component.
:param byxxx:
The `BYXXX` set, which should have been generated by
`rrule._construct_byset`, or something else which checks that a
valid rule is present.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
If a valid value is not found after `base` iterations (the maximum
number before the sequence would start to repeat), this raises a
:exception:`ValueError`, as no valid values were found.
This returns a tuple of `divmod(n*interval, base)`, where `n` is the
smallest number of `interval` repetitions until the next specified
value in `byxxx` is found.
'''
pass
| 7 | 5 | 110 | 14 | 83 | 14 | 22 | 0.38 | 1 | 15 | 2 | 0 | 6 | 22 | 6 | 17 | 784 | 98 | 499 | 99 | 485 | 188 | 371 | 94 | 362 | 57 | 2 | 8 | 134 |
147,961 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/_common.py
|
constant2.pkg.superjson.pkg.dateutil._common.weekday
|
class weekday(object):
    """A weekday (0 == MO .. 6 == SU), optionally qualified by an
    occurrence index *n* (e.g. ``FR(+1)`` for the first Friday)."""

    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        # Calling an instance yields one with the requested n, reusing
        # self when n already matches.
        return self if n == self.n else self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            return self.weekday == other.weekday and self.n == other.n
        except AttributeError:
            # Anything without weekday/n attributes is not equal.
            return False

    # Mutable-ish value object: explicitly unhashable.
    __hash__ = None

    def __repr__(self):
        name = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        return "%s(%+d)" % (name, self.n) if self.n else name
|
class weekday(object):
def __init__(self, weekday, n=None):
pass
def __call__(self, n):
pass
def __eq__(self, other):
pass
def __repr__(self):
pass
| 5 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 1 | 0 | 1 | 4 | 2 | 4 | 4 | 29 | 5 | 24 | 10 | 19 | 0 | 22 | 10 | 17 | 3 | 1 | 2 | 8 |
147,962 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/rrule.py
|
constant2.pkg.superjson.pkg.dateutil.rrule._rrulestr
|
class _rrulestr(object):
    """Parser that converts RFC 2445 ``RRULE`` (and related) strings into
    :class:`rrule` / :class:`rruleset` objects.

    Instances are callable (see :meth:`__call__`); the module exposes a
    shared instance for general use.
    """

    # FREQ= keyword -> frequency constant.
    _freq_map = {"YEARLY": YEARLY,
                 "MONTHLY": MONTHLY,
                 "WEEKLY": WEEKLY,
                 "DAILY": DAILY,
                 "HOURLY": HOURLY,
                 "MINUTELY": MINUTELY,
                 "SECONDLY": SECONDLY}

    # Two-letter weekday abbreviation -> weekday index (0 == Monday).
    _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
                    "FR": 4, "SA": 5, "SU": 6}

    def _handle_int(self, rrkwargs, name, value, **kwargs):
        # Generic handler for properties carrying a single integer.
        rrkwargs[name.lower()] = int(value)

    def _handle_int_list(self, rrkwargs, name, value, **kwargs):
        # Generic handler for comma-separated integer-list properties.
        rrkwargs[name.lower()] = [int(x) for x in value.split(',')]

    _handle_INTERVAL = _handle_int
    _handle_COUNT = _handle_int
    _handle_BYSETPOS = _handle_int_list
    _handle_BYMONTH = _handle_int_list
    _handle_BYMONTHDAY = _handle_int_list
    _handle_BYYEARDAY = _handle_int_list
    _handle_BYEASTER = _handle_int_list
    _handle_BYWEEKNO = _handle_int_list
    _handle_BYHOUR = _handle_int_list
    _handle_BYMINUTE = _handle_int_list
    _handle_BYSECOND = _handle_int_list

    def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
        rrkwargs["freq"] = self._freq_map[value]

    def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
        # ``parser`` is a lazily-imported module-level global; importing it
        # here avoids a circular import with dateutil.parser.
        global parser
        if not parser:
            from dateutil import parser
        try:
            rrkwargs["until"] = parser.parse(value,
                                             ignoretz=kwargs.get("ignoretz"),
                                             tzinfos=kwargs.get("tzinfos"))
        except ValueError:
            raise ValueError("invalid until date")

    def _handle_WKST(self, rrkwargs, name, value, **kwargs):
        rrkwargs["wkst"] = self._weekday_map[value]

    def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
        """
        Two ways to specify this: +1MO or MO(+1)
        """
        l = []
        for wday in value.split(','):
            if '(' in wday:
                # If it's of the form TH(+1), etc.
                splt = wday.split('(')
                w = splt[0]
                n = int(splt[1][:-1])
            elif len(wday):
                # If it's of the form +1MO
                for i in range(len(wday)):
                    if wday[i] not in '+-0123456789':
                        break
                n = wday[:i] or None
                w = wday[i:]
                if n:
                    n = int(n)
            else:
                raise ValueError("Invalid (empty) BYDAY specification.")

            l.append(weekdays[self._weekday_map[w]](n))
        rrkwargs["byweekday"] = l

    _handle_BYDAY = _handle_BYWEEKDAY

    def _parse_rfc_rrule(self, line,
                         dtstart=None,
                         cache=False,
                         ignoretz=False,
                         tzinfos=None):
        """Parse a single RRULE line (with or without the ``RRULE:``
        prefix) into an :class:`rrule`, dispatching each ``NAME=VALUE``
        pair to the matching ``_handle_NAME`` method."""
        if line.find(':') != -1:
            name, value = line.split(':')
            if name != "RRULE":
                raise ValueError("unknown parameter name")
        else:
            value = line
        rrkwargs = {}
        for pair in value.split(';'):
            name, value = pair.split('=')
            name = name.upper()
            value = value.upper()
            try:
                getattr(self, "_handle_" + name)(rrkwargs, name, value,
                                                 ignoretz=ignoretz,
                                                 tzinfos=tzinfos)
            except AttributeError:
                raise ValueError("unknown parameter '%s'" % name)
            except (KeyError, ValueError):
                raise ValueError("invalid '%s': %s" % (name, value))
        return rrule(dtstart=dtstart, cache=cache, **rrkwargs)

    def _parse_rfc(self, s,
                   dtstart=None,
                   cache=False,
                   unfold=False,
                   forceset=False,
                   compatible=False,
                   ignoretz=False,
                   tzinfos=None):
        """Parse a (possibly multi-line) RFC 2445 recurrence snippet.

        Returns a plain :class:`rrule` when the input is a single bare
        RRULE and ``forceset`` is false; otherwise returns an
        :class:`rruleset` aggregating RRULE/RDATE/EXRULE/EXDATE entries.
        ``unfold`` joins RFC 2445 continuation lines (leading space);
        ``compatible`` implies both ``forceset`` and ``unfold``.
        """
        global parser
        if compatible:
            forceset = True
            unfold = True

        s = s.upper()
        if not s.strip():
            raise ValueError("empty string")

        if unfold:
            lines = s.splitlines()
            i = 0
            while i < len(lines):
                line = lines[i].rstrip()
                if not line:
                    del lines[i]
                elif i > 0 and line[0] == " ":
                    lines[i - 1] += line[1:]
                    del lines[i]
                else:
                    i += 1
        else:
            lines = s.split()

        if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
                                                  s.startswith('RRULE:'))):
            return self._parse_rfc_rrule(lines[0], cache=cache,
                                         dtstart=dtstart, ignoretz=ignoretz,
                                         tzinfos=tzinfos)
        else:
            rrulevals = []
            rdatevals = []
            exrulevals = []
            exdatevals = []
            for line in lines:
                if not line:
                    continue
                if line.find(':') == -1:
                    name = "RRULE"
                    value = line
                else:
                    name, value = line.split(':', 1)
                parms = name.split(';')
                if not parms:
                    raise ValueError("empty property name")
                name = parms[0]
                parms = parms[1:]
                if name == "RRULE":
                    for parm in parms:
                        raise ValueError("unsupported RRULE parm: " + parm)
                    rrulevals.append(value)
                elif name == "RDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            raise ValueError("unsupported RDATE parm: " + parm)
                    rdatevals.append(value)
                elif name == "EXRULE":
                    for parm in parms:
                        raise ValueError("unsupported EXRULE parm: " + parm)
                    exrulevals.append(value)
                elif name == "EXDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            raise ValueError(
                                "unsupported EXDATE parm: " + parm)
                    exdatevals.append(value)
                elif name == "DTSTART":
                    for parm in parms:
                        raise ValueError("unsupported DTSTART parm: " + parm)
                    if not parser:
                        from dateutil import parser
                    dtstart = parser.parse(value, ignoretz=ignoretz,
                                           tzinfos=tzinfos)
                else:
                    raise ValueError("unsupported property: " + name)
            if (forceset or len(rrulevals) > 1 or rdatevals
                    or exrulevals or exdatevals):
                if not parser and (rdatevals or exdatevals):
                    from dateutil import parser
                rset = rruleset(cache=cache)
                for value in rrulevals:
                    rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                     ignoretz=ignoretz,
                                                     tzinfos=tzinfos))
                for value in rdatevals:
                    for datestr in value.split(','):
                        rset.rdate(parser.parse(datestr,
                                                ignoretz=ignoretz,
                                                tzinfos=tzinfos))
                for value in exrulevals:
                    rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                      ignoretz=ignoretz,
                                                      tzinfos=tzinfos))
                for value in exdatevals:
                    for datestr in value.split(','):
                        rset.exdate(parser.parse(datestr,
                                                 ignoretz=ignoretz,
                                                 tzinfos=tzinfos))
                if compatible and dtstart:
                    rset.rdate(dtstart)
                return rset
            else:
                return self._parse_rfc_rrule(rrulevals[0],
                                             dtstart=dtstart,
                                             cache=cache,
                                             ignoretz=ignoretz,
                                             tzinfos=tzinfos)

    def __call__(self, s, **kwargs):
        """Parse *s*; keyword arguments are forwarded to
        :meth:`_parse_rfc`."""
        return self._parse_rfc(s, **kwargs)
|
class _rrulestr(object):
def _handle_int(self, rrkwargs, name, value, **kwargs):
pass
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
pass
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
pass
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
pass
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
pass
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
'''
Two ways to specify this: +1MO or MO(+1)
'''
pass
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
pass
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
pass
def __call__(self, s, **kwargs):
pass
| 10 | 1 | 20 | 0 | 20 | 1 | 6 | 0.03 | 1 | 7 | 2 | 0 | 9 | 0 | 9 | 9 | 217 | 14 | 198 | 62 | 172 | 5 | 144 | 51 | 129 | 34 | 1 | 5 | 55 |
147,963 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/relativedelta.py
|
constant2.pkg.superjson.pkg.dateutil.relativedelta.relativedelta
|
class relativedelta(object):
"""
The relativedelta type is based on the specification of the excellent
work done by M.-A. Lemburg in his
`mx.DateTime <http://www.egenix.com/files/python/mxDateTime.html>`_ extension.
However, notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
There are two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes::
relativedelta(datetime1, datetime2)
The second one is passing it any number of the following keyword arguments::
relativedelta(arg1=x,arg2=y,arg3=z...)
year, month, day, hour, minute, second, microsecond:
Absolute information (argument is singular); adding or subtracting a
relativedelta with absolute information does not perform an aritmetic
operation, but rather REPLACES the corresponding value in the
original datetime with the value(s) in relativedelta.
years, months, weeks, days, hours, minutes, seconds, microseconds:
Relative information, may be negative (argument is plural); adding
or subtracting a relativedelta with relative information performs
the corresponding aritmetic operation on the original datetime value
with the information in the relativedelta.
weekday:
One of the weekday instances (MO, TU, etc). These instances may
receive a parameter N, specifying the Nth weekday, which could
be positive or negative (like MO(+1) or MO(-2). Not specifying
it is the same as specifying +1. You can also use an integer,
where 0=MO.
leapdays:
Will add given days to the date found, if year is a leap
year, and the date found is post 28 of february.
yearday, nlyearday:
Set the yearday or the non-leap year day (jump leap days).
These are converted to day/month/leapdays information.
Here is the behavior of operations with relativedelta:
1. Calculate the absolute year, using the 'year' argument, or the
original datetime year, if the argument is not present.
2. Add the relative 'years' argument to the absolute year.
3. Do steps 1 and 2 for month/months.
4. Calculate the absolute day, using the 'day' argument, or the
original datetime day, if the argument is not present. Then,
subtract from the day until it fits in the year and month
found after their operations.
5. Add the relative 'days' argument to the absolute day. Notice
that the 'weeks' argument is multiplied by 7 and added to
'days'.
6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
microsecond/microseconds.
7. If the 'weekday' argument is present, calculate the weekday,
with the given (wday, nth) tuple. wday is the index of the
weekday (0-6, 0=Mon), and nth is the number of weeks to add
forward or backward, depending on its signal. Notice that if
the calculated date is already Monday, for example, using
(0, 1) or (0, -1) won't change the day.
"""
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        """Build either the difference of two dates (``dt1 - dt2``) or a
        delta from the keyword arguments; see the class docstring for the
        meaning of each keyword."""
        # Check for non-integer values in integer-only quantities
        if any(x is not None and x != int(x) for x in (years, months)):
            raise ValueError("Non-integer years and months are "
                             "ambiguous and not currently supported.")

        if dt1 and dt2:
            # datetime is a subclass of date. So both must be date
            if not (isinstance(dt1, datetime.date) and
                    isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")

            # We allow two dates, or two datetimes, so we coerce them to be
            # of the same type
            if (isinstance(dt1, datetime.datetime) !=
                    isinstance(dt2, datetime.datetime)):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())

            # Start from a zero delta; the difference is computed below.
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            # Get year / month delta between the two
            months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
            self._set_months(months)

            # Remove the year/month delta so the timedelta is just well-defined
            # time units (seconds, days and microseconds)
            dtm = self.__radd__(dt2)

            # If we've overshot our target, make an adjustment
            if dt1 < dt2:
                compare = operator.gt
                increment = 1
            else:
                compare = operator.lt
                increment = -1

            while compare(dt1, dtm):
                months += increment
                self._set_months(months)
                dtm = self.__radd__(dt2)

            # Get the timedelta between the "months-adjusted" date and dt1
            delta = dt1 - dtm
            self.seconds = delta.seconds + delta.days * 86400
            self.microseconds = delta.microseconds
        else:
            # Relative information
            self.years = years
            self.months = months
            self.days = days + weeks * 7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds

            # Absolute information
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if any(x is not None and int(x) != x
                   for x in (year, month, day, hour,
                             minute, second, microsecond)):
                # For now we'll deprecate floats - later it'll be an error.
                warn("Non-integer value passed as absolute information. " +
                     "This is not a well-defined condition and will raise " +
                     "errors in future versions.", DeprecationWarning)

            if isinstance(weekday, integer_types):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                # Map the year-day onto (month, day) via the cumulative
                # month-end day counts of a leap year.
                ydayidx = [31, 59, 90, 120, 151, 181, 212,
                           243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx + 1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday - ydayidx[idx - 1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()
def _fix(self):
if abs(self.microseconds) > 999999:
s = _sign(self.microseconds)
div, mod = divmod(self.microseconds * s, 1000000)
self.microseconds = mod * s
self.seconds += div * s
if abs(self.seconds) > 59:
s = _sign(self.seconds)
div, mod = divmod(self.seconds * s, 60)
self.seconds = mod * s
self.minutes += div * s
if abs(self.minutes) > 59:
s = _sign(self.minutes)
div, mod = divmod(self.minutes * s, 60)
self.minutes = mod * s
self.hours += div * s
if abs(self.hours) > 23:
s = _sign(self.hours)
div, mod = divmod(self.hours * s, 24)
self.hours = mod * s
self.days += div * s
if abs(self.months) > 11:
s = _sign(self.months)
div, mod = divmod(self.months * s, 12)
self.months = mod * s
self.years += div * s
if (self.hours or self.minutes or self.seconds or self.microseconds
or self.hour is not None or self.minute is not None or
self.second is not None or self.microsecond is not None):
self._has_time = 1
else:
self._has_time = 0
@property
def weeks(self):
return self.days // 7
@weeks.setter
def weeks(self, value):
self.days = self.days - (self.weeks * 7) + value * 7
def _set_months(self, months):
self.months = months
if abs(self.months) > 11:
s = _sign(self.months)
div, mod = divmod(self.months * s, 12)
self.months = mod * s
self.years = div * s
else:
self.years = 0
def normalized(self):
"""
Return a version of this object represented entirely using integer
values for the relative attributes.
>>> relativedelta(days=1.5, hours=2).normalized()
relativedelta(days=1, hours=14)
:return:
Returns a :class:`dateutil.relativedelta.relativedelta` object.
"""
# Cascade remainders down (rounding each to roughly nearest
# microsecond)
days = int(self.days)
hours_f = round(self.hours + 24 * (self.days - days), 11)
hours = int(hours_f)
minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
minutes = int(minutes_f)
seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
seconds = int(seconds_f)
microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
# Constructor carries overflow back up with call to _fix()
return self.__class__(years=self.years, months=self.months,
days=days, hours=hours, minutes=minutes,
seconds=seconds, microseconds=microseconds,
leapdays=self.leapdays, year=self.year,
month=self.month, day=self.day,
weekday=self.weekday, hour=self.hour,
minute=self.minute, second=self.second,
microsecond=self.microsecond)
    def __add__(self, other):
        """Add this relativedelta to ``other``.

        Three operand kinds are supported:

        * ``relativedelta`` -- merged field by field: relative fields add,
          ``leapdays`` keeps the first truthy value, and absolute fields
          prefer ``other``'s values over ``self``'s.
        * ``datetime.timedelta`` -- its days/seconds/microseconds fold into
          the corresponding relative fields.
        * ``datetime.date``/``datetime.datetime`` -- the delta is applied:
          absolute fields replace, relative fields shift, and ``weekday``
          snaps the result to the requested day of week.

        Any other type yields ``NotImplemented``.
        """
        if isinstance(other, relativedelta):
            # relativedelta + relativedelta: component-wise merge.
            return self.__class__(years=other.years + self.years,
                                  months=other.months + self.months,
                                  days=other.days + self.days,
                                  hours=other.hours + self.hours,
                                  minutes=other.minutes + self.minutes,
                                  seconds=other.seconds + self.seconds,
                                  microseconds=(other.microseconds +
                                                self.microseconds),
                                  leapdays=other.leapdays or self.leapdays,
                                  year=(other.year if other.year is not None
                                        else self.year),
                                  month=(other.month if other.month is not None
                                         else self.month),
                                  day=(other.day if other.day is not None
                                       else self.day),
                                  weekday=(other.weekday if other.weekday is not None
                                           else self.weekday),
                                  hour=(other.hour if other.hour is not None
                                        else self.hour),
                                  minute=(other.minute if other.minute is not None
                                          else self.minute),
                                  second=(other.second if other.second is not None
                                          else self.second),
                                  microsecond=(other.microsecond if other.microsecond
                                               is not None else
                                               self.microsecond))
        if isinstance(other, datetime.timedelta):
            # relativedelta + timedelta: absorb its components.
            return self.__class__(years=self.years,
                                  months=self.months,
                                  days=self.days + other.days,
                                  hours=self.hours,
                                  minutes=self.minutes,
                                  seconds=self.seconds + other.seconds,
                                  microseconds=self.microseconds + other.microseconds,
                                  leapdays=self.leapdays,
                                  year=self.year,
                                  month=self.month,
                                  day=self.day,
                                  weekday=self.weekday,
                                  hour=self.hour,
                                  minute=self.minute,
                                  second=self.second,
                                  microsecond=self.microsecond)
        if not isinstance(other, datetime.date):
            return NotImplemented
        elif self._has_time and not isinstance(other, datetime.datetime):
            # A time component requires promoting the date to a datetime.
            other = datetime.datetime.fromordinal(other.toordinal())
        year = (self.year or other.year) + self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            # Wrap the month into 1..12, carrying into the year.
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        # Clamp the day to the last valid day of the target month.
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            # Snap to the Nth requested weekday (n=None behaves like n=1):
            # jump whole weeks first, then the remaining days forward or
            # backward depending on the sign of n.
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth) - 1) * 7
            if nth > 0:
                jumpdays += (7 - ret.weekday() + weekday) % 7
            else:
                jumpdays += (ret.weekday() - weekday) % 7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret
    def __radd__(self, other):
        # Addition is commutative for every supported operand type, so
        # simply delegate to __add__ (which returns NotImplemented for
        # unsupported types).
        return self.__add__(other)
    def __rsub__(self, other):
        # other - self is computed as (-self) + other.
        return self.__neg__().__radd__(other)
def __sub__(self, other):
if not isinstance(other, relativedelta):
return NotImplemented # In case the other object defines __rsub__
return self.__class__(years=self.years - other.years,
months=self.months - other.months,
days=self.days - other.days,
hours=self.hours - other.hours,
minutes=self.minutes - other.minutes,
seconds=self.seconds - other.seconds,
microseconds=self.microseconds - other.microseconds,
leapdays=self.leapdays or other.leapdays,
year=(self.year if self.year is not None
else other.year),
month=(self.month if self.month is not None else
other.month),
day=(self.day if self.day is not None else
other.day),
weekday=(self.weekday if self.weekday is not None else
other.weekday),
hour=(self.hour if self.hour is not None else
other.hour),
minute=(self.minute if self.minute is not None else
other.minute),
second=(self.second if self.second is not None else
other.second),
microsecond=(self.microsecond if self.microsecond
is not None else
other.microsecond))
def __neg__(self):
return self.__class__(years=-self.years,
months=-self.months,
days=-self.days,
hours=-self.hours,
minutes=-self.minutes,
seconds=-self.seconds,
microseconds=-self.microseconds,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __bool__(self):
return not (not self.years and
not self.months and
not self.days and
not self.hours and
not self.minutes and
not self.seconds and
not self.microseconds and
not self.leapdays and
self.year is None and
self.month is None and
self.day is None and
self.weekday is None and
self.hour is None and
self.minute is None and
self.second is None and
self.microsecond is None)
# Compatibility with Python 2.x
__nonzero__ = __bool__
def __mul__(self, other):
try:
f = float(other)
except TypeError:
return NotImplemented
return self.__class__(years=int(self.years * f),
months=int(self.months * f),
days=int(self.days * f),
hours=int(self.hours * f),
minutes=int(self.minutes * f),
seconds=int(self.seconds * f),
microseconds=int(self.microseconds * f),
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
__rmul__ = __mul__
def __eq__(self, other):
if not isinstance(other, relativedelta):
return NotImplemented
if self.weekday or other.weekday:
if not self.weekday or not other.weekday:
return False
if self.weekday.weekday != other.weekday.weekday:
return False
n1, n2 = self.weekday.n, other.weekday.n
if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
return False
return (self.years == other.years and
self.months == other.months and
self.days == other.days and
self.hours == other.hours and
self.minutes == other.minutes and
self.seconds == other.seconds and
self.microseconds == other.microseconds and
self.leapdays == other.leapdays and
self.year == other.year and
self.month == other.month and
self.day == other.day and
self.hour == other.hour and
self.minute == other.minute and
self.second == other.second and
self.microsecond == other.microsecond)
__hash__ = None
def __ne__(self, other):
return not self.__eq__(other)
def __div__(self, other):
try:
reciprocal = 1 / float(other)
except TypeError:
return NotImplemented
return self.__mul__(reciprocal)
__truediv__ = __div__
def __repr__(self):
l = []
for attr in ["years", "months", "days", "leapdays",
"hours", "minutes", "seconds", "microseconds"]:
value = getattr(self, attr)
if value:
l.append("{attr}={value:+g}".format(attr=attr, value=value))
for attr in ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
l.append("{attr}={value}".format(attr=attr, value=repr(value)))
return "{classname}({attrs})".format(classname=self.__class__.__name__,
attrs=", ".join(l))
|
class relativedelta(object):
    '''
    The relativedelta type is based on the specification of the excellent
    work done by M.-A. Lemburg in his
    `mx.DateTime <http://www.egenix.com/files/python/mxDateTime.html>`_ extension.
    However, notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
    There are two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes::
    relativedelta(datetime1, datetime2)
    The second one is passing it any number of the following keyword arguments::
    relativedelta(arg1=x,arg2=y,arg3=z...)
    year, month, day, hour, minute, second, microsecond:
    Absolute information (argument is singular); adding or subtracting a
    relativedelta with absolute information does not perform an aritmetic
    operation, but rather REPLACES the corresponding value in the
    original datetime with the value(s) in relativedelta.
    years, months, weeks, days, hours, minutes, seconds, microseconds:
    Relative information, may be negative (argument is plural); adding
    or subtracting a relativedelta with relative information performs
    the corresponding aritmetic operation on the original datetime value
    with the information in the relativedelta.
    weekday:
    One of the weekday instances (MO, TU, etc). These instances may
    receive a parameter N, specifying the Nth weekday, which could
    be positive or negative (like MO(+1) or MO(-2). Not specifying
    it is the same as specifying +1. You can also use an integer,
    where 0=MO.
    leapdays:
    Will add given days to the date found, if year is a leap
    year, and the date found is post 28 of february.
    yearday, nlyearday:
    Set the yearday or the non-leap year day (jump leap days).
    These are converted to day/month/leapdays information.
    Here is the behavior of operations with relativedelta:
    1. Calculate the absolute year, using the 'year' argument, or the
    original datetime year, if the argument is not present.
    2. Add the relative 'years' argument to the absolute year.
    3. Do steps 1 and 2 for month/months.
    4. Calculate the absolute day, using the 'day' argument, or the
    original datetime day, if the argument is not present. Then,
    subtract from the day until it fits in the year and month
    found after their operations.
    5. Add the relative 'days' argument to the absolute day. Notice
    that the 'weeks' argument is multiplied by 7 and added to
    'days'.
    6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
    microsecond/microseconds.
    7. If the 'weekday' argument is present, calculate the weekday,
    with the given (wday, nth) tuple. wday is the index of the
    weekday (0-6, 0=Mon), and nth is the number of weeks to add
    forward or backward, depending on its signal. Notice that if
    the calculated date is already Monday, for example, using
    (0, 1) or (0, -1) won't change the day.
    '''
    # NOTE(review): interface skeleton -- every method body is a stub
    # (``pass``); behavior lives in the concrete relativedelta
    # implementation elsewhere in this file.

    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        pass

    def _fix(self):
        pass

    @property
    def weeks(self):
        pass

    @weeks.setter
    def weeks(self, value):
        # BUG FIX: a property setter must accept the assigned value;
        # the original ``def weeks(self)`` made ``obj.weeks = n`` raise
        # TypeError.
        pass

    def _set_months(self, months):
        pass

    def normalized(self):
        '''
        Return a version of this object represented entirely using integer
        values for the relative attributes.
        >>> relativedelta(days=1.5, hours=2).normalized()
        relativedelta(days=1, hours=14)
        :return:
            Returns a :class:`dateutil.relativedelta.relativedelta` object.
        '''
        pass

    def __add__(self, other):
        pass

    def __radd__(self, other):
        pass

    def __rsub__(self, other):
        pass

    def __sub__(self, other):
        pass

    def __neg__(self):
        pass

    def __bool__(self):
        pass

    def __mul__(self, other):
        pass

    def __eq__(self, other):
        pass

    def __ne__(self, other):
        pass

    def __div__(self, other):
        pass

    def __repr__(self):
        pass
147,964 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/parser.py
|
constant2.pkg.superjson.pkg.dateutil.parser.parserinfo
|
class parserinfo(object):
    """
    Class which handles what inputs are accepted. Subclass this to customize
    the language and acceptable values for each parameter.

    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
        ``yearfirst`` is set to ``True``, this distinguishes between YDM
        and YMD. Default is ``False``.

    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the year. If ``True``, the first number is taken
        to be the year, otherwise the last number is taken to be the year.
        Default is ``False``.
    """

    # m from a.m/p.m, t from ISO T separator
    JUMP = [" ", ".", ",", ";", "-", "/", "'",
            "at", "on", "and", "ad", "m", "t", "of",
            "st", "nd", "rd", "th"]
    WEEKDAYS = [("Mon", "Monday"),
                ("Tue", "Tuesday"),
                ("Wed", "Wednesday"),
                ("Thu", "Thursday"),
                ("Fri", "Friday"),
                ("Sat", "Saturday"),
                ("Sun", "Sunday")]
    MONTHS = [("Jan", "January"),
              ("Feb", "February"),
              ("Mar", "March"),
              ("Apr", "April"),
              ("May", "May"),
              ("Jun", "June"),
              ("Jul", "July"),
              ("Aug", "August"),
              ("Sep", "Sept", "September"),
              ("Oct", "October"),
              ("Nov", "November"),
              ("Dec", "December")]
    HMS = [("h", "hour", "hours"),
           ("m", "minute", "minutes"),
           ("s", "second", "seconds")]
    AMPM = [("am", "a"),
            ("pm", "p")]
    UTCZONE = ["UTC", "GMT", "Z"]
    PERTAIN = ["of"]
    TZOFFSET = {}

    def __init__(self, dayfirst=False, yearfirst=False):
        # Pre-build lowercase word -> index lookup tables from the
        # class-level word lists above.
        for attr, words in (("_jump", self.JUMP),
                            ("_weekdays", self.WEEKDAYS),
                            ("_months", self.MONTHS),
                            ("_hms", self.HMS),
                            ("_ampm", self.AMPM),
                            ("_utczone", self.UTCZONE),
                            ("_pertain", self.PERTAIN)):
            setattr(self, attr, self._convert(words))
        self.dayfirst = dayfirst
        self.yearfirst = yearfirst
        # Current year and its century, used to expand two-digit years.
        self._year = time.localtime().tm_year
        self._century = self._year // 100 * 100

    def _convert(self, lst):
        """Map every spelling (lowercased) to its index within ``lst``."""
        table = {}
        for idx, spellings in enumerate(lst):
            if not isinstance(spellings, tuple):
                spellings = (spellings,)
            for word in spellings:
                table[word.lower()] = idx
        return table

    def jump(self, name):
        """True if ``name`` is a skippable filler token."""
        return name.lower() in self._jump

    def weekday(self, name):
        """Weekday index (0=Mon) for ``name``, or None if unrecognized."""
        shortest = min(len(n) for n in self._weekdays)
        if len(name) >= shortest:
            return self._weekdays.get(name.lower())
        return None

    def month(self, name):
        """Month number (1-12) for ``name``, or None if unrecognized."""
        shortest = min(len(n) for n in self._months)
        if len(name) >= shortest:
            idx = self._months.get(name.lower())
            if idx is not None:
                return idx + 1
        return None

    def hms(self, name):
        """0/1/2 for an hour/minute/second unit word, else None."""
        return self._hms.get(name.lower())

    def ampm(self, name):
        """0 for an AM marker, 1 for PM, else None."""
        return self._ampm.get(name.lower())

    def pertain(self, name):
        """True for linking words like 'of' (as in 'Jan of 01')."""
        return name.lower() in self._pertain

    def utczone(self, name):
        """True if ``name`` (case-insensitively) denotes UTC."""
        return name.lower() in self._utczone

    def tzoffset(self, name):
        """Offset in seconds for a known timezone name, else None.

        NOTE: the UTC-zone membership test here is case-sensitive
        (table keys are lowercase), preserving the original behavior.
        """
        return 0 if name in self._utczone else self.TZOFFSET.get(name)

    def convertyear(self, year, century_specified=False):
        """Expand a two-digit year to the century closest to today."""
        if year < 100 and not century_specified:
            year += self._century
            if abs(year - self._year) >= 50:
                year += 100 if year < self._year else -100
        return year

    def validate(self, res):
        # move to info
        if res.year is not None:
            res.year = self.convertyear(res.year, res.century_specified)
        # ``and`` binds tighter than ``or``: either a bare zero offset
        # with no name, or an explicit 'Z', means UTC.
        if (res.tzoffset == 0 and not res.tzname) or res.tzname == 'Z':
            res.tzname = "UTC"
            res.tzoffset = 0
        elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
            res.tzoffset = 0
        return True
|
class parserinfo(object):
    '''
    Class which handles what inputs are accepted. Subclass this to customize
    the language and acceptable values for each parameter.
    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
        ``yearfirst`` is set to ``True``, this distinguishes between YDM
        and YMD. Default is ``False``.
    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the year. If ``True``, the first number is taken
        to be the year, otherwise the last number is taken to be the year.
        Default is ``False``.
    '''
    # NOTE(review): interface skeleton only -- every method body is a stub
    # (``pass``); the signatures mirror the concrete ``parserinfo``
    # implementation defined earlier in this file. The class-level word
    # lists (JUMP, WEEKDAYS, MONTHS, ...) of that implementation are not
    # reproduced here.

    def __init__(self, dayfirst=False, yearfirst=False):
        # Stub: the real initializer builds lowercase lookup tables and
        # records dayfirst/yearfirst plus the current year and century.
        pass

    def _convert(self, lst):
        # Stub: maps each (possibly tuple-grouped) spelling to its index.
        pass

    def jump(self, name):
        # Stub: membership test against the filler-token list.
        pass

    def weekday(self, name):
        # Stub: weekday index lookup (0=Mon) or None.
        pass

    def month(self, name):
        # Stub: month number lookup (1-12) or None.
        pass

    def hms(self, name):
        # Stub: 0/1/2 for hour/minute/second unit words, else None.
        pass

    def ampm(self, name):
        # Stub: 0 for AM markers, 1 for PM, else None.
        pass

    def pertain(self, name):
        # Stub: membership test for linking words such as 'of'.
        pass

    def utczone(self, name):
        # Stub: case-insensitive UTC-zone membership test.
        pass

    def tzoffset(self, name):
        # Stub: timezone-name -> offset-in-seconds lookup.
        pass

    def convertyear(self, year, century_specified=False):
        # Stub: expands two-digit years to the nearest century.
        pass

    def validate(self, res):
        # Stub: post-parse normalization of year and timezone fields.
        pass
147,965 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/parser.py
|
constant2.pkg.superjson.pkg.dateutil.parser.parser
|
class parser(object):
    """Date/time string parser driven by a :class:`parserinfo` instance.

    ``parse`` tokenizes the input, delegates the heavy lifting to
    ``_parse``, then merges the recognized fields into a default
    datetime.
    """

    def __init__(self, info=None):
        # ``info`` supplies the vocabulary (month/weekday names, jump
        # words, timezone aliases); default to the stock English one.
        self.info = info or parserinfo()

    def parse(self, timestr, default=None, ignoretz=False, tzinfos=None, **kwargs):
        """
        Parse the date/time string into a :class:`datetime.datetime` object.
        :param timestr:
            Any date/time string using the supported formats.
        :param default:
            The default datetime object, if this is a datetime object and not
            ``None``, elements specified in ``timestr`` replace elements in the
            default object.
        :param ignoretz:
            If set ``True``, time zones in parsed strings are ignored and a
            naive :class:`datetime.datetime` object is returned.
        :param tzinfos:
            Additional time zone names / aliases which may be present in the
            string. This argument maps time zone names (and optionally offsets
            from those time zones) to time zones. This parameter can be a
            dictionary with timezone aliases mapping time zone names to time
            zones or a function taking two parameters (``tzname`` and
            ``tzoffset``) and returning a time zone.
            The timezones to which the names are mapped can be an integer
            offset from UTC in minutes or a :class:`tzinfo` object.
            .. doctest::
               :options: +NORMALIZE_WHITESPACE
                >>> from dateutil.parser import parse
                >>> from dateutil.tz import gettz
                >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
                >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
                datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
                >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
                datetime.datetime(2012, 1, 19, 17, 21,
                                  tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
            This parameter is ignored if ``ignoretz`` is set.
        :param **kwargs:
            Keyword arguments as passed to ``_parse()``.
        :return:
            Returns a :class:`datetime.datetime` object or, if the
            ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
            first element being a :class:`datetime.datetime` object, the second
            a tuple containing the fuzzy tokens.
        :raises ValueError:
            Raised for invalid or unknown string format, if the provided
            :class:`tzinfo` is not in a valid format, or if an invalid date
            would be created.
        :raises TypeError:
            Raised for non-string or character stream input.
        :raises OverflowError:
            Raised if the parsed date exceeds the largest valid C integer on
            your system.
        """
        if default is None:
            default = datetime.datetime.now().replace(hour=0, minute=0,
                                                      second=0, microsecond=0)

        res, skipped_tokens = self._parse(timestr, **kwargs)

        if res is None:
            raise ValueError("Unknown string format")

        if len(res) == 0:
            raise ValueError("String does not contain a date.")

        # Collect the fields the parser actually recognized; everything
        # else keeps the value from ``default``.
        repl = {}
        for attr in ("year", "month", "day", "hour",
                     "minute", "second", "microsecond"):
            value = getattr(res, attr)
            if value is not None:
                repl[attr] = value

        if 'day' not in repl:
            # If the default day exceeds the last day of the month, fall back to
            # the end of the month.
            cyear = default.year if res.year is None else res.year
            cmonth = default.month if res.month is None else res.month
            cday = default.day if res.day is None else res.day

            if cday > monthrange(cyear, cmonth)[1]:
                repl['day'] = monthrange(cyear, cmonth)[1]

        ret = default.replace(**repl)

        if res.weekday is not None and not res.day:
            ret = ret + relativedelta.relativedelta(weekday=res.weekday)

        if not ignoretz:
            # BUG FIX: ``isinstance(tzinfos, collections.Callable)`` relied
            # on the collections ABC aliases removed in Python 3.10; the
            # builtin ``callable()`` has identical semantics.
            if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
                if callable(tzinfos):
                    tzdata = tzinfos(res.tzname, res.tzoffset)
                else:
                    tzdata = tzinfos.get(res.tzname)

                if isinstance(tzdata, datetime.tzinfo):
                    tzinfo = tzdata
                elif isinstance(tzdata, text_type):
                    tzinfo = tz.tzstr(tzdata)
                elif isinstance(tzdata, integer_types):
                    tzinfo = tz.tzoffset(res.tzname, tzdata)
                else:
                    raise ValueError("Offset must be tzinfo subclass, "
                                     "tz string, or int offset.")
                ret = ret.replace(tzinfo=tzinfo)
            elif res.tzname and res.tzname in time.tzname:
                ret = ret.replace(tzinfo=tz.tzlocal())
            elif res.tzoffset == 0:
                ret = ret.replace(tzinfo=tz.tzutc())
            elif res.tzoffset:
                ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))

        if kwargs.get('fuzzy_with_tokens', False):
            return ret, skipped_tokens
        else:
            return ret

    # Mutable record of everything recognized during one _parse() run.
    class _result(_resultbase):
        __slots__ = ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond",
                     "tzname", "tzoffset", "ampm"]

    def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
               fuzzy_with_tokens=False):
        """
        Private method which performs the heavy lifting of parsing, called from
        ``parse()``, which passes on its ``kwargs`` to this function.
        :param timestr:
            The string to parse.
        :param dayfirst:
            Whether to interpret the first value in an ambiguous 3-integer date
            (e.g. 01/05/09) as the day (``True``) or month (``False``). If
            ``yearfirst`` is set to ``True``, this distinguishes between YDM
            and YMD. If set to ``None``, this value is retrieved from the
            current :class:`parserinfo` object (which itself defaults to
            ``False``).
        :param yearfirst:
            Whether to interpret the first value in an ambiguous 3-integer date
            (e.g. 01/05/09) as the year. If ``True``, the first number is taken
            to be the year, otherwise the last number is taken to be the year.
            If this is set to ``None``, the value is retrieved from the current
            :class:`parserinfo` object (which itself defaults to ``False``).
        :param fuzzy:
            Whether to allow fuzzy parsing, allowing for string like "Today is
            January 1, 2047 at 8:21:00AM".
        :param fuzzy_with_tokens:
            If ``True``, ``fuzzy`` is automatically set to True, and the parser
            will return a tuple where the first element is the parsed
            :class:`datetime.datetime` datetimestamp and the second element is
            a tuple containing the portions of the string which were ignored:
            .. doctest::
                >>> from dateutil.parser import parse
                >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
                (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
        """
        if fuzzy_with_tokens:
            fuzzy = True

        info = self.info

        if dayfirst is None:
            dayfirst = info.dayfirst

        if yearfirst is None:
            yearfirst = info.yearfirst

        res = self._result()
        l = _timelex.split(timestr)  # Splits the timestr into tokens

        # keep up with the last token skipped so we can recombine
        # consecutively skipped tokens (-2 for when i begins at 0).
        last_skipped_token_i = -2
        skipped_tokens = list()

        try:
            # year/month/day list
            ymd = _ymd(timestr)

            # Index of the month string in ymd
            mstridx = -1

            len_l = len(l)
            i = 0
            while i < len_l:

                # Check if it's a number
                try:
                    value_repr = l[i]
                    value = float(value_repr)
                except ValueError:
                    value = None

                if value is not None:
                    # Token is a number
                    len_li = len(l[i])
                    i += 1

                    if (len(ymd) == 3 and len_li in (2, 4)
                        and res.hour is None and (i >= len_l or (l[i] != ':' and
                                                  info.hms(l[i]) is None))):
                        # 19990101T23[59]
                        s = l[i - 1]
                        res.hour = int(s[:2])

                        if len_li == 4:
                            res.minute = int(s[2:])

                    elif len_li == 6 or (len_li > 6 and l[i - 1].find('.') == 6):
                        # YYMMDD or HHMMSS[.ss]
                        s = l[i - 1]

                        if not ymd and l[i - 1].find('.') == -1:
                            # ymd.append(info.convertyear(int(s[:2])))

                            ymd.append(s[:2])
                            ymd.append(s[2:4])
                            ymd.append(s[4:])
                        else:
                            # 19990101T235959[.59]
                            res.hour = int(s[:2])
                            res.minute = int(s[2:4])
                            res.second, res.microsecond = _parsems(s[4:])

                    elif len_li in (8, 12, 14):
                        # YYYYMMDD
                        s = l[i - 1]
                        ymd.append(s[:4])
                        ymd.append(s[4:6])
                        ymd.append(s[6:8])

                        if len_li > 8:
                            res.hour = int(s[8:10])
                            res.minute = int(s[10:12])

                            if len_li > 12:
                                res.second = int(s[12:])

                    elif ((i < len_l and info.hms(l[i]) is not None) or
                          (i + 1 < len_l and l[i] == ' ' and
                           info.hms(l[i + 1]) is not None)):
                        # HH[ ]h or MM[ ]m or SS[.ss][ ]s
                        if l[i] == ' ':
                            i += 1

                        idx = info.hms(l[i])

                        while True:
                            if idx == 0:
                                res.hour = int(value)

                                if value % 1:
                                    res.minute = int(60 * (value % 1))

                            elif idx == 1:
                                res.minute = int(value)

                                if value % 1:
                                    res.second = int(60 * (value % 1))

                            elif idx == 2:
                                res.second, res.microsecond = \
                                    _parsems(value_repr)

                            i += 1

                            if i >= len_l or idx == 2:
                                break

                            # 12h00
                            try:
                                value_repr = l[i]
                                value = float(value_repr)
                            except ValueError:
                                break
                            else:
                                i += 1
                                idx += 1

                                if i < len_l:
                                    newidx = info.hms(l[i])

                                    if newidx is not None:
                                        idx = newidx

                    elif (i == len_l and l[i - 2] == ' ' and
                          info.hms(l[i - 3]) is not None):
                        # X h MM or X m SS
                        idx = info.hms(l[i - 3])

                        if idx == 0:  # h
                            res.minute = int(value)

                            sec_remainder = value % 1
                            if sec_remainder:
                                res.second = int(60 * sec_remainder)
                        elif idx == 1:  # m
                            res.second, res.microsecond = \
                                _parsems(value_repr)

                        # We don't need to advance the tokens here because the
                        # i == len_l call indicates that we're looking at all
                        # the tokens already.

                    elif i + 1 < len_l and l[i] == ':':
                        # HH:MM[:SS[.ss]]
                        res.hour = int(value)
                        i += 1
                        value = float(l[i])
                        res.minute = int(value)

                        if value % 1:
                            res.second = int(60 * (value % 1))

                        i += 1

                        if i < len_l and l[i] == ':':
                            res.second, res.microsecond = _parsems(l[i + 1])
                            i += 2

                    elif i < len_l and l[i] in ('-', '/', '.'):
                        sep = l[i]
                        ymd.append(value_repr)
                        i += 1

                        if i < len_l and not info.jump(l[i]):
                            try:
                                # 01-01[-01]
                                ymd.append(l[i])
                            except ValueError:
                                # 01-Jan[-01]
                                value = info.month(l[i])

                                if value is not None:
                                    ymd.append(value)
                                    assert mstridx == -1
                                    mstridx = len(ymd) - 1
                                else:
                                    return None, None
                            i += 1

                            if i < len_l and l[i] == sep:
                                # We have three members
                                i += 1
                                value = info.month(l[i])

                                if value is not None:
                                    ymd.append(value)
                                    mstridx = len(ymd) - 1
                                    # NOTE(review): this assert follows the
                                    # assignment, so it always fails when a
                                    # month name appears here (caught by the
                                    # AssertionError handler below, failing
                                    # the parse). The sibling branch above
                                    # asserts *before* assigning -- this
                                    # ordering looks transposed; confirm
                                    # against upstream dateutil before
                                    # changing.
                                    assert mstridx == -1
                                else:
                                    ymd.append(l[i])
                                i += 1

                    elif i >= len_l or info.jump(l[i]):
                        if i + 1 < len_l and info.ampm(l[i + 1]) is not None:
                            # 12 am
                            res.hour = int(value)

                            if res.hour < 12 and info.ampm(l[i + 1]) == 1:
                                res.hour += 12
                            elif res.hour == 12 and info.ampm(l[i + 1]) == 0:
                                res.hour = 0

                            i += 1
                        else:
                            # Year, month or day
                            ymd.append(value)
                        i += 1

                    elif info.ampm(l[i]) is not None:
                        # 12am
                        res.hour = int(value)

                        if res.hour < 12 and info.ampm(l[i]) == 1:
                            res.hour += 12
                        elif res.hour == 12 and info.ampm(l[i]) == 0:
                            res.hour = 0
                        i += 1

                    elif not fuzzy:
                        return None, None
                    else:
                        i += 1
                    continue

                # Check weekday
                value = info.weekday(l[i])
                if value is not None:
                    res.weekday = value
                    i += 1
                    continue

                # Check month name
                value = info.month(l[i])
                if value is not None:
                    ymd.append(value)
                    assert mstridx == -1
                    mstridx = len(ymd) - 1

                    i += 1
                    if i < len_l:
                        if l[i] in ('-', '/'):
                            # Jan-01[-99]
                            sep = l[i]
                            i += 1
                            ymd.append(l[i])
                            i += 1

                            if i < len_l and l[i] == sep:
                                # Jan-01-99
                                i += 1
                                ymd.append(l[i])
                                i += 1

                        elif (i + 3 < len_l and l[i] == l[i + 2] == ' '
                              and info.pertain(l[i + 1])):
                            # Jan of 01
                            # In this case, 01 is clearly year
                            try:
                                value = int(l[i + 3])
                            except ValueError:
                                # Wrong guess
                                pass
                            else:
                                # Convert it here to become unambiguous
                                ymd.append(str(info.convertyear(value)))
                            i += 4
                    continue

                # Check am/pm
                value = info.ampm(l[i])
                if value is not None:
                    # For fuzzy parsing, 'a' or 'am' (both valid English words)
                    # may erroneously trigger the AM/PM flag. Deal with that
                    # here.
                    val_is_ampm = True

                    # If there's already an AM/PM flag, this one isn't one.
                    if fuzzy and res.ampm is not None:
                        val_is_ampm = False

                    # If AM/PM is found and hour is not, raise a ValueError
                    if res.hour is None:
                        if fuzzy:
                            val_is_ampm = False
                        else:
                            raise ValueError('No hour specified with ' +
                                             'AM or PM flag.')
                    elif not 0 <= res.hour <= 12:
                        # If AM/PM is found, it's a 12 hour clock, so raise
                        # an error for invalid range
                        if fuzzy:
                            val_is_ampm = False
                        else:
                            raise ValueError('Invalid hour specified for ' +
                                             '12-hour clock.')

                    if val_is_ampm:
                        if value == 1 and res.hour < 12:
                            res.hour += 12
                        elif value == 0 and res.hour == 12:
                            res.hour = 0

                        res.ampm = value

                    elif fuzzy:
                        last_skipped_token_i = self._skip_token(skipped_tokens,
                                                                last_skipped_token_i, i, l)
                    i += 1
                    continue

                # Check for a timezone name
                if (res.hour is not None and len(l[i]) <= 5 and
                        res.tzname is None and res.tzoffset is None and
                        not [x for x in l[i] if x not in
                             string.ascii_uppercase]):
                    res.tzname = l[i]
                    res.tzoffset = info.tzoffset(res.tzname)
                    i += 1

                    # Check for something like GMT+3, or BRST+3. Notice
                    # that it doesn't mean "I am 3 hours after GMT", but
                    # "my time +3 is GMT". If found, we reverse the
                    # logic so that timezone parsing code will get it
                    # right.
                    if i < len_l and l[i] in ('+', '-'):
                        l[i] = ('+', '-')[l[i] == '+']
                        res.tzoffset = None
                        if info.utczone(res.tzname):
                            # With something like GMT+3, the timezone
                            # is *not* GMT.
                            res.tzname = None
                    continue

                # Check for a numbered timezone
                if res.hour is not None and l[i] in ('+', '-'):
                    signal = (-1, 1)[l[i] == '+']
                    i += 1
                    len_li = len(l[i])

                    if len_li == 4:
                        # -0300
                        res.tzoffset = int(l[i][:2]) * \
                            3600 + int(l[i][2:]) * 60
                    elif i + 1 < len_l and l[i + 1] == ':':
                        # -03:00
                        res.tzoffset = int(l[i]) * 3600 + int(l[i + 2]) * 60
                        i += 2
                    elif len_li <= 2:
                        # -[0]3
                        res.tzoffset = int(l[i][:2]) * 3600
                    else:
                        return None, None
                    i += 1

                    res.tzoffset *= signal

                    # Look for a timezone name between parenthesis
                    if (i + 3 < len_l and
                        info.jump(l[i]) and l[i + 1] == '(' and l[i + 3] == ')' and
                            3 <= len(l[i + 2]) <= 5 and
                            not [x for x in l[i + 2]
                                 if x not in string.ascii_uppercase]):
                        # -0300 (BRST)
                        res.tzname = l[i + 2]
                        i += 4
                    continue

                # Check jumps
                if not (info.jump(l[i]) or fuzzy):
                    return None, None

                last_skipped_token_i = self._skip_token(skipped_tokens,
                                                        last_skipped_token_i, i, l)
                i += 1

            # Process year/month/day
            year, month, day = ymd.resolve_ymd(mstridx, yearfirst, dayfirst)
            if year is not None:
                res.year = year
                res.century_specified = ymd.century_specified

            if month is not None:
                res.month = month

            if day is not None:
                res.day = day

        except (IndexError, ValueError, AssertionError):
            # Any structural failure during token consumption means the
            # string is not parseable as a date.
            return None, None

        if not info.validate(res):
            return None, None

        if fuzzy_with_tokens:
            return res, tuple(skipped_tokens)
        else:
            return res, None

    @staticmethod
    def _skip_token(skipped_tokens, last_skipped_token_i, i, l):
        """Record token ``l[i]`` as skipped, merging runs of neighbors.

        Returns the new "last skipped" index so consecutive skips can be
        recombined into a single string.
        """
        if last_skipped_token_i == i - 1:
            # recombine the tokens
            skipped_tokens[-1] += l[i]
        else:
            # just append
            skipped_tokens.append(l[i])
        last_skipped_token_i = i
        return last_skipped_token_i
|
class parser(object):
def __init__(self, info=None):
pass
def parse(self, timestr, default=None, ignoretz=False, tzinfos=None, **kwargs):
'''
Parse the date/time string into a :class:`datetime.datetime` object.
:param timestr:
Any date/time string using the supported formats.
:param default:
The default datetime object, if this is a datetime object and not
``None``, elements specified in ``timestr`` replace elements in the
default object.
:param ignoretz:
If set ``True``, time zones in parsed strings are ignored and a
naive :class:`datetime.datetime` object is returned.
:param tzinfos:
Additional time zone names / aliases which may be present in the
string. This argument maps time zone names (and optionally offsets
from those time zones) to time zones. This parameter can be a
dictionary with timezone aliases mapping time zone names to time
zones or a function taking two parameters (``tzname`` and
``tzoffset``) and returning a time zone.
The timezones to which the names are mapped can be an integer
offset from UTC in minutes or a :class:`tzinfo` object.
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from dateutil.parser import parse
>>> from dateutil.tz import gettz
>>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21,
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
This parameter is ignored if ``ignoretz`` is set.
:param **kwargs:
Keyword arguments as passed to ``_parse()``.
:return:
Returns a :class:`datetime.datetime` object or, if the
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
first element being a :class:`datetime.datetime` object, the second
a tuple containing the fuzzy tokens.
:raises ValueError:
Raised for invalid or unknown string format, if the provided
:class:`tzinfo` is not in a valid format, or if an invalid date
would be created.
:raises TypeError:
Raised for non-string or character stream input.
:raises OverflowError:
Raised if the parsed date exceeds the largest valid C integer on
your system.
'''
pass
class _result(_resultbase):
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
fuzzy_with_tokens=False):
'''
Private method which performs the heavy lifting of parsing, called from
``parse()``, which passes on its ``kwargs`` to this function.
:param timestr:
The string to parse.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. If set to ``None``, this value is retrieved from the
current :class:`parserinfo` object (which itself defaults to
``False``).
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
If this is set to ``None``, the value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param fuzzy:
Whether to allow fuzzy parsing, allowing for string like "Today is
January 1, 2047 at 8:21:00AM".
:param fuzzy_with_tokens:
If ``True``, ``fuzzy`` is automatically set to True, and the parser
will return a tuple where the first element is the parsed
:class:`datetime.datetime` datetimestamp and the second element is
a tuple containing the portions of the string which were ignored:
.. doctest::
>>> from dateutil.parser import parse
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
'''
pass
@staticmethod
def _skip_token(skipped_tokens, last_skipped_token_i, i, l):
pass
| 7 | 2 | 146 | 28 | 84 | 36 | 26 | 0.42 | 1 | 15 | 5 | 0 | 3 | 1 | 4 | 4 | 595 | 115 | 341 | 40 | 333 | 142 | 275 | 38 | 269 | 79 | 1 | 8 | 104 |
147,966 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/parser.py
|
constant2.pkg.superjson.pkg.dateutil.parser._ymd
|
class _ymd(list):
def __init__(self, tzstr, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.century_specified = False
self.tzstr = tzstr
@staticmethod
def token_could_be_year(token, year):
try:
return int(token) == year
except ValueError:
return False
@staticmethod
def find_potential_year_tokens(year, tokens):
return [token for token in tokens if _ymd.token_could_be_year(token, year)]
def find_probable_year_index(self, tokens):
"""
attempt to deduce if a pre 100 year was lost
due to padded zeros being taken off
"""
for index, token in enumerate(self):
potential_year_tokens = _ymd.find_potential_year_tokens(
token, tokens)
if len(potential_year_tokens) == 1 and len(potential_year_tokens[0]) > 2:
return index
def append(self, val):
if hasattr(val, '__len__'):
if val.isdigit() and len(val) > 2:
self.century_specified = True
elif val > 100:
self.century_specified = True
super(self.__class__, self).append(int(val))
def resolve_ymd(self, mstridx, yearfirst, dayfirst):
len_ymd = len(self)
year, month, day = (None, None, None)
if len_ymd > 3:
raise ValueError("More than three YMD values")
elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
# One member, or two members with a month string
if mstridx != -1:
month = self[mstridx]
del self[mstridx]
if len_ymd > 1 or mstridx == -1:
if self[0] > 31:
year = self[0]
else:
day = self[0]
elif len_ymd == 2:
# Two members with numbers
if self[0] > 31:
# 99-01
year, month = self
elif self[1] > 31:
# 01-99
month, year = self
elif dayfirst and self[1] <= 12:
# 13-01
day, month = self
else:
# 01-13
month, day = self
elif len_ymd == 3:
# Three members
if mstridx == 0:
month, day, year = self
elif mstridx == 1:
if self[0] > 31 or (yearfirst and self[2] <= 31):
# 99-Jan-01
year, month, day = self
else:
# 01-Jan-01
# Give precendence to day-first, since
# two-digit years is usually hand-written.
day, month, year = self
elif mstridx == 2:
# WTF!?
if self[1] > 31:
# 01-99-Jan
day, year, month = self
else:
# 99-01-Jan
year, day, month = self
else:
if self[0] > 31 or \
self.find_probable_year_index(_timelex.split(self.tzstr)) == 0 or \
(yearfirst and self[1] <= 12 and self[2] <= 31):
# 99-01-01
if dayfirst and self[2] <= 12:
year, day, month = self
else:
year, month, day = self
elif self[0] > 12 or (dayfirst and self[1] <= 12):
# 13-01-01
day, month, year = self
else:
# 01-13-01
month, day, year = self
return year, month, day
|
class _ymd(list):
def __init__(self, tzstr, *args, **kwargs):
pass
@staticmethod
def token_could_be_year(token, year):
pass
@staticmethod
def find_potential_year_tokens(year, tokens):
pass
def find_probable_year_index(self, tokens):
'''
attempt to deduce if a pre 100 year was lost
due to padded zeros being taken off
'''
pass
def append(self, val):
pass
def resolve_ymd(self, mstridx, yearfirst, dayfirst):
pass
| 9 | 1 | 17 | 1 | 12 | 4 | 5 | 0.28 | 1 | 5 | 1 | 0 | 4 | 2 | 6 | 39 | 110 | 13 | 76 | 15 | 67 | 21 | 55 | 13 | 48 | 19 | 2 | 4 | 30 |
147,967 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/parser.py
|
constant2.pkg.superjson.pkg.dateutil.parser._tzparser
|
class _tzparser(object):
    """Parses POSIX-style ``TZ`` environment strings (e.g.
    ``"EST5EDT,M3.2.0/2,M11.1.0/2"``) into a ``_result`` describing the
    standard/DST abbreviations, their UTC offsets, and the DST
    start/end transition rules."""

    class _result(_resultbase):
        # std/dst abbreviations and offsets (seconds), plus the DST
        # start/end transition rules.
        __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
                     "start", "end"]

        class _attr(_resultbase):
            # One DST transition rule, expressed either by
            # month/week/weekday, by (Julian) year day, or by plain day,
            # plus the transition time of day in seconds.
            __slots__ = ["month", "week", "weekday",
                         "yday", "jyday", "day", "time"]

        def __repr__(self):
            return self._repr("")

        def __init__(self):
            _resultbase.__init__(self)
            self.start = self._attr()
            self.end = self._attr()

    def parse(self, tzstr):
        """Parse *tzstr* and return a filled ``_result``, or ``None`` when
        the string is not a recognized TZ specification.  Any index,
        conversion or assertion failure during scanning means "not a TZ
        string" and yields ``None``."""
        res = self._result()
        l = _timelex.split(tzstr)
        try:
            len_l = len(l)

            i = 0
            while i < len_l:
                # BRST+3[BRDT[+2]]
                j = i
                # Scan forward over the purely-alphabetic abbreviation.
                while j < len_l and not [x for x in l[j]
                                         if x in "0123456789:,-+"]:
                    j += 1
                if j != i:
                    # First abbreviation seen is standard time, the
                    # second one (if any) is DST.
                    if not res.stdabbr:
                        offattr = "stdoffset"
                        res.stdabbr = "".join(l[i:j])
                    else:
                        offattr = "dstoffset"
                        res.dstabbr = "".join(l[i:j])
                    i = j
                    if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
                                       "0123456789")):
                        if l[i] in ('+', '-'):
                            # Yes, that's right.  See the TZ variable
                            # documentation.
                            signal = (1, -1)[l[i] == '+']
                            i += 1
                        else:
                            signal = -1
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            setattr(res, offattr, (int(l[i][:2]) * 3600 +
                                                   int(l[i][2:]) * 60) * signal)
                        elif i + 1 < len_l and l[i + 1] == ':':
                            # -03:00
                            setattr(res, offattr,
                                    (int(l[i]) * 3600 + int(l[i + 2]) * 60) * signal)
                            i += 2
                        elif len_li <= 2:
                            # -[0]3
                            setattr(res, offattr,
                                    int(l[i][:2]) * 3600 * signal)
                        else:
                            return None
                        i += 1
                    if res.dstabbr:
                        break
                else:
                    break

            if i < len_l:
                # Normalize ';' rule separators to ','.
                for j in range(i, len_l):
                    if l[j] == ';':
                        l[j] = ','

                assert l[i] == ','

                i += 1

            if i >= len_l:
                pass
            elif (8 <= l.count(',') <= 9 and
                  not [y for x in l[i:] if x != ','
                       for y in x if y not in "0123456789"]):
                # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
                for x in (res.start, res.end):
                    x.month = int(l[i])
                    i += 2
                    if l[i] == '-':
                        value = int(l[i + 1]) * -1
                        i += 1
                    else:
                        value = int(l[i])
                    i += 2
                    if value:
                        # Non-zero week selects "week-of-month + weekday".
                        x.week = value
                        x.weekday = (int(l[i]) - 1) % 7
                    else:
                        x.day = int(l[i])
                    i += 2
                    x.time = int(l[i])
                    i += 2
                if i < len_l:
                    if l[i] in ('-', '+'):
                        signal = (-1, 1)[l[i] == "+"]
                        i += 1
                    else:
                        signal = 1
                    res.dstoffset = (res.stdoffset + int(l[i])) * signal
            elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
                  not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
                                                     '.', '-', ':')
                       for y in x if y not in "0123456789"]):
                for x in (res.start, res.end):
                    if l[i] == 'J':
                        # non-leap year day (1 based)
                        i += 1
                        x.jyday = int(l[i])
                    elif l[i] == 'M':
                        # month[-.]week[-.]weekday
                        i += 1
                        x.month = int(l[i])
                        i += 1
                        assert l[i] in ('-', '.')
                        i += 1
                        x.week = int(l[i])
                        if x.week == 5:
                            # "5th week" means "last occurrence".
                            x.week = -1
                        i += 1
                        assert l[i] in ('-', '.')
                        i += 1
                        x.weekday = (int(l[i]) - 1) % 7
                    else:
                        # year day (zero based)
                        x.yday = int(l[i]) + 1

                    i += 1

                    if i < len_l and l[i] == '/':
                        i += 1
                        # start time
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            x.time = (int(l[i][:2]) * 3600 +
                                      int(l[i][2:]) * 60)
                        elif i + 1 < len_l and l[i + 1] == ':':
                            # -03:00
                            x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
                            i += 2
                            if i + 1 < len_l and l[i + 1] == ':':
                                i += 2
                                x.time += int(l[i])
                        elif len_li <= 2:
                            # -[0]3
                            x.time = (int(l[i][:2]) * 3600)
                        else:
                            return None
                        i += 1

                    assert i == len_l or l[i] == ','

                    i += 1

                assert i >= len_l

        except (IndexError, ValueError, AssertionError):
            return None

        return res
|
class _tzparser(object):
class _result(_resultbase):
class _attr(_resultbase):
def __repr__(self):
pass
def __init__(self):
pass
def parse(self, tzstr):
pass
| 6 | 0 | 53 | 4 | 44 | 5 | 11 | 0.1 | 1 | 7 | 2 | 0 | 1 | 0 | 1 | 1 | 172 | 19 | 139 | 19 | 133 | 14 | 110 | 19 | 104 | 32 | 1 | 6 | 34 |
147,968 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/parser.py
|
constant2.pkg.superjson.pkg.dateutil.parser._timelex
|
class _timelex(object):
    """Lexer that splits a date/time string into tokens of homogeneous
    character class: runs of letters, runs of digits (possibly with a
    decimal point), or single separator characters."""

    # Fractional seconds are sometimes split by a comma
    _split_decimal = re.compile("([.,])")

    def __init__(self, instream):
        # Accept bytes, text, or any object exposing ``read``.
        if isinstance(instream, binary_type):
            instream = instream.decode()

        if isinstance(instream, text_type):
            instream = StringIO(instream)

        if getattr(instream, 'read', None) is None:
            raise TypeError('Parser must be a string or character stream, not '
                            '{itype}'.format(itype=instream.__class__.__name__))

        self.instream = instream
        self.charstack = []   # characters read but not yet consumed
        self.tokenstack = []  # tokens split off a compound dotted token
        self.eof = False

    def get_token(self):
        """
        This function breaks the time string into lexical units (tokens), which
        can be parsed by the parser. Lexical units are demarcated by changes in
        the character set, so any continuous string of letters is considered
        one unit, any continuous string of numbers is considered one unit.

        The main complication arises from the fact that dots ('.') can be used
        both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
        "4:30:21.447"). As such, it is necessary to read the full context of
        any dot-separated strings before breaking it into tokens; as such, this
        function maintains a "token stack", for when the ambiguous context
        demands that multiple tokens be parsed at once.
        """
        if self.tokenstack:
            return self.tokenstack.pop(0)

        seenletters = False
        token = None
        # state is one of: None (start), 'a' (word), '0' (number),
        # 'a.' (dotted word), '0.' (dotted number).
        state = None

        while not self.eof:
            # We only realize that we've reached the end of a token when we
            # find a character that's not part of the current token - since
            # that character may be part of the next token, it's stored in the
            # charstack.
            if self.charstack:
                nextchar = self.charstack.pop(0)
            else:
                nextchar = self.instream.read(1)
                # NUL bytes are skipped entirely.
                while nextchar == '\x00':
                    nextchar = self.instream.read(1)

            if not nextchar:
                self.eof = True
                break
            elif not state:
                # First character of the token - determines if we're starting
                # to parse a word, a number or something else.
                token = nextchar
                if self.isword(nextchar):
                    state = 'a'
                elif self.isnum(nextchar):
                    state = '0'
                elif self.isspace(nextchar):
                    token = ' '
                    break  # emit token
                else:
                    break  # emit token
            elif state == 'a':
                # If we've already started reading a word, we keep reading
                # letters until we find something that's not part of a word.
                seenletters = True
                if self.isword(nextchar):
                    token += nextchar
                elif nextchar == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0':
                # If we've already started reading a number, we keep reading
                # numbers until we find something that doesn't fit.
                if self.isnum(nextchar):
                    token += nextchar
                elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == 'a.':
                # If we've seen some letters and a dot separator, continue
                # parsing, and the tokens will be broken up later.
                seenletters = True
                if nextchar == '.' or self.isword(nextchar):
                    token += nextchar
                elif self.isnum(nextchar) and token[-1] == '.':
                    token += nextchar
                    state = '0.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token
            elif state == '0.':
                # If we've seen at least one dot separator, keep going, we'll
                # break up the tokens later.
                if nextchar == '.' or self.isnum(nextchar):
                    token += nextchar
                elif self.isword(nextchar) and token[-1] == '.':
                    token += nextchar
                    state = 'a.'
                else:
                    self.charstack.append(nextchar)
                    break  # emit token

        # A dotted token that is not a plain decimal number is split on
        # its separators; the extra pieces go onto the token stack.
        if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
                                       token[-1] in '.,')):
            l = self._split_decimal.split(token)
            token = l[0]
            for tok in l[1:]:
                if tok:
                    self.tokenstack.append(tok)

        if state == '0.' and token.count('.') == 0:
            # A comma acting as a decimal point is normalized to a dot.
            token = token.replace(',', '.')

        return token

    def __iter__(self):
        return self

    def __next__(self):
        token = self.get_token()
        if token is None:
            raise StopIteration

        return token

    def next(self):
        return self.__next__()  # Python 2.x support

    @classmethod
    def split(cls, s):
        # Tokenize the whole string *s* into a list.
        return list(cls(s))

    @classmethod
    def isword(cls, nextchar):
        """ Whether or not the next character is part of a word """
        return nextchar.isalpha()

    @classmethod
    def isnum(cls, nextchar):
        """ Whether the next character is part of a number """
        return nextchar.isdigit()

    @classmethod
    def isspace(cls, nextchar):
        """ Whether the next character is whitespace """
        return nextchar.isspace()
|
class _timelex(object):
def __init__(self, instream):
pass
def get_token(self):
'''
This function breaks the time string into lexical units (tokens), which
can be parsed by the parser. Lexical units are demarcated by changes in
the character set, so any continuous string of letters is considered
one unit, any continuous string of numbers is considered one unit.
The main complication arises from the fact that dots ('.') can be used
both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
"4:30:21.447"). As such, it is necessary to read the full context of
any dot-separated strings before breaking it into tokens; as such, this
function maintains a "token stack", for when the ambiguous context
demands that multiple tokens be parsed at once.
'''
pass
def __iter__(self):
pass
def __next__(self):
pass
def next(self):
pass
@classmethod
def split(cls, s):
pass
@classmethod
def isword(cls, nextchar):
''' Whether or not the next character is part of a word '''
pass
@classmethod
def isnum(cls, nextchar):
''' Whether the next character is part of a number '''
pass
@classmethod
def isspace(cls, nextchar):
''' Whether the next character is whitespace '''
pass
| 14 | 4 | 16 | 1 | 12 | 4 | 4 | 0.34 | 1 | 3 | 0 | 0 | 5 | 4 | 9 | 9 | 160 | 20 | 110 | 26 | 96 | 37 | 87 | 22 | 77 | 26 | 1 | 3 | 38 |
147,969 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/rrule.py
|
constant2.pkg.superjson.pkg.dateutil.rrule._iterinfo
|
class _iterinfo(object):
    """Per-(year, month) cache of calendrical masks used by rrule
    iteration: month/day/weekday/week-number/easter lookup tables, plus
    helpers that produce candidate day sets and time sets."""

    __slots__ = ["rrule", "lastyear", "lastmonth",
                 "yearlen", "nextyearlen", "yearordinal", "yearweekday",
                 "mmask", "mrange", "mdaymask", "nmdaymask",
                 "wdaymask", "wnomask", "nwdaymask", "eastermask"]

    def __init__(self, rrule):
        for attr in self.__slots__:
            setattr(self, attr, None)
        self.rrule = rrule

    def rebuild(self, year, month):
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365 + calendar.isleap(year)
            self.nextyearlen = 365 + calendar.isleap(year + 1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()

            wday = datetime.date(year, 1, 1).weekday()
            if self.yearlen == 365:
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE

            if not rr._byweekno:
                self.wnomask = None
            else:
                # Mark every day belonging to a requested ISO-like week
                # number (relative to the rule's week start).
                self.wnomask = [0] * (self.yearlen + 7)
                # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7 - self.yearweekday + rr._wkst) % 7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen + (self.yearweekday - rr._wkst) % 7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen - no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div + mod // 4
                for n in rr._byweekno:
                    if n < 0:
                        n += numweeks + 1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst + (n - 1) * 7
                        if no1wkst != firstwkst:
                            i -= 7 - firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst + numweeks * 7
                    if no1wkst != firstwkst:
                        i -= 7 - firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year - 1, 1, 1).weekday()
                        lno1wkst = (7 - lyearweekday + rr._wkst) % 7
                        lyearlen = 365 + calendar.isleap(year - 1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52 + (lyearlen +
                                              (lyearweekday - rr._wkst) % 7) % 7 // 4
                        else:
                            lnumweeks = 52 + (self.yearlen - no1wkst) % 7 // 4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1

        if (rr._bynweekday and (month != self.lastmonth or
                                year != self.lastyear)):
            # Mark the days matching "nth weekday of period" requests
            # (e.g. 2nd Tuesday), within the relevant month(s).
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month - 1:month + 1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month - 1:month + 1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0] * self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            i = last + (n + 1) * 7
                            i -= (self.wdaymask[i] - wday) % 7
                        else:
                            i = first + (n - 1) * 7
                            i += (7 - self.wdaymask[i] + wday) % 7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1

        if rr._byeaster:
            # Mark days at the requested offsets from Easter Sunday.
            self.eastermask = [0] * (self.yearlen + 7)
            eyday = easter.easter(year).toordinal() - self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday + offset] = 1

        self.lastyear = year
        self.lastmonth = month

    def ydayset(self, year, month, day):
        # Candidate days for a YEARLY period: the whole year.
        return list(range(self.yearlen)), 0, self.yearlen

    def mdayset(self, year, month, day):
        # Candidate days for a MONTHLY period: days of *month* only.
        dset = [None] * self.yearlen
        start, end = self.mrange[month - 1:month + 1]
        for i in range(start, end):
            dset[i] = i
        return dset, start, end

    def wdayset(self, year, month, day):
        # We need to handle cross-year weeks here.
        dset = [None] * (self.yearlen + 7)
        i = datetime.date(year, month, day).toordinal() - self.yearordinal
        start = i
        for j in range(7):
            dset[i] = i
            i += 1
            # if (not (0 <= i < self.yearlen) or
            #     self.wdaymask[i] == self.rrule._wkst):
            # This will cross the year boundary, if necessary.
            if self.wdaymask[i] == self.rrule._wkst:
                break
        return dset, start, i

    def ddayset(self, year, month, day):
        # Candidate days for a DAILY period: exactly one day.
        dset = [None] * self.yearlen
        i = datetime.date(year, month, day).toordinal() - self.yearordinal
        dset[i] = i
        return dset, i, i + 1

    def htimeset(self, hour, minute, second):
        # All times within one hour matching BYMINUTE/BYSECOND.
        tset = []
        rr = self.rrule
        for minute in rr._byminute:
            for second in rr._bysecond:
                tset.append(datetime.time(hour, minute, second,
                                          tzinfo=rr._tzinfo))
        tset.sort()
        return tset

    def mtimeset(self, hour, minute, second):
        # All times within one minute matching BYSECOND.
        tset = []
        rr = self.rrule
        for second in rr._bysecond:
            tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
        tset.sort()
        return tset

    def stimeset(self, hour, minute, second):
        # A single fully-specified time.
        return (datetime.time(hour, minute, second,
                              tzinfo=self.rrule._tzinfo),)
|
class _iterinfo(object):
def __init__(self, rrule):
pass
def rebuild(self, year, month):
pass
def ydayset(self, year, month, day):
pass
def mdayset(self, year, month, day):
pass
def wdayset(self, year, month, day):
pass
def ddayset(self, year, month, day):
pass
def htimeset(self, hour, minute, second):
pass
def mtimeset(self, hour, minute, second):
pass
def stimeset(self, hour, minute, second):
pass
| 10 | 0 | 20 | 1 | 17 | 2 | 5 | 0.14 | 1 | 4 | 0 | 0 | 9 | 15 | 9 | 9 | 193 | 14 | 157 | 58 | 147 | 22 | 141 | 58 | 131 | 34 | 1 | 6 | 49 |
147,970 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/scheduler.py
|
crawl_zillow.scheduler.StreetScheduler
|
class StreetScheduler(BaseScheduler):
    """Scheduler for crawling Street pages; a finished street yields
    Address documents stored in a per-state address collection."""

    model = Street
    next_model = Address
    next_model_col_name = Address._meta["collection"]

    def user_post_process(self, task):
        """Persist the crawl result of one street task.

        On success, inserts the scraped address documents into the
        state-specific collection and marks the parent street document
        finished; on failure, records the failure status and re-raises
        the stored error.
        """
        # insert into next model's collection
        output_data = task.output_data
        if output_data.status == Status.S50_Finished.id:
            n_children = len(output_data.data)
            if n_children:
                # All addresses of one street share the same state, so
                # the first document determines the target collection.
                state = output_data.data[0].state
                next_model_col = address_col_mapper[state]
                to_insert = list()
                for doc in output_data.data:
                    d = doc.to_dict()
                    # Use href as the MongoDB primary key (_id).
                    d["_id"] = d["href"]
                    del d["href"]
                    to_insert.append(d)
                smart_insert(next_model_col, to_insert)
            # update parent collection about status, edit_at, n_children
            self.collection.update(
                {"_id": task.id},
                {
                    "$set": {
                        status_key: Status.S50_Finished.id,
                        edit_at_key: datetime.utcnow(),
                        n_children_key: n_children,
                    }
                },
            )
        else:
            # Crawl failed: record the status and timestamp first.
            self.collection.update(
                {"_id": task.id},
                {
                    "$set": {
                        status_key: output_data.status,
                        edit_at_key: datetime.utcnow(),
                    }
                },
            )
            # NOTE(review): assumes output_data.errors is an exception
            # instance — confirm against the OutputData producers.
            raise output_data.errors
|
class StreetScheduler(BaseScheduler):
def user_post_process(self, task):
pass
| 2 | 0 | 42 | 5 | 35 | 2 | 4 | 0.05 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 7 | 47 | 6 | 39 | 12 | 37 | 2 | 21 | 12 | 19 | 4 | 2 | 3 | 4 |
147,971 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/scheduler.py
|
crawl_zillow.scheduler.ZipcodeScheduler
|
class ZipcodeScheduler(BaseScheduler):
    """Scheduler for Zipcode pages; children are Street documents."""
    model = Zipcode
    next_model = Street
    next_model_col_name = Street._meta["collection"]
|
class ZipcodeScheduler(BaseScheduler):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 4 | 0 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 2 | 0 | 0 |
147,972 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/urlbuilder.py
|
crawl_zillow.urlbuilder.UrlBuilder
|
class UrlBuilder(BaseUrlBuilder):
    """Builds zillow.com browse-homes URLs at each drill-down level."""

    domain = "https://www.zillow.com"
    domain_browse_homes = "https://www.zillow.com/browse/homes"

    def browse_home_listpage_url(self,
                                 state=None,
                                 county=None,
                                 zipcode=None,
                                 street=None,
                                 **kwargs):
        """
        Construct an url of home list page by state, county, zipcode, street.

        Each truthy component is appended in drill-down order, and the
        result always ends with a trailing slash.

        Example:

        - https://www.zillow.com/browse/homes/ca/
        - https://www.zillow.com/browse/homes/ca/los-angeles-county/
        - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/
        - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/
        """
        parts = [part for part in (state, county, zipcode, street) if part]
        suffix = "".join("/%s" % part for part in parts)
        return self.domain_browse_homes + suffix + "/"
|
class UrlBuilder(BaseUrlBuilder):
def browse_home_listpage_url(self,
state=None,
county=None,
zipcode=None,
street=None,
**kwargs):
'''
Construct an url of home list page by state, county, zipcode, street.
Example:
- https://www.zillow.com/browse/homes/ca/
- https://www.zillow.com/browse/homes/ca/los-angeles-county/
- https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/
- https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/
'''
pass
| 2 | 1 | 22 | 2 | 12 | 8 | 3 | 0.53 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 26 | 3 | 15 | 11 | 8 | 8 | 10 | 6 | 8 | 3 | 1 | 2 | 3 |
147,973 |
MacHu-GWU/crawl_zillow-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawl_zillow-project/crawl_zillow/config.py
|
crawl_zillow.config.Config.MongoDB
|
class MongoDB(object):
    """MongoDB connection settings; values are filled in at runtime."""
    dbname = None     # database name
    username = None   # auth user
    password = None   # auth password
    host = None       # server host
    port = None       # server port
|
class MongoDB(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0 | 6 | 6 | 5 | 0 | 6 | 6 | 5 | 0 | 1 | 0 | 0 |
147,974 |
MacHu-GWU/crawl_zillow-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawl_zillow-project/crawl_zillow/config.py
|
crawl_zillow.config.Config.Crawler
|
class Crawler(object):
    """Crawler pacing and browser settings."""
    wait_time = 1.0                  # seconds to pause between requests
    browser_prepare_time = 10.0      # seconds to allow the browser to start
    chrome_driver_executable = None  # path to chromedriver; set at runtime
|
class Crawler(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 1 | 0 | 0 |
147,975 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/tests/test_model.py
|
test_model.TestState
|
class TestState(object):
    """Unit tests for the State model's derived properties."""

    def test_url_and_key(self):
        # ``url`` joins the href onto the site domain (no trailing slash);
        # ``key`` is the last non-empty path segment of the href.
        state = model.State(href="/browse/homes/ca/", state="California")
        assert state.url == "https://www.zillow.com/browse/homes/ca"
        assert state.key == "ca"
|
class TestState(object):
def test_url_and_key(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 5 | 0 | 5 | 3 | 3 | 0 | 5 | 3 | 3 | 1 | 1 | 0 | 1 |
147,976 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/tests/test_urlbuilder_htmlparser.py
|
test_urlbuilder_htmlparser.TestHtmlParser
|
class TestHtmlParser(object):
    """Integration tests for htmlparser against live zillow browse pages."""

    def test_get_items(self):
        # One URL per drill-down level (root -> state -> county ->
        # zipcode -> street); each page must yield at least 3 items.
        urls = [
            urlbuilder.browse_home_listpage_url(),
            urlbuilder.browse_home_listpage_url(state="ca"),
            urlbuilder.browse_home_listpage_url(
                state="ca", county="los-angeles-county"
            ),
            urlbuilder.browse_home_listpage_url(
                state="ca", county="los-angeles-county", zipcode="91001"
            ),
            urlbuilder.browse_home_listpage_url(
                state="ca", county="los-angeles-county", zipcode="91001",
                street="tola-ave_5038895"
            ),
        ]
        for url in urls:
            html = get_html(url)
            data = htmlparser.get_items(html)
            assert len(data[:3]) == 3
|
class TestHtmlParser(object):
def test_get_items(self):
pass
| 2 | 0 | 19 | 0 | 19 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 20 | 0 | 20 | 6 | 18 | 0 | 7 | 6 | 5 | 2 | 1 | 1 | 2 |
147,977 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/scheduler.py
|
crawl_zillow.scheduler.StateScheduler
|
class StateScheduler(BaseScheduler):
    """Scheduler for State pages; children are County documents."""
    model = State
    next_model = County
    next_model_col_name = County._meta["collection"]
|
class StateScheduler(BaseScheduler):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 4 | 0 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 2 | 0 | 0 |
147,978 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/scheduler.py
|
crawl_zillow.scheduler.OutputData
|
class OutputData(object):
    """
    Container for the result of one crawl task.

    :param data: parsed documents produced by the crawler.
    :param errors: exception captured during crawling, if any.
    :param status: a :class:`Status` id describing the task outcome
        (defaults to to-do).

    NOTE(review): fields are declared with ``attr.ib`` but no ``@attr.s``
    decorator is visible in this view — confirm the decorator exists,
    otherwise these attributes remain raw attr counters at runtime.
    """
    data = attr.ib()
    errors = attr.ib(default=None)
    status = attr.ib(default=Status.S0_ToDo.id)
|
class OutputData(object):
'''
:param doc: instance of :class:`~mongoengine.Document`.
:param get_html_kwargs: arguments of :meth:`~crawl_zillow.spider.get_html`.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 1 | 4 | 4 | 3 | 4 | 4 | 4 | 3 | 0 | 1 | 0 | 0 |
147,979 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/model.py
|
crawl_zillow.model.Street
|
class Street(BaseModel):
    """A street-level browse page; the inherited ``href`` field is the
    primary key."""
    state = mongoengine.StringField()    # state slug, e.g. "ca"
    county = mongoengine.StringField()   # county slug
    zipcode = mongoengine.StringField()  # 5-digit zipcode
    street = mongoengine.StringField()   # street slug

    meta = {
        "collection": Model.street
    }
|
class Street(BaseModel):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 9 | 1 | 8 | 6 | 7 | 0 | 6 | 6 | 5 | 0 | 2 | 0 | 0 |
147,980 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/scheduler.py
|
crawl_zillow.scheduler.CountyScheduler
|
class CountyScheduler(BaseScheduler):
    """Scheduler for County pages; children are Zipcode documents."""
    model = County
    next_model = Zipcode
    next_model_col_name = Zipcode._meta["collection"]
|
class CountyScheduler(BaseScheduler):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 4 | 0 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 2 | 0 | 0 |
147,981 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/htmlparser.py
|
crawl_zillow.htmlparser.HTMLParser
|
class HTMLParser(BaseHtmlParser):
    """Parses zillow.com browse-list pages and house-detail pages."""

    domain = "http://www.zillow.com"

    def get_items(self, html):
        """
        Get state, county, zipcode, address code from lists page.

        Example:

            target url: http://www.zillow.com/browse/homes/md/
            data: ``[(href, name), ...]``

        :param html: raw html text of a browse list page.
        :return: list of ``(href, name)`` tuples.
        :raises exc.CaptchaError: when the page is a captcha challenge.
        """
        # NOTE: the original docstring contained unresolved git merge
        # conflict markers; resolved to the reST form above.
        # A captcha page means we are blocked; fail loudly instead of
        # returning an empty result.
        captcha_patterns = [
            "https://www.google.com/recaptcha/api.js", "I'm not a robot"]
        for captcha_pattern in captcha_patterns:
            if captcha_pattern in html:
                raise exc.CaptchaError("Found %r in html!" % captcha_pattern)

        data = list()
        soup = self.to_soup(html)
        div = soup.find("div", class_="zsg-lg-1-2 zsg-sm-1-1")
        for li in div.find_all("li"):
            a = li.find_all("a")[0]
            href = a["href"]
            name = a.text.strip()
            data.append((href, name))
        return data

    @staticmethod
    def _li_texts(div):
        """Collect the stripped text of every ``<li>`` under *div*."""
        return [li.text.strip() for li in div.find_all("li")]

    def get_house_detail(self, html):
        """Get bedroom, bathroom, sqft and more information.

        Example: http://www.zillow.com/homedetails/8510-Whittier-Blvd-Bethesda-MD-20817/37183103_zpid/

        :param html: raw html text of a house detail page.
        :return: dict of parsed fields (plus an ``"errors"`` dict when some
            fields failed to parse), or ``None`` if nothing was parsed.
        :raises exc.CaptchaError: when the page is a captcha challenge.
        :raises exc.ParseError: when the address header cannot be found.
        """
        # BUGFIX: the original raised ``exc.CaptchaError(url)`` and
        # ``exc.ParseError(url)`` with an undefined name ``url``, which
        # produced a NameError instead of the intended exception.
        if "I'm not a robot" in html:
            raise exc.CaptchaError("captcha page detected!")

        data = {"errors": dict()}

        # NOTE(review): get_items uses ``to_soup`` while this method uses
        # ``get_soup`` — confirm both exist on BaseHtmlParser.
        soup = self.get_soup(html)

        # header part, bedroom, bathroom, sqft
        header = soup.find("header", class_="zsg-content-header addr")
        if header is None:
            raise exc.ParseError("can't find address header in html!")
        try:
            h3 = header.find("h3")
            if h3 is None:
                raise exc.ParseError
            span_list = h3.find_all("span", class_="addr_bbs")
            if len(span_list) != 3:
                raise exc.ParseError
            # The three spans are always bedroom, bathroom, sqft in order.
            for key, caster, span in zip(
                    ("bedroom", "bathroom", "sqft"),
                    (float_filter, float_filter, int_filter),
                    span_list):
                text = span.text
                try:
                    data[key] = caster(text)
                except Exception:
                    data["errors"][key] = "can't parse %s from %r" % (key, text)
        except Exception:
            # Best-effort: a malformed header leaves bedroom/bathroom/sqft
            # unset but does not abort the rest of the parse.
            pass

        # Facts, Features, Construction, Other (FFCO)
        div_list = soup.find_all(
            "div", class_=re.compile("fact-group-container zsg-content-component"))
        # Simple sections collect every <li>; "Features" needs filtering
        # and is handled separately below.
        section_keys = {
            "Facts": "facts",
            "Appliances Included": "appliances",
            "Additional Features": "additional_features",
            "Construction": "construction",
            "Other": "other",
        }
        for div in div_list:
            title = div.find("h3").text
            if title in section_keys:
                key = section_keys[title]
                try:
                    # CONSISTENCY: errors are stored as repr(e) everywhere
                    # (the original used str(e) for "Facts" only).
                    data[key] = self._li_texts(div)
                except Exception as e:
                    data["errors"][key] = repr(e)
            elif title == "Features":
                features = list()
                try:
                    for li in div.find_all("li"):
                        # Skip script/markup artifacts embedded in the list.
                        if '"targetDiv"' not in li.text:
                            features.append(li.text.strip())
                    data["features"] = features
                except Exception as e:
                    data["errors"]["features"] = repr(e)

        if len(data["errors"]) == 0:
            del data["errors"]

        if data:
            return data
        else:
            return None
|
class HTMLParser(BaseHtmlParser):
def get_items(self, html):
'''
Get state, county, zipcode, address code from lists page.
Example:
target url: http://www.zillow.com/browse/homes/md/
<<<<<<< HEAD
data: ``[(href, name), ...]``
=======
data: [(href, name)]
>>>>>>> 4507a26c6cc47e0affe1f7000f912e536c45212b
'''
pass
def get_house_detail(self, html):
'''Get bedroom, bathroom, sqft and more information.
Example: http://www.zillow.com/homedetails/8510-Whittier-Blvd-Bethesda-MD-20817/37183103_zpid/
'''
pass
| 3 | 2 | 74 | 11 | 56 | 8 | 18 | 0.13 | 1 | 4 | 0 | 0 | 2 | 0 | 2 | 2 | 151 | 23 | 113 | 32 | 110 | 15 | 108 | 31 | 105 | 31 | 1 | 5 | 35 |
147,982 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/model.py
|
crawl_zillow.model.Address
|
class Address(BaseModel):
state = mongoengine.StringField()
county = mongoengine.StringField()
zipcode = mongoengine.StringField()
street = mongoengine.StringField()
address = mongoengine.StringField()
|
class Address(BaseModel):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 6 | 0 | 6 | 6 | 5 | 0 | 6 | 6 | 5 | 0 | 2 | 0 | 0 |
147,983 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/model.py
|
crawl_zillow.model.BaseModel
|
class BaseModel(ExtendedDocument):
href = mongoengine.StringField(primary_key=True)
_status = mongoengine.IntField(
default=Status.S0_ToDo.id,
)
_edit_at = mongoengine.DateTimeField(
default=lambda: datetime.utcnow()
)
_n_children = mongoengine.IntField()
meta = {
"abstract": True,
}
@property
def url(self):
"""
Example::
/browse/homes/ca/ -> https://www.zillow.com/browse/homes/ca:return:
"""
return urlbuilder.join_all(self.href)
@property
def key(self):
"""
Example::
/browse/homes/ca/ -> ca
/browse/homes/ca/los-angeles-county/ -> los-angeles-county
/browse/homes/ca/los-angeles-county/91001/ -> 91001
/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/ -> tola-ave_5038895
:return:
"""
return [part.strip() for part in self.href.split("/") if part.strip()][
-1]
|
class BaseModel(ExtendedDocument):
@property
def url(self):
'''
Example::
/browse/homes/ca/ -> https://www.zillow.com/browse/homes/ca:return:
'''
pass
@property
def key(self):
'''
Example::
/browse/homes/ca/ -> ca
/browse/homes/ca/los-angeles-county/ -> los-angeles-county
/browse/homes/ca/los-angeles-county/91001/ -> 91001
/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/ -> tola-ave_5038895
:return:
'''
pass
| 5 | 2 | 11 | 2 | 3 | 6 | 1 | 0.63 | 1 | 1 | 0 | 5 | 2 | 0 | 2 | 2 | 38 | 7 | 19 | 10 | 14 | 12 | 10 | 8 | 7 | 1 | 1 | 0 | 2 |
147,984 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/model.py
|
crawl_zillow.model.County
|
class County(BaseModel):
state = mongoengine.StringField()
county = mongoengine.StringField()
meta = {
"collection": Model.county
}
|
class County(BaseModel):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 7 | 1 | 6 | 4 | 5 | 0 | 4 | 4 | 3 | 0 | 2 | 0 | 0 |
147,985 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/model.py
|
crawl_zillow.model.State
|
class State(BaseModel):
state = mongoengine.StringField()
meta = {
"collection": Model.state
}
|
class State(BaseModel):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 6 | 1 | 5 | 3 | 4 | 0 | 3 | 3 | 2 | 0 | 2 | 0 | 0 |
147,986 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/scheduler.py
|
crawl_zillow.scheduler.InputData
|
class InputData(object):
"""
:param doc: instance of :class:`~mongoengine.Document`.
:param get_html_kwargs: arguments of :meth:`~crawl_zillow.spider.get_html`.
"""
doc = attr.ib()
get_html_kwargs = attr.ib(default=attr.Factory(dict))
|
class InputData(object):
'''
:param doc: instance of :class:`~mongoengine.Document`.
:param get_html_kwargs: arguments of :meth:`~crawl_zillow.spider.get_html`.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1.33 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0 | 3 | 3 | 2 | 4 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
147,987 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/model.py
|
crawl_zillow.model.Zipcode
|
class Zipcode(BaseModel):
state = mongoengine.StringField()
county = mongoengine.StringField()
zipcode = mongoengine.StringField()
meta = {
"collection": Model.zipcode
}
|
class Zipcode(BaseModel):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 8 | 1 | 7 | 5 | 6 | 0 | 5 | 5 | 4 | 0 | 2 | 0 | 0 |
147,988 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/scheduler.py
|
crawl_zillow.scheduler.BaseScheduler
|
class BaseScheduler(StatusFlagScheduler):
duplicate_flag = Status.S50_Finished.id
update_interval = 30 * 24 * 3600 # 30 days
status_key = status_key
edit_at_key = edit_at_key
n_children_key = n_children_key
model = BaseModel
next_model = None
next_model_col_name = None
_use_browser = None
_selenium_driver = None
@property
def collection(self):
return self.model.col()
def user_hash_input(self, input_data):
return input_data.doc.href
def user_process(self, input_data):
"""
:param input_data:
:return: output_data, list of next model instance. For example, if
model is :class:`~crawl_zillow.model.State`, then next model is
:class:`~crawl_zillow.model.County`.
"""
url = input_data.doc.url
self.logger.info("Crawl %s ." % url, 1)
output_data = OutputData(data=list())
try:
html = get_html(
url,
wait_time=Config.Crawler.wait_time,
driver=self._selenium_driver,
**input_data.get_html_kwargs)
# some this model's attributes will also available in next model
d = input_data.doc.to_dict()
del d[primary_key]
del d[status_key]
del d[edit_at_key]
del d[n_children_key]
try:
for href, name in htmlparser.get_items(html):
data = {
primary_key: href,
self.next_model_col_name: name,
}
data.update(d)
next_model_instance = self.next_model(**data)
output_data.data.append(next_model_instance)
self.logger.info(Status.S50_Finished.description, 1)
output_data.status = Status.S50_Finished.id
except Exception as e:
raise exc.ParseError
except exc.CaptchaError as e:
time.sleep(10.0) # Wait for 10 seconds to solve Captcha
self.logger.info(Status.S20_WrongPage.description, 1)
output_data.status = Status.S20_WrongPage.id
output_data.errors = e
except exc.WrongHtmlError as e:
self.logger.info(Status.S20_WrongPage.description, 1)
output_data.status = Status.S20_WrongPage.id
output_data.errors = e
except exc.ParseError as e:
self.logger.info(Status.S30_ParseError.description, 1)
output_data.status = Status.S30_ParseError.id
output_data.errors = e
except exc.ServerSideError as e:
self.logger.info(Status.S60_ServerSideError.description, 1)
output_data.status = Status.S60_ServerSideError.id
output_data.errors = e
except Exception as e:
self.logger.info(Status.S10_HttpError.description, 1)
output_data.status = Status.S10_HttpError.id
output_data.errors = e
# output_data.data = output_data.data[:2] # COMMENT OUT IN PROD
return output_data
def user_post_process(self, task):
"""
:param task:
:return:
"""
# insert into next model's collection
output_data = task.output_data
parent_to_set = {
self.status_key: output_data.status,
self.edit_at_key: datetime.utcnow(),
}
if output_data.status == Status.S50_Finished.id:
n_children = len(output_data.data)
if n_children:
self.next_model.smart_insert(output_data.data)
parent_to_set[self.n_children_key] = n_children
self.collection.update({"_id": task.id}, {"$set": parent_to_set})
if output_data.status < Status.S50_Finished.id:
raise output_data.errors
def get_todo(self, filters=None, limit=None, get_html_kwargs=None):
if filters is None:
filters = {
"$or": [ # not the finished document
{status_key: {"$not": {"$gte": Status.S50_Finished.id}}},
# now - edit_at <= update_interval
# means now - update_interval <= edit_at
{
edit_at_key: {
"$not": {
"$gte": datetime.utcnow() - timedelta(seconds=self.update_interval)
},
}
}
]
}
if get_html_kwargs is None:
get_html_kwargs = dict()
input_data_queue = list()
for doc in self.model.by_filter(filters=filters).limit(limit):
input_data = InputData(
doc=doc,
get_html_kwargs=get_html_kwargs,
)
input_data_queue.append(input_data)
return input_data_queue
def do(self,
input_data_queue,
pre_process=None,
multiprocess=False,
ignore_error=True,
use_browser=False):
self._use_browser = use_browser
if self._use_browser:
self._selenium_driver = ChromeSpider(
driver_executable_path=Config.Crawler.chrome_driver_executable,
default_timeout=5.0,
default_wait_time=Config.Crawler.wait_time,
)
# Wait 10 seconds to initiate something, like log-in.
time.sleep(Config.Crawler.browser_prepare_time)
with self._selenium_driver as driver:
super(BaseScheduler, self).do(
input_data_queue,
pre_process=pre_process,
multiprocess=False,
ignore_error=ignore_error,
)
else:
self._selenium_driver = None
super(BaseScheduler, self).do(
input_data_queue,
pre_process=pre_process,
multiprocess=multiprocess,
ignore_error=ignore_error,
)
|
class BaseScheduler(StatusFlagScheduler):
@property
def collection(self):
pass
def user_hash_input(self, input_data):
pass
def user_process(self, input_data):
'''
:param input_data:
:return: output_data, list of next model instance. For example, if
model is :class:`~crawl_zillow.model.State`, then next model is
:class:`~crawl_zillow.model.County`.
'''
pass
def user_post_process(self, task):
'''
:param task:
:return:
'''
pass
def get_todo(self, filters=None, limit=None, get_html_kwargs=None):
pass
def do(self,
input_data_queue,
pre_process=None,
multiprocess=False,
ignore_error=True,
use_browser=False):
pass
| 8 | 2 | 25 | 2 | 20 | 3 | 3 | 0.14 | 1 | 10 | 4 | 4 | 6 | 0 | 6 | 6 | 172 | 23 | 133 | 38 | 120 | 19 | 88 | 30 | 81 | 8 | 1 | 3 | 20 |
147,989 |
MacHu-GWU/crawl_zillow-project
|
MacHu-GWU_crawl_zillow-project/crawl_zillow/ns.py
|
crawl_zillow.ns.Model
|
class Model(object):
state = "state"
county = "county"
zipcode = "zipcode"
street = "street"
address = "address"
|
class Model(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0 | 6 | 6 | 5 | 0 | 6 | 6 | 5 | 0 | 1 | 0 | 0 |
147,990 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/mongo_backend/test_mongoengine_orm.py
|
test_mongoengine_orm.User
|
class User(ExtendedDocument, UserAttributeMixin):
pass
|
class User(ExtendedDocument, UserAttributeMixin):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
147,991 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/s1_movie/entity_movie.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.s1_movie.entity_movie.MovieCoverImagePage
|
class MovieCoverImagePage(MoviePageBase):
CONF_UPDATE_INTERVAL = 24 * 3600
CONF_STATUS_KEY = "status_cover_image"
CONF_EDIT_AT_KEY = "edit_at_cover_image"
CONF_UPDATE_FIELDS = (
"image_content",
)
meta = dict(
collection="site_movie_movie",
db_alias=config.DB_DATABASE.get_value(),
)
def build_url(self):
return url_builder.url_movie_detail(self._id)
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
entity_data = dict(image_content=html)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
additional_data={},
status=status,
)
return pres
|
class MovieCoverImagePage(MoviePageBase):
def build_url(self):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 4 | 0 | 7 | 1 | 6 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 2 | 0 | 2 | 36 | 28 | 5 | 23 | 12 | 19 | 0 | 13 | 11 | 10 | 1 | 6 | 0 | 2 |
147,992 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/s1_movie/entity_movie.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.s1_movie.entity_movie.MoviePage
|
class MoviePage(MoviePageBase):
CONF_UPDATE_INTERVAL = 24 * 3600
CONF_STATUS_KEY = "status_movie_info"
CONF_EDIT_AT_KEY = "edit_at_movie_info"
CONF_UPDATE_FIELDS = (
"title",
)
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(MovieCoverImagePage,
Relationship.Option.one, recursive=True)
])
meta = dict(
collection="site_movie_movie",
db_alias=config.DB_DATABASE.get_value(),
)
def build_url(self):
return url_builder.url_movie_detail(self._id)
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
span_title = soup.find("span", class_="title")
title = span_title.text
entity_data = dict(title=title)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
additional_data={},
status=status,
)
return pres
|
class MoviePage(MoviePageBase):
def build_url(self):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 4 | 0 | 8 | 1 | 7 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 2 | 0 | 2 | 36 | 34 | 6 | 28 | 15 | 24 | 0 | 16 | 14 | 13 | 1 | 6 | 0 | 2 |
147,993 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/s2_music/entity_music.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.s2_music.entity_music.MusicPage
|
class MusicPage(MusicWebsiteEntity):
CONF_UPDATE_INTERVAL = 24 * 3600
_id = fields.IntField(primary_key=True)
title = fields.StringField()
artists = fields.ListField(fields.IntField())
n_artist = fields.IntField()
genres = fields.ListField(fields.IntField())
n_genre = fields.IntField()
meta = dict(
collection="site_music_music",
db_alias=config.DB_DATABASE.get_value(),
)
@property
def music_id(self):
return self._id
def build_url(self):
return url_builder.url_music_detail(self._id)
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
div_detail = soup.find("div", id="detail")
title = div_detail.find("div", class_="title").find("span").text
artists = [
int(a["href"].split("/")[-1])
for a in div_detail.find("div", class_="artists").find_all("a")
]
genres = [
int(a["href"].split("/")[-1])
for a in div_detail.find("div", class_="genres").find_all("a")
]
entity_data = dict(title=title, artists=artists, genres=genres)
children = list()
for artist_id in artists:
children.append(ArtistPage(_id=artist_id))
for genre_id in genres:
children.append(GenrePage(_id=genre_id))
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class MusicPage(MusicWebsiteEntity):
@property
def music_id(self):
pass
def build_url(self):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 6 | 0 | 11 | 1 | 10 | 0 | 2 | 0 | 1 | 8 | 5 | 0 | 3 | 0 | 3 | 36 | 53 | 10 | 43 | 24 | 37 | 0 | 27 | 22 | 23 | 3 | 7 | 1 | 5 |
147,994 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/s2_music/entity_music.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.s2_music.entity_music.RandomMusicPage
|
class RandomMusicPage(MusicWebsiteEntity):
CONF_UPDATE_INTERVAL = 1
CONF_UPDATE_FIELDS = ("musics", "n_music")
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(MusicPage, Relationship.Option.many, "n_music")
])
_id = fields.IntField(primary_key=True)
musics = fields.ListField(fields.IntField())
n_music = fields.IntField()
meta = dict(
collection="site_music_random_music",
db_alias=config.DB_DATABASE.get_value(),
)
def build_url(self, **kwargs):
return url_builder.url_random_music()
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
musics = [
int(a["href"].split("/")[-1])
for a in soup.find_all("a")
]
entity_data = dict(musics=musics)
children = list()
for music_id in musics:
music = MusicPage(_id=music_id)
children.append(music)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class RandomMusicPage(MusicWebsiteEntity):
def build_url(self, **kwargs):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 4 | 0 | 12 | 2 | 10 | 0 | 2 | 0 | 1 | 7 | 4 | 0 | 2 | 0 | 2 | 35 | 42 | 8 | 34 | 18 | 30 | 0 | 20 | 17 | 17 | 2 | 7 | 1 | 3 |
147,995 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/s1_movie/entity_movie_sql_backend.py
|
crawlib.tests.dummy_site_crawler.sql_backend.s1_movie.entity_movie_sql_backend.ListPage
|
class ListPage(SingleStatusEntityBase):
__tablename__ = "site_movie_listpage"
CONF_UPDATE_INTERVAL = 1
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(MoviePage, Relationship.Option.many,
"n_movie", recursive=True)
])
id = sa.Column(sa.Integer, primary_key=True)
n_movie = sa.Column(sa.Integer)
@property
def page_num(self):
return self.id
def build_url(self):
return url_builder.url_nth_listpage(self.page_num)
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
div_listpage = soup.find("div", id="listpage")
a_tag_list = div_listpage.find_all("a")
entity_data = {}
children = list()
for a in a_tag_list:
href = a["href"]
movie_id = int(href.split("/")[-1])
movie = MoviePage(id=movie_id)
children.append(movie)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class ListPage(SingleStatusEntityBase):
@property
def page_num(self):
pass
def build_url(self):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 6 | 0 | 9 | 1 | 7 | 0 | 1 | 0 | 1 | 4 | 2 | 0 | 3 | 0 | 3 | 39 | 42 | 10 | 32 | 20 | 26 | 0 | 23 | 18 | 19 | 2 | 7 | 1 | 4 |
147,996 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/s1_movie/entity_movie_sql_backend.py
|
crawlib.tests.dummy_site_crawler.sql_backend.s1_movie.entity_movie_sql_backend.HomePage
|
class HomePage(SingleStatusEntityBase):
__tablename__ = "site_movie_homepage"
CONF_UPDATE_INTERVAL = 1
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(ListPage, Relationship.Option.many,
"n_listpage", recursive=True)
])
id = sa.Column(sa.Integer, primary_key=True)
description = sa.Column(sa.String)
max_page_num = sa.Column(sa.Integer)
n_listpage = sa.Column(sa.Integer)
def build_url(self, **kwargs):
return url_builder.url_first_listpage()
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
div_pagination = soup.find("div", id="pagination")
a_tag_list = div_pagination.find_all("a")
href = a_tag_list[-1]["href"]
max_page_num = int(href.split("/")[-1])
entity_data = dict(max_page_num=max_page_num)
children = list()
for page_num in range(1, 1 + max_page_num):
listpage = ListPage(id=page_num)
children.append(listpage)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class HomePage(SingleStatusEntityBase):
def build_url(self, **kwargs):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 4 | 0 | 11 | 1 | 10 | 0 | 2 | 0 | 1 | 6 | 2 | 0 | 2 | 0 | 2 | 38 | 37 | 6 | 31 | 20 | 27 | 0 | 23 | 19 | 20 | 2 | 7 | 1 | 3 |
147,997 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/mongo_backend/site_music/test_music_music_page_mongo_backend.py
|
test_music_music_page_mongo_backend.TestMusicPage
|
class TestMusicPage(object):
def test_parse_response(self):
music_id = 20
music = MusicPage(_id=music_id)
url = music.build_url()
html = spider.request_for_html(url)
pres = music.parse_response(url, request=None, response=None, html=html)
assert pres.entity_data["title"] == "Music {} Title".format(music_id)
assert len(pres.children) == (max_n_artist + max_n_genre)
|
class TestMusicPage(object):
def test_parse_response(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 9 | 0 | 9 | 7 | 7 | 0 | 9 | 7 | 7 | 1 | 1 | 0 | 1 |
147,998 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/mongo_backend/site_music/test_music_random_music_page_mongo_backend.py
|
test_music_random_music_page_mongo_backend.TestHomePage
|
class TestHomePage(object):
def test_parse_response(self):
rand_music_page = RandomMusicPage()
url = rand_music_page.build_url()
html = spider.request_for_html(url)
pres = rand_music_page.parse_response(url, request=None, response=None, html=html)
assert len(pres.entity_data["musics"]) == n_random_music
assert len(pres.entity_data["musics"]) == len(pres.children)
|
class TestHomePage(object):
def test_parse_response(self):
pass
| 2 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 8 | 0 | 8 | 6 | 6 | 0 | 8 | 6 | 6 | 1 | 1 | 0 | 1 |
147,999 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/sql_backend/test_psql_db_connect.py
|
test_psql_db_connect.DummyForTest
|
class DummyForTest(Base, ExtendedBase):
__tablename__ = "dummy_for_test"
id = sa.Column(sa.Integer, primary_key=True)
value = sa.Column(sa.String)
|
class DummyForTest(Base, ExtendedBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 1 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 1 | 0 | 0 |
148,000 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/sql_backend/test_sqlalchemy_orm.py
|
test_sqlalchemy_orm.Test
|
class Test(object):
def test(self):
user = User()
assert user.id is None
user = User(id=1)
assert user.name is None
assert user.status is None
assert user.to_dict() == {"id": 1, "name": None, "status": None}
user = User(id=1, name="Alice")
assert user.status is None
assert user.to_dict() == {"id": 1, "name": "Alice", "status": None}
|
class Test(object):
def test(self):
pass
| 2 | 0 | 12 | 2 | 10 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 13 | 2 | 11 | 3 | 9 | 0 | 11 | 3 | 9 | 1 | 1 | 0 | 1 |
148,001 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/sql_backend/test_sqlalchemy_orm.py
|
test_sqlalchemy_orm.User
|
class User(Base, UserAttributeMixin, ExtendedBase):
__tablename__ = "users"
id = sa.Column(sa.Integer, primary_key=True)
|
class User(Base, UserAttributeMixin, ExtendedBase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 1 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 2 | 0 | 0 |
148,002 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/sql_backend/test_sqlalchemy_orm.py
|
test_sqlalchemy_orm.UserAttributeMixin
|
class UserAttributeMixin(object):
name = sa.Column(sa.String)
status = sa.Column(sa.Integer, default=0)
|
class UserAttributeMixin(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
148,003 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/sql/test_sql_entity.py
|
test_sql_entity.TestSqlEntity.test_all.DummyEntityForTest
|
class DummyEntityForTest(Base, SqlEntitySingleStatus):
__tablename__ = "dummy_entity_for_test"
id = sa.Column(sa.Integer, primary_key=True)
value = sa.Column(sa.String, nullable=True)
CONF_UPDATE_INTERVAL = 1
CONF_STATUS_KEY = "status"
CONF_EDIT_AT_KEY = "edit_at"
CONF_UPDATE_FIELDS = (
"value",
)
CONF_ONLY_FIELDS = (
"id",
)
|
class DummyEntityForTest(Base, SqlEntitySingleStatus):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 15 | 2 | 13 | 8 | 12 | 0 | 9 | 8 | 8 | 0 | 6 | 0 | 0 |
148,004 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/test_util.py
|
test_util.test_get_all_subclass.A
|
class A(X): pass
|
class A(X):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
148,005 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/test_util.py
|
test_util.test_get_all_subclass.B
|
class B(X): pass
|
class B(X):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
148,006 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/test_util.py
|
test_util.test_get_all_subclass.C
|
class C(A, B): pass
|
class C(A, B):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
148,007 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/test_util.py
|
test_util.test_get_all_subclass.D
|
class D(C): pass
|
class D(C):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
148,008 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/test_util.py
|
test_util.test_get_all_subclass.E
|
class E(C): pass
|
class E(C):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
148,009 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/sql/test_sql_entity.py
|
test_sql_entity.TestSqlEntity.test_pre_process_only_fields_arg.DummyEntityForOnlyFieldsArgTest
|
class DummyEntityForOnlyFieldsArgTest(Base, SqlEntitySingleStatus):
__tablename__ = "dummy_entity_for_only_fields_arg_test"
id = sa.Column(sa.Integer, primary_key=True)
value = sa.Column(sa.String, nullable=True)
|
class DummyEntityForOnlyFieldsArgTest(Base, SqlEntitySingleStatus):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 5 | 1 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 6 | 0 | 0 |
148,010 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/test_util.py
|
test_util.test_get_all_subclass.X
|
class X: pass
|
class X:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 0 | 0 | 0 |
148,011 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/sql/test_sql_entity.py
|
test_sql_entity.TestSqlEntity
|
class TestSqlEntity(object):
def test_pre_process_only_fields_arg(self):
class DummyEntityForOnlyFieldsArgTest(Base, SqlEntitySingleStatus):
__tablename__ = "dummy_entity_for_only_fields_arg_test"
id = sa.Column(sa.Integer, primary_key=True)
value = sa.Column(sa.String, nullable=True)
Base.metadata.create_all(engine)
(
is_partial_load,
only_column_names,
only_column_objects,
) = DummyEntityForOnlyFieldsArgTest._pre_process_only_fields_arg(only_fields=None)
assert is_partial_load is False
assert len(only_column_names) == 0
assert len(only_column_objects) == 0
(
is_partial_load,
only_column_names,
only_column_objects,
) = DummyEntityForOnlyFieldsArgTest._pre_process_only_fields_arg(only_fields=("id",))
assert is_partial_load is True
assert only_column_names == ["id", ]
assert only_column_objects == [DummyEntityForOnlyFieldsArgTest.id, ]
(
is_partial_load,
only_column_names,
only_column_objects,
) = DummyEntityForOnlyFieldsArgTest._pre_process_only_fields_arg(
only_fields=(DummyEntityForOnlyFieldsArgTest.id,))
assert is_partial_load is True
assert only_column_names == ["id", ]
assert only_column_objects == [DummyEntityForOnlyFieldsArgTest.id, ]
# edit `CONF_ONLY_FIELDS` config
DummyEntityForOnlyFieldsArgTest.CONF_ONLY_FIELDS = ("id", "value")
(
is_partial_load,
only_column_names,
only_column_objects,
) = DummyEntityForOnlyFieldsArgTest._pre_process_only_fields_arg(only_fields=None)
assert is_partial_load is True
assert only_column_names == ["id", "value"]
assert only_column_objects == [DummyEntityForOnlyFieldsArgTest.id, DummyEntityForOnlyFieldsArgTest.value]
(
is_partial_load,
only_column_names,
only_column_objects,
) = DummyEntityForOnlyFieldsArgTest._pre_process_only_fields_arg(only_fields=("id",))
assert is_partial_load is True
assert only_column_names == ["id", ]
assert only_column_objects == [DummyEntityForOnlyFieldsArgTest.id, ]
(
is_partial_load,
only_column_names,
only_column_objects,
) = DummyEntityForOnlyFieldsArgTest._pre_process_only_fields_arg(
only_fields=(DummyEntityForOnlyFieldsArgTest.id,))
assert is_partial_load is True
assert only_column_names == ["id", ]
assert only_column_objects == [DummyEntityForOnlyFieldsArgTest.id, ]
def test_all(self):
class DummyEntityForTest(Base, SqlEntitySingleStatus):
__tablename__ = "dummy_entity_for_test"
id = sa.Column(sa.Integer, primary_key=True)
value = sa.Column(sa.String, nullable=True)
CONF_UPDATE_INTERVAL = 1
CONF_STATUS_KEY = "status"
CONF_EDIT_AT_KEY = "edit_at"
CONF_UPDATE_FIELDS = (
"value",
)
CONF_ONLY_FIELDS = (
"id",
)
Base.metadata.create_all(engine)
session = Session()
# --- test SqlEntity.set_db_values() method ---
session.query(DummyEntityForTest).delete()
session.commit()
DummyEntityForTest.smart_insert(engine, DummyEntityForTest(id=1, value="Alice"))
DummyEntityForTest(id=1).set_db_values(engine=engine, data={"value": "Bob"})
entity = DummyEntityForTest.by_id(1, session)
assert entity.id == 1
assert entity.value == "Bob"
# --- test SqlEntity.get_unfinished(), SqlEntity.get_finished() methods ---
session.query(DummyEntityForTest).delete()
DummyEntityForTest.smart_insert(
session,
[
DummyEntityForTest(id=1, value="Alice", edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(id=2, value="Bob", status=Status.S50_Finished.id, edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(id=3, value="Cathy", status=Status.S50_Finished.id, edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(id=4, value="David", edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(id=5, value="Edward", edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(id=6, value="Frank", status=Status.S50_Finished.id, edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(id=7, value="George", status=Status.S50_Finished.id, edit_at=datetime(2099, 1, 1)),
]
)
assert DummyEntityForTest.count_unfinished(session) == 3
assert DummyEntityForTest.count_unfinished(session, filters=(DummyEntityForTest.id <= 3,)) == 1
assert DummyEntityForTest.count_unfinished(session, filters=(DummyEntityForTest.id > 3,)) == 2
assert DummyEntityForTest.count_unfinished(session, filters=(DummyEntityForTest.id > 3,), limit=1) == 1
assert [
entity.id
for entity in
DummyEntityForTest.get_unfinished(session, order_by=DummyEntityForTest.id.desc(), limit=2)
] == [5, 4]
for dummy_entity in DummyEntityForTest.get_unfinished(session):
assert dummy_entity.status is None
assert dummy_entity.edit_at is None
for dummy_entity in DummyEntityForTest.get_unfinished(session, only_fields=["id", "status", "edit_at"]):
assert dummy_entity.status is not None
assert dummy_entity.edit_at is not None
for dummy_entity in DummyEntityForTest.get_unfinished(
session,
only_fields=[
DummyEntityForTest.id,
DummyEntityForTest.status,
DummyEntityForTest.edit_at,
]
):
assert dummy_entity.status is not None
assert dummy_entity.edit_at is not None
assert DummyEntityForTest.count_finished(session) == 4
assert DummyEntityForTest.count_finished(session, filters=(DummyEntityForTest.id <= 3,)) == 2
assert DummyEntityForTest.count_finished(session, filters=(DummyEntityForTest.id > 3,)) == 2
assert DummyEntityForTest.count_finished(session, filters=(DummyEntityForTest.id > 3,), limit=1) == 1
assert [
entity.id
for entity in
DummyEntityForTest.get_finished(session, order_by=[DummyEntityForTest.id.desc(), ], limit=2)
] == [7, 6]
for dummy_entity in DummyEntityForTest.get_finished(session):
assert dummy_entity.status is None
assert dummy_entity.edit_at is None
for dummy_entity in DummyEntityForTest.get_finished(session, only_fields=["id", "status", "edit_at"]):
assert dummy_entity.status is not None
assert dummy_entity.edit_at is not None
for dummy_entity in DummyEntityForTest.get_finished(
session,
only_fields=[
DummyEntityForTest.id,
DummyEntityForTest.status,
DummyEntityForTest.edit_at,
]
):
assert dummy_entity.status is not None
assert dummy_entity.edit_at is not None
session.commit()
session.close()
|
class TestSqlEntity(object):
def test_pre_process_only_fields_arg(self):
pass
class DummyEntityForOnlyFieldsArgTest(Base, SqlEntitySingleStatus):
def test_all(self):
pass
class DummyEntityForTest(Base, SqlEntitySingleStatus):
| 5 | 0 | 85 | 11 | 73 | 2 | 4 | 0.02 | 1 | 3 | 2 | 0 | 2 | 0 | 2 | 2 | 172 | 23 | 146 | 20 | 141 | 3 | 83 | 18 | 78 | 7 | 1 | 1 | 8 |
148,012 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/middleware/test_url_builder.py
|
test_url_builder.PythonOrgUrlBuilder
|
class PythonOrgUrlBuilder(BaseUrlBuilder):
pass
|
class PythonOrgUrlBuilder(BaseUrlBuilder):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
148,013 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/s1_movie/entity_homepage.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.s1_movie.entity_homepage.HomePage
|
class HomePage(MovieWebsiteEntity):
CONF_UPDATE_INTERVAL = 1
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(ListPage, Relationship.Option.many,
"n_listpage", recursive=True)
])
_id = fields.IntField(primary_key=True)
description = fields.StringField()
max_page_num = fields.IntField()
n_listpage = fields.IntField()
meta = dict(
collection="site_movie_homepage",
db_alias=config.DB_DATABASE.get_value(),
)
def build_url(self, **kwargs):
return url_builder.url_first_listpage()
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
div_pagination = soup.find("div", id="pagination")
a_tag_list = div_pagination.find_all("a")
href = a_tag_list[-1]["href"]
max_page_num = int(href.split("/")[-1])
entity_data = dict(max_page_num=max_page_num)
children = list()
for page_num in range(1, 1 + max_page_num):
listpage = ListPage(_id=page_num)
children.append(listpage)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class HomePage(MovieWebsiteEntity):
def build_url(self, **kwargs):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 4 | 0 | 11 | 1 | 10 | 0 | 2 | 0 | 1 | 6 | 2 | 0 | 2 | 0 | 2 | 35 | 40 | 6 | 34 | 21 | 30 | 0 | 23 | 20 | 20 | 2 | 7 | 1 | 3 |
148,014 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/status.py
|
crawlib.status.Status.S99_Finalized
|
class S99_Finalized(StatusDetail):
id = 99
description = "Finalized, will nolonger be crawled / changed."
description_en = description
description_cn = "强制禁止对其进行任何的修改和抓取,通常是由于有人工修改介入。"
|
class S99_Finalized(StatusDetail):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,015 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_base.py
|
test_entity_base.TestRelationshipConfig.test.MovieCoverImage
|
class MovieCoverImage(Entity): pass
|
class MovieCoverImage(Entity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,016 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_base.py
|
test_entity_base.TestRelationshipConfig.test.Movie
|
class Movie(Entity): pass
|
class Movie(Entity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,017 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/sql_backend/site_movie/test_movie_model_sql_backend.py
|
test_movie_model_sql_backend.TestListPage
|
class TestListPage(object):
def test_parse_response(self):
page_num = 3
listpage = ListPage(id=page_num)
url = listpage.build_url()
html = spider.request_for_html(url)
pres = listpage.parse_response(url, request=None, response=None, html=html)
assert len(pres.children) == n_movie_each_page
|
class TestListPage(object):
def test_parse_response(self):
pass
| 2 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 8 | 0 | 8 | 7 | 6 | 0 | 8 | 7 | 6 | 1 | 1 | 0 | 1 |
148,018 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/sql_backend/site_music/test_music_model_sql_backend.py
|
test_music_model_sql_backend.TestMusicPage
|
class TestMusicPage(object):
def test_parse_response(self):
music_id = 20
music = MusicPage(id=music_id)
url = music.build_url()
html = spider.request_for_html(url)
pres = music.parse_response(url, request=None, response=None, html=html)
assert pres.entity_data["title"] == "Music {} Title".format(music_id)
assert len(pres.children) == (max_n_artist + max_n_genre)
|
class TestMusicPage(object):
def test_parse_response(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 9 | 0 | 9 | 7 | 7 | 0 | 9 | 7 | 7 | 1 | 1 | 0 | 1 |
148,019 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/sql_backend/site_music/test_music_model_sql_backend.py
|
test_music_model_sql_backend.TestGenrePage
|
class TestGenrePage(object):
def test_parse_response(self):
genre_id = 5
genre_page = GenrePage(id=genre_id)
url = genre_page.build_url()
html = spider.request_for_html(url)
pres = genre_page.parse_response(url, request=None, response=None, html=html)
assert len(pres.entity_data["musics"]) > 0
assert len(pres.entity_data["musics"]) == len(pres.children)
|
class TestGenrePage(object):
def test_parse_response(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 9 | 0 | 9 | 7 | 7 | 0 | 9 | 7 | 7 | 1 | 1 | 0 | 1 |
148,020 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/sql_backend/site_music/test_music_model_sql_backend.py
|
test_music_model_sql_backend.TestArtistPage
|
class TestArtistPage(object):
def test_parse_response(self):
artist_id = 5
artist_page = ArtistPage(id=artist_id)
url = artist_page.build_url()
html = spider.request_for_html(url)
pres = artist_page.parse_response(url, request=None, response=None, html=html)
assert len(pres.entity_data["musics"]) > 0
assert len(pres.entity_data["musics"]) == len(pres.children)
|
class TestArtistPage(object):
def test_parse_response(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 9 | 0 | 9 | 7 | 7 | 0 | 9 | 7 | 7 | 1 | 1 | 0 | 1 |
148,021 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_base.py
|
test_entity_base.TestRelationshipConfig.test.MovieListpage
|
class MovieListpage(Entity): pass
|
class MovieListpage(Entity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,022 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/sql_backend/site_movie/test_movie_model_sql_backend.py
|
test_movie_model_sql_backend.TestHomePage
|
class TestHomePage(object):
def test_parse_response(self):
homepage = HomePage()
url = homepage.build_url()
html = spider.request_for_html(url)
pres = homepage.parse_response(url, request=None, response=None, html=html)
assert pres.entity_data["max_page_num"] == max_page_id
assert len(pres.children) == max_page_id
assert pres.children[-1].id == max_page_id
|
class TestHomePage(object):
def test_parse_response(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 9 | 0 | 9 | 6 | 7 | 0 | 9 | 6 | 7 | 1 | 1 | 0 | 1 |
148,023 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/mongo_backend/site_movie/test_movie_listpage_mongo_backend.py
|
test_movie_listpage_mongo_backend.TestListPage
|
class TestListPage(object):
def test_parse_response(self):
page_num = 3
listpage = ListPage(_id=page_num)
url = listpage.build_url()
html = spider.request_for_html(url)
pres = listpage.parse_response(url, request=None, response=None, html=html)
assert len(pres.children) == n_movie_each_page
|
class TestListPage(object):
def test_parse_response(self):
pass
| 2 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 8 | 0 | 8 | 7 | 6 | 0 | 8 | 7 | 6 | 1 | 1 | 0 | 1 |
148,024 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/mongo_backend/site_movie/test_movie_homepage_mongo_backend.py
|
test_movie_homepage_mongo_backend.TestHomePage
|
class TestHomePage(object):
def test_parse_response(self):
homepage = HomePage()
url = homepage.build_url()
html = spider.request_for_html(url)
pres = homepage.parse_response(url, request=None, response=None, html=html)
assert pres.entity_data["max_page_num"] == max_page_id
assert len(pres.children) == max_page_id
assert pres.children[-1]._id == max_page_id
|
class TestHomePage(object):
def test_parse_response(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 9 | 0 | 9 | 6 | 7 | 0 | 9 | 6 | 7 | 1 | 1 | 0 | 1 |
148,025 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/mongo_backend/test_mongoengine_orm.py
|
test_mongoengine_orm.UserAttributeMixin
|
class UserAttributeMixin(object):
_id = me.fields.IntField(primary_key=True)
name = me.fields.StringField()
status = me.fields.IntField(default=0)
|
class UserAttributeMixin(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 1 | 0 | 0 |
148,026 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/mongo_backend/site_music/test_music_genres_page_mongo_backend.py
|
test_music_genres_page_mongo_backend.TestGenrePage
|
class TestGenrePage(object):
def test_parse_response(self):
genre_id = 5
genre_page = GenrePage(_id=genre_id)
url = genre_page.build_url()
html = spider.request_for_html(url)
pres = genre_page.parse_response(url, request=None, response=None, html=html)
assert len(pres.entity_data["musics"]) > 0
assert len(pres.entity_data["musics"]) == len(pres.children)
|
class TestGenrePage(object):
def test_parse_response(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 9 | 0 | 9 | 7 | 7 | 0 | 9 | 7 | 7 | 1 | 1 | 0 | 1 |
148,027 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/sql_backend/site_movie/test_movie_model_sql_backend.py
|
test_movie_model_sql_backend.TestMovieCoverImagePage
|
class TestMovieCoverImagePage(object):
def test_parse_response(self):
movie_id = 25
movie_cover_image = MovieCoverImagePage(id=movie_id)
url = movie_cover_image.build_url()
html = spider.request_for_html(url)
pres = movie_cover_image.parse_response(url, request=None, response=None, html=html)
assert "<div" in pres.entity_data["image_content"]
assert len(pres.children) == 0
|
class TestMovieCoverImagePage(object):
def test_parse_response(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 9 | 0 | 9 | 7 | 7 | 0 | 9 | 7 | 7 | 1 | 1 | 0 | 1 |
148,028 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/sql_backend/site_movie/test_movie_model_sql_backend.py
|
test_movie_model_sql_backend.TestMoviePage
|
class TestMoviePage(object):
def test_parse_response(self):
movie_id = 25
movie = MoviePage(id=movie_id)
url = movie.build_url()
html = spider.request_for_html(url)
pres = movie.parse_response(url, request=None, response=None, html=html)
assert pres.entity_data["title"] == "Movie {} Title".format(movie_id)
assert len(pres.children) == 0
|
class TestMoviePage(object):
def test_parse_response(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 9 | 0 | 9 | 7 | 7 | 0 | 9 | 7 | 7 | 1 | 1 | 0 | 1 |
148,029 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/mongo_backend/site_movie/test_movie_moviepage_mongo_backend.py
|
test_movie_moviepage_mongo_backend.TestMoviePage
|
class TestMoviePage(object):
def test_parse_response(self):
movie_id = 25
movie = MoviePage(_id=movie_id)
url = movie.build_url()
html = spider.request_for_html(url)
pres = movie.parse_response(url, request=None, response=None, html=html)
assert pres.entity_data["title"] == "Movie {} Title".format(movie_id)
assert len(pres.children) == 0
|
class TestMoviePage(object):
def test_parse_response(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 9 | 0 | 9 | 7 | 7 | 0 | 9 | 7 | 7 | 1 | 1 | 0 | 1 |
148,030 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/mongo_backend/site_music/test_music_artists_page_mongo_backend.py
|
test_music_artists_page_mongo_backend.TestArtistPage
|
class TestArtistPage(object):
def test_parse_response(self):
artist_id = 5
artist_page = ArtistPage(_id=artist_id)
url = artist_page.build_url()
html = spider.request_for_html(url)
pres = artist_page.parse_response(url, request=None, response=None, html=html)
assert len(pres.entity_data["musics"]) > 0
assert len(pres.entity_data["musics"]) == len(pres.children)
|
class TestArtistPage(object):
def test_parse_response(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 9 | 0 | 9 | 7 | 7 | 0 | 9 | 7 | 7 | 1 | 1 | 0 | 1 |
148,031 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/decode.py
|
crawlib.decode.UrlSpecifiedDecoder.ErrorsHandle
|
class ErrorsHandle(object):
strict = "strict"
ignore = "ignore"
replace = "replace"
|
class ErrorsHandle(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 1 | 0 | 0 |
148,032 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/entity/base.py
|
crawlib.entity.base.Relationship.Option
|
class Option(object):
one = "one"
many = "many"
|
class Option(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
148,033 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/example/scrapy_movie/config.py
|
crawlib.example.scrapy_movie.config.Config.MongoDB
|
class MongoDB:
host = "ds019633.mlab.com"
port = 19633
database = "crawlib-test-webapp"
username = "admin"
password = "k3nLDC^xUBcA"
|
class MongoDB:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0 | 6 | 6 | 5 | 0 | 6 | 6 | 5 | 0 | 0 | 0 | 0 |
148,034 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/example/scrapy_movie/config.py
|
crawlib.example.scrapy_movie.config.Config.Url
|
class Url:
domain = "http://127.0.0.1:{}/movie".format(PORT)
|
class Url:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 0 | 0 | 0 |
148,035 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/status.py
|
crawlib.status.Status.S0_ToDo
|
class S0_ToDo(StatusDetail):
id = 0
description = "To do"
description_en = description
description_cn = "还未对以该Primary Key为基准生成的Url尝试过抓取。"
|
class S0_ToDo(StatusDetail):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,036 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/status.py
|
crawlib.status.Status.S10_HttpError
|
class S10_HttpError(StatusDetail):
id = 10
description = "Failed to make a http request."
description_en = description
description_cn = "执行spider.get_html(url)失败,无法获得响应。"
|
class S10_HttpError(StatusDetail):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,037 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/status.py
|
crawlib.status.Status.S20_WrongPage
|
class S20_WrongPage(StatusDetail):
id = 20
description = ("Successfully get http request, but get the wrong html"
"could be due to Banned, server temporarily not available.")
description_en = description
description_cn = ("成功获得了Html, 但Html不是我们想要的,"
"可能是由于被Ban, 服务器临时出错等情况,"
"使得服务器返回了错误的页面。")
|
class S20_WrongPage(StatusDetail):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0 | 8 | 4 | 7 | 0 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,038 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/status.py
|
crawlib.status.Status.S25_DecodeError
|
class S25_DecodeError(StatusDetail):
id = 25
description = ("Failed to decode binary response.")
description_en = description
description_cn = ("无法从http响应的字节码中解码出字符串。")
|
class S25_DecodeError(StatusDetail):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,039 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/status.py
|
crawlib.status.Status.S30_ParseError
|
class S30_ParseError(StatusDetail):
id = 30
description = "Html parser method failed."
description_en = description
description_cn = "在从Html提取数据时出现异常,导致程序失败。"
|
class S30_ParseError(StatusDetail):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,040 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/status.py
|
crawlib.status.Status.S40_InCompleteData
|
class S40_InCompleteData(StatusDetail):
id = 40
description = "Html parser method success, but data is wrong."
description_en = description
description_cn = ("提取数据的函数被成功运行,虽然没有出现异常,"
"但是某些数据点出现了错误, 结果可能不完整。")
|
class S40_InCompleteData(StatusDetail):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0 | 6 | 4 | 5 | 0 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,041 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/status.py
|
crawlib.status.Status.S50_Finished
|
class S50_Finished(StatusDetail):
"""
Break point, status code greater than this should be consider as 'Finished'.
"""
id = 50
description = "Finished."
description_en = description
description_cn = "成功的抓取了所有数据"
|
class S50_Finished(StatusDetail):
'''
Break point, status code greater than this should be consider as 'Finished'.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0 | 5 | 4 | 4 | 3 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,042 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/status.py
|
crawlib.status.Status.S5_UrlError
|
class S5_UrlError(StatusDetail):
id = 5
description = "Failed to build url endpoint"
description_en = description
description_cn = "生成Url的过程出现了错误。"
|
class S5_UrlError(StatusDetail):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,043 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/status.py
|
crawlib.status.Status.S60_ServerSideError
|
class S60_ServerSideError(StatusDetail):
id = 60
description = "Serverside error, so we temporarily mark it as 'finished'."
description_en = description
description_cn = ("服务器端出现问题,导致该Url是不可能被抓取的,"
"或是目前的数据不是我们最终想要的,但是可以凑活暂时用,"
"我们暂时将其标记为完成,但以后可能再次进行尝试。")
|
class S60_ServerSideError(StatusDetail):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0 | 7 | 4 | 6 | 0 | 5 | 4 | 4 | 0 | 2 | 0 | 0 |
148,044 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/s1_movie/entity_listpage.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.s1_movie.entity_listpage.ListPage
|
class ListPage(MovieWebsiteEntity):
CONF_UPDATE_INTERVAL = 1
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(MoviePage, Relationship.Option.many,
"n_movie", recursive=True)
])
_id = fields.IntField(primary_key=True)
n_movie = fields.IntField()
meta = dict(
collection="site_movie_listpage",
db_alias=config.DB_DATABASE.get_value(),
)
@property
def page_num(self):
return self._id
def build_url(self):
return url_builder.url_nth_listpage(self.page_num)
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
div_listpage = soup.find("div", id="listpage")
a_tag_list = div_listpage.find_all("a")
entity_data = dict()
children = list()
for a in a_tag_list:
href = a["href"]
movie_id = int(href.split("/")[-1])
movie = MoviePage(_id=movie_id)
children.append(movie)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class ListPage(MovieWebsiteEntity):
@property
def page_num(self):
pass
def build_url(self):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 6 | 0 | 9 | 1 | 7 | 0 | 1 | 0 | 1 | 5 | 2 | 0 | 3 | 0 | 3 | 36 | 44 | 9 | 35 | 21 | 29 | 0 | 23 | 19 | 19 | 2 | 7 | 1 | 4 |
148,045 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/mongodb/test_mongo_entity.py
|
test_mongo_entity.TestMongoEntity.test_all.DummyEntityForTest
|
class DummyEntityForTest(MongodbEntitySingleStatus):
_id = me.fields.IntField(primary_key=True)
value = me.fields.StringField()
CONF_UPDATE_INTERVAL = 1
CONF_STATUS_KEY = "status"
CONF_EDIT_AT_KEY = "edit_at"
CONF_ONLY_FIELDS = (
"_id",
)
meta = dict(
collection="dummy_entity_for_test",
db_alias=config.DB_DATABASE.get_value(),
)
|
class DummyEntityForTest(MongodbEntitySingleStatus):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 15 | 2 | 13 | 8 | 12 | 0 | 8 | 8 | 7 | 0 | 6 | 0 | 0 |
148,046 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/dummy_site_crawler/mongo_backend/test_mongoengine_orm.py
|
test_mongoengine_orm.Test
|
class Test(object):
def test(self):
user = User()
assert user._id is None
user = User(id=1)
assert user.name is None
assert user.status == 0
assert user.to_dict() == {"_id": 1, "name": None, "status": 0}
user = User(id=1, name="Alice")
assert user.status == 0
assert user.to_dict() == {"_id": 1, "name": "Alice", "status": 0}
|
class Test(object):
def test(self):
pass
| 2 | 0 | 12 | 2 | 10 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 13 | 2 | 11 | 3 | 9 | 0 | 11 | 3 | 9 | 1 | 1 | 0 | 1 |
148,047 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/cache.py
|
crawlib.cache.CompressedDisk
|
class CompressedDisk(diskcache.Disk): # pragma: no cover
"""
Serialization Layer. Value has to be bytes or string type, and will be
compressed using zlib before stored to disk.
- Key: str, url.
- Value: str or bytes, html or binary content.
"""
def __init__(self,
directory,
compress_level=6,
value_type_is_binary=False,
**kwargs):
self.compress_level = compress_level
self.value_type_is_binary = value_type_is_binary
if value_type_is_binary is True:
self._decompress = self._decompress_return_bytes
self._compress = self._compress_bytes
elif value_type_is_binary is False:
self._decompress = self._decompress_return_str
self._compress = self._compress_str
else:
msg = "`value_type_is_binary` arg has to be a boolean value!"
raise ValueError(msg)
super(CompressedDisk, self).__init__(directory, **kwargs)
def _decompress_return_str(self, data):
return zlib.decompress(data).decode("utf-8")
def _decompress_return_bytes(self, data):
return zlib.decompress(data)
def _compress_str(self, data):
return zlib.compress(data.encode("utf-8"), self.compress_level)
def _compress_bytes(self, data):
return zlib.compress(data, self.compress_level)
def get(self, key, raw):
data = super(CompressedDisk, self).get(key, raw)
return self._decompress(data)
def store(self, value, read, **kwargs):
if not read:
value = self._compress(value)
return super(CompressedDisk, self).store(value, read, **kwargs)
def fetch(self, mode, filename, value, read):
data = super(CompressedDisk, self). \
fetch(mode, filename, value, read)
if not read:
data = self._decompress(data)
return data
|
class CompressedDisk(diskcache.Disk):
'''
Serialization Layer. Value has to be bytes or string type, and will be
compressed using zlib before stored to disk.
- Key: str, url.
- Value: str or bytes, html or binary content.
'''
def __init__(self,
directory,
compress_level=6,
value_type_is_binary=False,
**kwargs):
pass
def _decompress_return_str(self, data):
pass
def _decompress_return_bytes(self, data):
pass
def _compress_str(self, data):
pass
def _compress_bytes(self, data):
pass
def get(self, key, raw):
pass
def store(self, value, read, **kwargs):
pass
def fetch(self, mode, filename, value, read):
pass
| 9 | 1 | 5 | 0 | 5 | 0 | 2 | 0.18 | 1 | 2 | 0 | 0 | 8 | 4 | 8 | 8 | 54 | 9 | 39 | 20 | 26 | 7 | 32 | 16 | 23 | 3 | 1 | 1 | 12 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.