Dataset schema (field, type, observed range):

nwo                  string    lengths 10-28
sha                  string    lengths 40-40
path                 string    lengths 11-97
identifier           string    lengths 1-64
parameters           string    lengths 2-2.24k
return_statement     string    lengths 0-2.17k
docstring            string    lengths 0-5.45k
docstring_summary    string    lengths 0-3.83k
func_begin           int64     1-13.4k
func_end             int64     2-13.4k
function             string    lengths 28-56.4k
url                  string    lengths 106-209
project              int64     1-48
executed_lines       list
executed_lines_pc    float64   0-153
missing_lines        list
missing_lines_pc     float64   0-100
covered              bool      2 classes
filecoverage         float64   2.53-100
function_lines       int64     2-1.46k
mccabe               int64     1-253
coverage             float64   0-100
docstring_lines      int64     0-112
function_nodoc       string    lengths 9-56.4k
id                   int64     0-29.8k
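The sample rows below are rendered one field value per line, in the schema order above; string fields whose value is empty appear to be omitted from the dump. As a reading aid, here is a minimal Python sketch that mirrors the schema as a typed record and filters for fully covered functions. The type name, the helper, and the per-field comments are illustrative assumptions inferred from the schema and the sample rows, not part of the dataset itself.

```python
# Minimal sketch only. "FunctionCoverageRow" and "fully_covered" are
# illustrative names, not part of the dataset; the per-field comments are
# inferred from the schema and the sample rows shown in this section.
from typing import List, TypedDict


class FunctionCoverageRow(TypedDict):
    nwo: str                  # repository as "owner/name", e.g. "dbader/schedule"
    sha: str                  # 40-character commit hash
    path: str                 # file path within the repository
    identifier: str           # function or method name
    parameters: str           # parameter list as written in the source
    return_statement: str     # a return statement from the function, "" if none
    docstring: str
    docstring_summary: str
    func_begin: int           # first source line of the function
    func_end: int              # last source line of the function
    function: str              # full function source
    url: str                   # GitHub permalink to the func_begin-func_end range
    project: int
    executed_lines: List[int]  # executed line offsets within the function
    executed_lines_pc: float
    missing_lines: List[int]   # unexecuted line offsets within the function
    missing_lines_pc: float
    covered: bool
    filecoverage: float        # coverage of the containing file, in percent
    function_lines: int        # number of lines in the function
    mccabe: int                # McCabe (cyclomatic) complexity
    coverage: float
    docstring_lines: int
    function_nodoc: str        # function source with the docstring removed
    id: int


def fully_covered(rows: List[FunctionCoverageRow]) -> List[FunctionCoverageRow]:
    """Keep only rows whose function body was fully executed (covered is true)."""
    return [row for row in rows if row["covered"]]
```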
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/compat.py
is_flask_legacy
()
return int(v[0]) == 0 and int(v[1]) < 11
22
24
def is_flask_legacy(): v = flask_version.split(".") return int(v[0]) == 0 and int(v[1]) < 11
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/compat.py#L22-L24
3
[ 0, 1, 2 ]
100
[]
0
true
54.545455
3
2
100
0
def is_flask_legacy(): v = flask_version.split(".") return int(v[0]) == 0 and int(v[1]) < 11
1,639
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/mediatypes.py
parse_accept_header
(accept)
return [media_types for media_types in ret if media_types]
Parses the value of a clients accept header, and returns a list of sets of media types it included, ordered by precedence. For example, 'application/json, application/xml, */*' would return: [ set([<MediaType "application/xml">, <MediaType "application/json">]), set([<MediaType "*/*">]) ]
Parses the value of a clients accept header, and returns a list of sets of media types it included, ordered by precedence.
96
112
def parse_accept_header(accept): """ Parses the value of a clients accept header, and returns a list of sets of media types it included, ordered by precedence. For example, 'application/json, application/xml, */*' would return: [ set([<MediaType "application/xml">, <MediaType "application/json">]), set([<MediaType "*/*">]) ] """ ret = [set(), set(), set(), set()] for token in accept.split(","): media_type = MediaType(token.strip()) ret[3 - media_type.precedence].add(media_type) return [media_types for media_types in ret if media_types]
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/mediatypes.py#L96-L112
3
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ]
100
[]
0
true
100
17
3
100
9
def parse_accept_header(accept): ret = [set(), set(), set(), set()] for token in accept.split(","): media_type = MediaType(token.strip()) ret[3 - media_type.precedence].add(media_type) return [media_types for media_types in ret if media_types]
1,640
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/mediatypes.py
MediaType.__init__
(self, media_type)
2
3
def __init__(self, media_type): self.main_type, self.sub_type, self.params = self._parse(media_type)
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/mediatypes.py#L2-L3
3
[ 0, 1 ]
100
[]
0
true
100
2
1
100
0
def __init__(self, media_type): self.main_type, self.sub_type, self.params = self._parse(media_type)
1,641
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/mediatypes.py
MediaType.full_type
(self)
return self.main_type + "/" + self.sub_type
6
7
def full_type(self): return self.main_type + "/" + self.sub_type
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/mediatypes.py#L6-L7
3
[ 0, 1 ]
100
[]
0
true
100
2
1
100
0
def full_type(self): return self.main_type + "/" + self.sub_type
1,642
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/mediatypes.py
MediaType.precedence
(self)
return 3
Precedence is determined by how specific a media type is: 3. 'type/subtype; param=val' 2. 'type/subtype' 1. 'type/*' 0. '*/*'
Precedence is determined by how specific a media type is:
10
25
def precedence(self): """ Precedence is determined by how specific a media type is: 3. 'type/subtype; param=val' 2. 'type/subtype' 1. 'type/*' 0. '*/*' """ if self.main_type == "*": return 0 elif self.sub_type == "*": return 1 elif not self.params or list(self.params.keys()) == ["q"]: return 2 return 3
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/mediatypes.py#L10-L25
3
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ]
100
[]
0
true
100
16
5
100
6
def precedence(self): if self.main_type == "*": return 0 elif self.sub_type == "*": return 1 elif not self.params or list(self.params.keys()) == ["q"]: return 2 return 3
1,643
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/mediatypes.py
MediaType.satisfies
(self, other)
return True
Returns `True` if this media type is a superset of `other`. Some examples of cases where this holds true: 'application/json; version=1.0' >= 'application/json; version=1.0' 'application/json' >= 'application/json; indent=4' 'text/*' >= 'text/plain' '*/*' >= 'text/plain'
Returns `True` if this media type is a superset of `other`. Some examples of cases where this holds true:
27
55
def satisfies(self, other): """ Returns `True` if this media type is a superset of `other`. Some examples of cases where this holds true: 'application/json; version=1.0' >= 'application/json; version=1.0' 'application/json' >= 'application/json; indent=4' 'text/*' >= 'text/plain' '*/*' >= 'text/plain' """ for key in self.params.keys(): if key != "q" and other.params.get(key, None) != self.params.get(key, None): return False if ( self.sub_type != "*" and other.sub_type != "*" and other.sub_type != self.sub_type ): return False if ( self.main_type != "*" and other.main_type != "*" and other.main_type != self.main_type ): return False return True
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/mediatypes.py#L27-L55
3
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28 ]
100
[]
0
true
100
29
10
100
7
def satisfies(self, other): for key in self.params.keys(): if key != "q" and other.params.get(key, None) != self.params.get(key, None): return False if ( self.sub_type != "*" and other.sub_type != "*" and other.sub_type != self.sub_type ): return False if ( self.main_type != "*" and other.main_type != "*" and other.main_type != self.main_type ): return False return True
1,644
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/mediatypes.py
MediaType._parse
(self, media_type)
return (main_type, sub_type, params)
Parse a media type string, like "application/json; indent=4" into a three-tuple, like: ('application', 'json', {'indent': 4})
Parse a media type string, like "application/json; indent=4" into a three-tuple, like: ('application', 'json', {'indent': 4})
57
71
def _parse(self, media_type): """ Parse a media type string, like "application/json; indent=4" into a three-tuple, like: ('application', 'json', {'indent': 4}) """ full_type, sep, param_string = media_type.partition(";") params = {} for token in param_string.strip().split(","): key, sep, value = [s.strip() for s in token.partition("=")] if value.startswith('"') and value.endswith('"'): value = value[1:-1] if key: params[key] = value main_type, sep, sub_type = [s.strip() for s in full_type.partition("/")] return (main_type, sub_type, params)
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/mediatypes.py#L57-L71
3
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 ]
100
[]
0
true
100
15
7
100
2
def _parse(self, media_type): full_type, sep, param_string = media_type.partition(";") params = {} for token in param_string.strip().split(","): key, sep, value = [s.strip() for s in token.partition("=")] if value.startswith('"') and value.endswith('"'): value = value[1:-1] if key: params[key] = value main_type, sep, sub_type = [s.strip() for s in full_type.partition("/")] return (main_type, sub_type, params)
1,645
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/mediatypes.py
MediaType.__repr__
(self)
return "<%s '%s'>" % (self.__class__.__name__, str(self))
73
74
def __repr__(self): return "<%s '%s'>" % (self.__class__.__name__, str(self))
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/mediatypes.py#L73-L74
3
[ 0, 1 ]
100
[]
0
true
100
2
1
100
0
def __repr__(self): return "<%s '%s'>" % (self.__class__.__name__, str(self))
1,646
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/mediatypes.py
MediaType.__str__
(self)
return self.full_type
Return a canonical string representing the media type. Note that this ensures the params are sorted.
Return a canonical string representing the media type. Note that this ensures the params are sorted.
76
86
def __str__(self): """ Return a canonical string representing the media type. Note that this ensures the params are sorted. """ if self.params: params_str = ", ".join( ['%s="%s"' % (key, val) for key, val in sorted(self.params.items())] ) return self.full_type + "; " + params_str return self.full_type
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/mediatypes.py#L76-L86
3
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
100
[]
0
true
100
11
3
100
2
def __str__(self): if self.params: params_str = ", ".join( ['%s="%s"' % (key, val) for key, val in sorted(self.params.items())] ) return self.full_type + "; " + params_str return self.full_type
1,647
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/mediatypes.py
MediaType.__hash__
(self)
return hash(str(self))
88
89
def __hash__(self): return hash(str(self))
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/mediatypes.py#L88-L89
3
[ 0, 1 ]
100
[]
0
true
100
2
1
100
0
def __hash__(self): return hash(str(self))
1,648
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/mediatypes.py
MediaType.__eq__
(self, other)
return self.full_type == other.full_type and self.params == other.params
91
93
def __eq__(self, other): # Compare two MediaType instances, ignoring parameter ordering. return self.full_type == other.full_type and self.params == other.params
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/mediatypes.py#L91-L93
3
[ 0, 1, 2 ]
100
[]
0
true
100
3
2
100
0
def __eq__(self, other): # Compare two MediaType instances, ignoring parameter ordering. return self.full_type == other.full_type and self.params == other.params
1,649
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/status.py
is_informational
(code)
return code >= 100 and code <= 199
11
12
def is_informational(code): return code >= 100 and code <= 199
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/status.py#L11-L12
3
[ 0, 1 ]
100
[]
0
true
100
2
2
100
0
def is_informational(code): return code >= 100 and code <= 199
1,650
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/status.py
is_success
(code)
return code >= 200 and code <= 299
15
16
def is_success(code): return code >= 200 and code <= 299
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/status.py#L15-L16
3
[ 0, 1 ]
100
[]
0
true
100
2
2
100
0
def is_success(code): return code >= 200 and code <= 299
1,651
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/status.py
is_redirect
(code)
return code >= 300 and code <= 399
19
20
def is_redirect(code): return code >= 300 and code <= 399
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/status.py#L19-L20
3
[ 0, 1 ]
100
[]
0
true
100
2
2
100
0
def is_redirect(code): return code >= 300 and code <= 399
1,652
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/status.py
is_client_error
(code)
return code >= 400 and code <= 499
23
24
def is_client_error(code): return code >= 400 and code <= 499
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/status.py#L23-L24
3
[ 0, 1 ]
100
[]
0
true
100
2
2
100
0
def is_client_error(code): return code >= 400 and code <= 499
1,653
flask-api/flask-api
fdba680df667662e683bf69e0516e3ffef3f51bc
flask_api/status.py
is_server_error
(code)
return code >= 500 and code <= 599
27
28
def is_server_error(code): return code >= 500 and code <= 599
https://github.com/flask-api/flask-api/blob/fdba680df667662e683bf69e0516e3ffef3f51bc/project3/flask_api/status.py#L27-L28
3
[ 0, 1 ]
100
[]
0
true
100
2
2
100
0
def is_server_error(code): return code >= 500 and code <= 599
1,654
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
every
(interval: int = 1)
return default_scheduler.every(interval)
Calls :meth:`every <Scheduler.every>` on the :data:`default scheduler instance <default_scheduler>`.
Calls :meth:`every <Scheduler.every>` on the :data:`default scheduler instance <default_scheduler>`.
807
811
def every(interval: int = 1) -> Job: """Calls :meth:`every <Scheduler.every>` on the :data:`default scheduler instance <default_scheduler>`. """ return default_scheduler.every(interval)
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L807-L811
4
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.724518
5
1
100
2
def every(interval: int = 1) -> Job: return default_scheduler.every(interval)
1,706
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
run_pending
()
Calls :meth:`run_pending <Scheduler.run_pending>` on the :data:`default scheduler instance <default_scheduler>`.
Calls :meth:`run_pending <Scheduler.run_pending>` on the :data:`default scheduler instance <default_scheduler>`.
814
818
def run_pending() -> None: """Calls :meth:`run_pending <Scheduler.run_pending>` on the :data:`default scheduler instance <default_scheduler>`. """ default_scheduler.run_pending()
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L814-L818
4
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.724518
5
1
100
2
def run_pending() -> None: default_scheduler.run_pending()
1,707
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
run_all
(delay_seconds: int = 0)
Calls :meth:`run_all <Scheduler.run_all>` on the :data:`default scheduler instance <default_scheduler>`.
Calls :meth:`run_all <Scheduler.run_all>` on the :data:`default scheduler instance <default_scheduler>`.
821
825
def run_all(delay_seconds: int = 0) -> None: """Calls :meth:`run_all <Scheduler.run_all>` on the :data:`default scheduler instance <default_scheduler>`. """ default_scheduler.run_all(delay_seconds=delay_seconds)
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L821-L825
4
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.724518
5
1
100
2
def run_all(delay_seconds: int = 0) -> None: default_scheduler.run_all(delay_seconds=delay_seconds)
1,708
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
get_jobs
(tag: Optional[Hashable] = None)
return default_scheduler.get_jobs(tag)
Calls :meth:`get_jobs <Scheduler.get_jobs>` on the :data:`default scheduler instance <default_scheduler>`.
Calls :meth:`get_jobs <Scheduler.get_jobs>` on the :data:`default scheduler instance <default_scheduler>`.
828
832
def get_jobs(tag: Optional[Hashable] = None) -> List[Job]: """Calls :meth:`get_jobs <Scheduler.get_jobs>` on the :data:`default scheduler instance <default_scheduler>`. """ return default_scheduler.get_jobs(tag)
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L828-L832
4
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.724518
5
1
100
2
def get_jobs(tag: Optional[Hashable] = None) -> List[Job]: return default_scheduler.get_jobs(tag)
1,709
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
clear
(tag: Optional[Hashable] = None)
Calls :meth:`clear <Scheduler.clear>` on the :data:`default scheduler instance <default_scheduler>`.
Calls :meth:`clear <Scheduler.clear>` on the :data:`default scheduler instance <default_scheduler>`.
835
839
def clear(tag: Optional[Hashable] = None) -> None: """Calls :meth:`clear <Scheduler.clear>` on the :data:`default scheduler instance <default_scheduler>`. """ default_scheduler.clear(tag)
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L835-L839
4
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.724518
5
1
100
2
def clear(tag: Optional[Hashable] = None) -> None: default_scheduler.clear(tag)
1,710
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
cancel_job
(job: Job)
Calls :meth:`cancel_job <Scheduler.cancel_job>` on the :data:`default scheduler instance <default_scheduler>`.
Calls :meth:`cancel_job <Scheduler.cancel_job>` on the :data:`default scheduler instance <default_scheduler>`.
842
846
def cancel_job(job: Job) -> None: """Calls :meth:`cancel_job <Scheduler.cancel_job>` on the :data:`default scheduler instance <default_scheduler>`. """ default_scheduler.cancel_job(job)
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L842-L846
4
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.724518
5
1
100
2
def cancel_job(job: Job) -> None: default_scheduler.cancel_job(job)
1,711
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
next_run
(tag: Optional[Hashable] = None)
return default_scheduler.get_next_run(tag)
Calls :meth:`next_run <Scheduler.next_run>` on the :data:`default scheduler instance <default_scheduler>`.
Calls :meth:`next_run <Scheduler.next_run>` on the :data:`default scheduler instance <default_scheduler>`.
849
853
def next_run(tag: Optional[Hashable] = None) -> Optional[datetime.datetime]: """Calls :meth:`next_run <Scheduler.next_run>` on the :data:`default scheduler instance <default_scheduler>`. """ return default_scheduler.get_next_run(tag)
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L849-L853
4
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.724518
5
1
100
2
def next_run(tag: Optional[Hashable] = None) -> Optional[datetime.datetime]: return default_scheduler.get_next_run(tag)
1,712
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
idle_seconds
()
return default_scheduler.idle_seconds
Calls :meth:`idle_seconds <Scheduler.idle_seconds>` on the :data:`default scheduler instance <default_scheduler>`.
Calls :meth:`idle_seconds <Scheduler.idle_seconds>` on the :data:`default scheduler instance <default_scheduler>`.
856
860
def idle_seconds() -> Optional[float]: """Calls :meth:`idle_seconds <Scheduler.idle_seconds>` on the :data:`default scheduler instance <default_scheduler>`. """ return default_scheduler.idle_seconds
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L856-L860
4
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.724518
5
1
100
2
def idle_seconds() -> Optional[float]: return default_scheduler.idle_seconds
1,713
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
repeat
(job, *args, **kwargs)
return _schedule_decorator
Decorator to schedule a new periodic job. Any additional arguments are passed on to the decorated function when the job runs. :param job: a :class:`Jobs <Job>`
Decorator to schedule a new periodic job.
863
877
def repeat(job, *args, **kwargs): """ Decorator to schedule a new periodic job. Any additional arguments are passed on to the decorated function when the job runs. :param job: a :class:`Jobs <Job>` """ def _schedule_decorator(decorated_function): job.do(decorated_function, *args, **kwargs) return decorated_function return _schedule_decorator
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L863-L877
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 ]
100
[]
0
true
99.724518
15
2
100
6
def repeat(job, *args, **kwargs): def _schedule_decorator(decorated_function): job.do(decorated_function, *args, **kwargs) return decorated_function return _schedule_decorator
1,714
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Scheduler.__init__
(self)
85
86
def __init__(self) -> None: self.jobs: List[Job] = []
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L85-L86
4
[ 0, 1 ]
100
[]
0
true
99.724518
2
1
100
0
def __init__(self) -> None: self.jobs: List[Job] = []
1,715
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Scheduler.run_pending
(self)
Run all jobs that are scheduled to run. Please note that it is *intended behavior that run_pending() does not run missed jobs*. For example, if you've registered a job that should run every minute and you only call run_pending() in one hour increments then your job won't be run 60 times in between but only once.
Run all jobs that are scheduled to run.
88
100
def run_pending(self) -> None: """ Run all jobs that are scheduled to run. Please note that it is *intended behavior that run_pending() does not run missed jobs*. For example, if you've registered a job that should run every minute and you only call run_pending() in one hour increments then your job won't be run 60 times in between but only once. """ runnable_jobs = (job for job in self.jobs if job.should_run) for job in sorted(runnable_jobs): self._run_job(job)
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L88-L100
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ]
100
[]
0
true
99.724518
13
2
100
7
def run_pending(self) -> None: runnable_jobs = (job for job in self.jobs if job.should_run) for job in sorted(runnable_jobs): self._run_job(job)
1,716
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Scheduler.run_all
(self, delay_seconds: int = 0)
Run all jobs regardless if they are scheduled to run or not. A delay of `delay` seconds is added between each job. This helps distribute system load generated by the jobs more evenly over time. :param delay_seconds: A delay added between every executed job
Run all jobs regardless if they are scheduled to run or not.
102
119
def run_all(self, delay_seconds: int = 0) -> None: """ Run all jobs regardless if they are scheduled to run or not. A delay of `delay` seconds is added between each job. This helps distribute system load generated by the jobs more evenly over time. :param delay_seconds: A delay added between every executed job """ logger.debug( "Running *all* %i jobs with %is delay in between", len(self.jobs), delay_seconds, ) for job in self.jobs[:]: self._run_job(job) time.sleep(delay_seconds)
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L102-L119
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 ]
100
[]
0
true
99.724518
18
2
100
7
def run_all(self, delay_seconds: int = 0) -> None: logger.debug( "Running *all* %i jobs with %is delay in between", len(self.jobs), delay_seconds, ) for job in self.jobs[:]: self._run_job(job) time.sleep(delay_seconds)
1,717
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Scheduler.get_jobs
(self, tag: Optional[Hashable] = None)
Gets scheduled jobs marked with the given tag, or all jobs if tag is omitted. :param tag: An identifier used to identify a subset of jobs to retrieve
Gets scheduled jobs marked with the given tag, or all jobs if tag is omitted.
121
132
def get_jobs(self, tag: Optional[Hashable] = None) -> List["Job"]: """ Gets scheduled jobs marked with the given tag, or all jobs if tag is omitted. :param tag: An identifier used to identify a subset of jobs to retrieve """ if tag is None: return self.jobs[:] else: return [job for job in self.jobs if tag in job.tags]
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L121-L132
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ]
100
[]
0
true
99.724518
12
3
100
5
def get_jobs(self, tag: Optional[Hashable] = None) -> List["Job"]: if tag is None: return self.jobs[:] else: return [job for job in self.jobs if tag in job.tags]
1,718
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Scheduler.clear
(self, tag: Optional[Hashable] = None)
Deletes scheduled jobs marked with the given tag, or all jobs if tag is omitted. :param tag: An identifier used to identify a subset of jobs to delete
Deletes scheduled jobs marked with the given tag, or all jobs if tag is omitted.
134
147
def clear(self, tag: Optional[Hashable] = None) -> None: """ Deletes scheduled jobs marked with the given tag, or all jobs if tag is omitted. :param tag: An identifier used to identify a subset of jobs to delete """ if tag is None: logger.debug("Deleting *all* jobs") del self.jobs[:] else: logger.debug('Deleting all jobs tagged "%s"', tag) self.jobs[:] = (job for job in self.jobs if tag not in job.tags)
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L134-L147
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 ]
100
[]
0
true
99.724518
14
2
100
5
def clear(self, tag: Optional[Hashable] = None) -> None: if tag is None: logger.debug("Deleting *all* jobs") del self.jobs[:] else: logger.debug('Deleting all jobs tagged "%s"', tag) self.jobs[:] = (job for job in self.jobs if tag not in job.tags)
1,719
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Scheduler.cancel_job
(self, job: "Job")
Delete a scheduled job. :param job: The job to be unscheduled
Delete a scheduled job.
149
159
def cancel_job(self, job: "Job") -> None: """ Delete a scheduled job. :param job: The job to be unscheduled """ try: logger.debug('Cancelling job "%s"', str(job)) self.jobs.remove(job) except ValueError: logger.debug('Cancelling not-scheduled job "%s"', str(job))
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L149-L159
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
100
[]
0
true
99.724518
11
2
100
3
def cancel_job(self, job: "Job") -> None: try: logger.debug('Cancelling job "%s"', str(job)) self.jobs.remove(job) except ValueError: logger.debug('Cancelling not-scheduled job "%s"', str(job))
1,720
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Scheduler.every
(self, interval: int = 1)
return job
Schedule a new periodic job. :param interval: A quantity of a certain time unit :return: An unconfigured :class:`Job <Job>`
Schedule a new periodic job.
161
169
def every(self, interval: int = 1) -> "Job": """ Schedule a new periodic job. :param interval: A quantity of a certain time unit :return: An unconfigured :class:`Job <Job>` """ job = Job(interval, self) return job
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L161-L169
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8 ]
100
[]
0
true
99.724518
9
1
100
4
def every(self, interval: int = 1) -> "Job": job = Job(interval, self) return job
1,721
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Scheduler._run_job
(self, job: "Job")
171
174
def _run_job(self, job: "Job") -> None: ret = job.run() if isinstance(ret, CancelJob) or ret is CancelJob: self.cancel_job(job)
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L171-L174
4
[ 0, 1, 2, 3 ]
100
[]
0
true
99.724518
4
3
100
0
def _run_job(self, job: "Job") -> None: ret = job.run() if isinstance(ret, CancelJob) or ret is CancelJob: self.cancel_job(job)
1,722
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Scheduler.get_next_run
( self, tag: Optional[Hashable] = None )
return min(jobs_filtered).next_run
Datetime when the next job should run. :param tag: Filter the next run for the given tag parameter :return: A :class:`~datetime.datetime` object or None if no jobs scheduled
Datetime when the next job should run.
176
192
def get_next_run( self, tag: Optional[Hashable] = None ) -> Optional[datetime.datetime]: """ Datetime when the next job should run. :param tag: Filter the next run for the given tag parameter :return: A :class:`~datetime.datetime` object or None if no jobs scheduled """ if not self.jobs: return None jobs_filtered = self.get_jobs(tag) if not jobs_filtered: return None return min(jobs_filtered).next_run
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L176-L192
4
[ 0, 10, 11, 12, 13, 14, 15, 16 ]
47.058824
[]
0
false
99.724518
17
3
100
6
def get_next_run( self, tag: Optional[Hashable] = None ) -> Optional[datetime.datetime]: if not self.jobs: return None jobs_filtered = self.get_jobs(tag) if not jobs_filtered: return None return min(jobs_filtered).next_run
1,723
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Scheduler.idle_seconds
(self)
return (self.next_run - datetime.datetime.now()).total_seconds()
:return: Number of seconds until :meth:`next_run <Scheduler.next_run>` or None if no jobs are scheduled
:return: Number of seconds until :meth:`next_run <Scheduler.next_run>` or None if no jobs are scheduled
197
205
def idle_seconds(self) -> Optional[float]: """ :return: Number of seconds until :meth:`next_run <Scheduler.next_run>` or None if no jobs are scheduled """ if not self.next_run: return None return (self.next_run - datetime.datetime.now()).total_seconds()
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L197-L205
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8 ]
100
[]
0
true
99.724518
9
2
100
3
def idle_seconds(self) -> Optional[float]: if not self.next_run: return None return (self.next_run - datetime.datetime.now()).total_seconds()
1,724
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.__init__
(self, interval: int, scheduler: Scheduler = None)
226
256
def __init__(self, interval: int, scheduler: Scheduler = None): self.interval: int = interval # pause interval * unit between runs self.latest: Optional[int] = None # upper limit to the interval self.job_func: Optional[functools.partial] = None # the job job_func to run # time units, e.g. 'minutes', 'hours', ... self.unit: Optional[str] = None # optional time at which this job runs self.at_time: Optional[datetime.time] = None # optional time zone of the self.at_time field. Only relevant when at_time is not None self.at_time_zone = None # datetime of the last run self.last_run: Optional[datetime.datetime] = None # datetime of the next run self.next_run: Optional[datetime.datetime] = None # timedelta between runs, only valid for self.period: Optional[datetime.timedelta] = None # Specific day of the week to start on self.start_day: Optional[str] = None # optional time of final run self.cancel_after: Optional[datetime.datetime] = None self.tags: Set[Hashable] = set() # unique set of tags for the job self.scheduler: Optional[Scheduler] = scheduler
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L226-L256
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 ]
100
[]
0
true
99.724518
31
1
100
0
def __init__(self, interval: int, scheduler: Scheduler = None): self.interval: int = interval # pause interval * unit between runs self.latest: Optional[int] = None # upper limit to the interval self.job_func: Optional[functools.partial] = None # the job job_func to run # time units, e.g. 'minutes', 'hours', ... self.unit: Optional[str] = None # optional time at which this job runs self.at_time: Optional[datetime.time] = None # optional time zone of the self.at_time field. Only relevant when at_time is not None self.at_time_zone = None # datetime of the last run self.last_run: Optional[datetime.datetime] = None # datetime of the next run self.next_run: Optional[datetime.datetime] = None # timedelta between runs, only valid for self.period: Optional[datetime.timedelta] = None # Specific day of the week to start on self.start_day: Optional[str] = None # optional time of final run self.cancel_after: Optional[datetime.datetime] = None self.tags: Set[Hashable] = set() # unique set of tags for the job self.scheduler: Optional[Scheduler] = scheduler
1,725
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.__lt__
(self, other)
return self.next_run < other.next_run
PeriodicJobs are sortable based on the scheduled time they run next.
PeriodicJobs are sortable based on the scheduled time they run next.
258
263
def __lt__(self, other) -> bool: """ PeriodicJobs are sortable based on the scheduled time they run next. """ return self.next_run < other.next_run
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L258-L263
4
[ 0, 1, 2, 3, 4, 5 ]
100
[]
0
true
99.724518
6
1
100
2
def __lt__(self, other) -> bool: return self.next_run < other.next_run
1,726
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.__str__
(self)
return ("Job(interval={}, unit={}, do={}, args={}, kwargs={})").format( self.interval, self.unit, job_func_name, "()" if self.job_func is None else self.job_func.args, "{}" if self.job_func is None else self.job_func.keywords, )
265
277
def __str__(self) -> str: if hasattr(self.job_func, "__name__"): job_func_name = self.job_func.__name__ # type: ignore else: job_func_name = repr(self.job_func) return ("Job(interval={}, unit={}, do={}, args={}, kwargs={})").format( self.interval, self.unit, job_func_name, "()" if self.job_func is None else self.job_func.args, "{}" if self.job_func is None else self.job_func.keywords, )
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L265-L277
4
[ 0, 1, 2, 4, 5, 6 ]
46.153846
[]
0
false
99.724518
13
2
100
0
def __str__(self) -> str: if hasattr(self.job_func, "__name__"): job_func_name = self.job_func.__name__ # type: ignore else: job_func_name = repr(self.job_func) return ("Job(interval={}, unit={}, do={}, args={}, kwargs={})").format( self.interval, self.unit, job_func_name, "()" if self.job_func is None else self.job_func.args, "{}" if self.job_func is None else self.job_func.keywords, )
1,727
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.__repr__
(self)
279
320
def __repr__(self): def format_time(t): return t.strftime("%Y-%m-%d %H:%M:%S") if t else "[never]" def is_repr(j): return not isinstance(j, Job) timestats = "(last run: %s, next run: %s)" % ( format_time(self.last_run), format_time(self.next_run), ) if hasattr(self.job_func, "__name__"): job_func_name = self.job_func.__name__ else: job_func_name = repr(self.job_func) args = [repr(x) if is_repr(x) else str(x) for x in self.job_func.args] kwargs = ["%s=%s" % (k, repr(v)) for k, v in self.job_func.keywords.items()] call_repr = job_func_name + "(" + ", ".join(args + kwargs) + ")" if self.at_time is not None: return "Every %s %s at %s do %s %s" % ( self.interval, self.unit[:-1] if self.interval == 1 else self.unit, self.at_time, call_repr, timestats, ) else: fmt = ( "Every %(interval)s " + ("to %(latest)s " if self.latest is not None else "") + "%(unit)s do %(call_repr)s %(timestats)s" ) return fmt % dict( interval=self.interval, latest=self.latest, unit=(self.unit[:-1] if self.interval == 1 else self.unit), call_repr=call_repr, timestats=timestats, )
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L279-L320
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21, 29, 34, 35 ]
50
[]
0
false
99.724518
42
7
100
0
def __repr__(self): def format_time(t): return t.strftime("%Y-%m-%d %H:%M:%S") if t else "[never]" def is_repr(j): return not isinstance(j, Job) timestats = "(last run: %s, next run: %s)" % ( format_time(self.last_run), format_time(self.next_run), ) if hasattr(self.job_func, "__name__"): job_func_name = self.job_func.__name__ else: job_func_name = repr(self.job_func) args = [repr(x) if is_repr(x) else str(x) for x in self.job_func.args] kwargs = ["%s=%s" % (k, repr(v)) for k, v in self.job_func.keywords.items()] call_repr = job_func_name + "(" + ", ".join(args + kwargs) + ")" if self.at_time is not None: return "Every %s %s at %s do %s %s" % ( self.interval, self.unit[:-1] if self.interval == 1 else self.unit, self.at_time, call_repr, timestats, ) else: fmt = ( "Every %(interval)s " + ("to %(latest)s " if self.latest is not None else "") + "%(unit)s do %(call_repr)s %(timestats)s" ) return fmt % dict( interval=self.interval, latest=self.latest, unit=(self.unit[:-1] if self.interval == 1 else self.unit), call_repr=call_repr, timestats=timestats, )
1,728
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.second
(self)
return self.seconds
323
326
def second(self): if self.interval != 1: raise IntervalError("Use seconds instead of second") return self.seconds
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L323-L326
4
[ 0, 1, 2, 3 ]
100
[]
0
true
99.724518
4
2
100
0
def second(self): if self.interval != 1: raise IntervalError("Use seconds instead of second") return self.seconds
1,729
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.seconds
(self)
return self
329
331
def seconds(self): self.unit = "seconds" return self
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L329-L331
4
[ 0, 1, 2 ]
100
[]
0
true
99.724518
3
1
100
0
def seconds(self): self.unit = "seconds" return self
1,730
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.minute
(self)
return self.minutes
334
337
def minute(self): if self.interval != 1: raise IntervalError("Use minutes instead of minute") return self.minutes
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L334-L337
4
[ 0, 1, 2, 3 ]
100
[]
0
true
99.724518
4
2
100
0
def minute(self): if self.interval != 1: raise IntervalError("Use minutes instead of minute") return self.minutes
1,731
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.minutes
(self)
return self
340
342
def minutes(self): self.unit = "minutes" return self
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L340-L342
4
[ 0, 1, 2 ]
100
[]
0
true
99.724518
3
1
100
0
def minutes(self): self.unit = "minutes" return self
1,732
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.hour
(self)
return self.hours
345
348
def hour(self): if self.interval != 1: raise IntervalError("Use hours instead of hour") return self.hours
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L345-L348
4
[ 0, 1, 2, 3 ]
100
[]
0
true
99.724518
4
2
100
0
def hour(self): if self.interval != 1: raise IntervalError("Use hours instead of hour") return self.hours
1,733
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.hours
(self)
return self
351
353
def hours(self): self.unit = "hours" return self
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L351-L353
4
[ 0, 1, 2 ]
100
[]
0
true
99.724518
3
1
100
0
def hours(self): self.unit = "hours" return self
1,734
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.day
(self)
return self.days
356
359
def day(self): if self.interval != 1: raise IntervalError("Use days instead of day") return self.days
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L356-L359
4
[ 0, 1, 2, 3 ]
100
[]
0
true
99.724518
4
2
100
0
def day(self): if self.interval != 1: raise IntervalError("Use days instead of day") return self.days
1,735
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.days
(self)
return self
362
364
def days(self): self.unit = "days" return self
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L362-L364
4
[ 0, 1, 2 ]
100
[]
0
true
99.724518
3
1
100
0
def days(self): self.unit = "days" return self
1,736
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.week
(self)
return self.weeks
367
370
def week(self): if self.interval != 1: raise IntervalError("Use weeks instead of week") return self.weeks
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L367-L370
4
[ 0, 1, 2, 3 ]
100
[]
0
true
99.724518
4
2
100
0
def week(self): if self.interval != 1: raise IntervalError("Use weeks instead of week") return self.weeks
1,737
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.weeks
(self)
return self
373
375
def weeks(self): self.unit = "weeks" return self
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L373-L375
4
[ 0, 1, 2 ]
100
[]
0
true
99.724518
3
1
100
0
def weeks(self): self.unit = "weeks" return self
1,738
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.monday
(self)
return self.weeks
378
386
def monday(self): if self.interval != 1: raise IntervalError( "Scheduling .monday() jobs is only allowed for weekly jobs. " "Using .monday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "monday" return self.weeks
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L378-L386
4
[ 0, 1, 2, 7, 8 ]
55.555556
[]
0
false
99.724518
9
2
100
0
def monday(self): if self.interval != 1: raise IntervalError( "Scheduling .monday() jobs is only allowed for weekly jobs. " "Using .monday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "monday" return self.weeks
1,739
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.tuesday
(self)
return self.weeks
389
397
def tuesday(self): if self.interval != 1: raise IntervalError( "Scheduling .tuesday() jobs is only allowed for weekly jobs. " "Using .tuesday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "tuesday" return self.weeks
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L389-L397
4
[ 0, 1, 2, 7, 8 ]
55.555556
[]
0
false
99.724518
9
2
100
0
def tuesday(self): if self.interval != 1: raise IntervalError( "Scheduling .tuesday() jobs is only allowed for weekly jobs. " "Using .tuesday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "tuesday" return self.weeks
1,740
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.wednesday
(self)
return self.weeks
400
408
def wednesday(self): if self.interval != 1: raise IntervalError( "Scheduling .wednesday() jobs is only allowed for weekly jobs. " "Using .wednesday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "wednesday" return self.weeks
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L400-L408
4
[ 0, 1, 2, 7, 8 ]
55.555556
[]
0
false
99.724518
9
2
100
0
def wednesday(self): if self.interval != 1: raise IntervalError( "Scheduling .wednesday() jobs is only allowed for weekly jobs. " "Using .wednesday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "wednesday" return self.weeks
1,741
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.thursday
(self)
return self.weeks
411
419
def thursday(self): if self.interval != 1: raise IntervalError( "Scheduling .thursday() jobs is only allowed for weekly jobs. " "Using .thursday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "thursday" return self.weeks
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L411-L419
4
[ 0, 1, 2, 7, 8 ]
55.555556
[]
0
false
99.724518
9
2
100
0
def thursday(self): if self.interval != 1: raise IntervalError( "Scheduling .thursday() jobs is only allowed for weekly jobs. " "Using .thursday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "thursday" return self.weeks
1,742
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.friday
(self)
return self.weeks
422
430
def friday(self): if self.interval != 1: raise IntervalError( "Scheduling .friday() jobs is only allowed for weekly jobs. " "Using .friday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "friday" return self.weeks
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L422-L430
4
[ 0, 1, 2, 7, 8 ]
55.555556
[]
0
false
99.724518
9
2
100
0
def friday(self): if self.interval != 1: raise IntervalError( "Scheduling .friday() jobs is only allowed for weekly jobs. " "Using .friday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "friday" return self.weeks
1,743
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.saturday
(self)
return self.weeks
433
441
def saturday(self): if self.interval != 1: raise IntervalError( "Scheduling .saturday() jobs is only allowed for weekly jobs. " "Using .saturday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "saturday" return self.weeks
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L433-L441
4
[ 0, 1, 2, 7, 8 ]
55.555556
[]
0
false
99.724518
9
2
100
0
def saturday(self): if self.interval != 1: raise IntervalError( "Scheduling .saturday() jobs is only allowed for weekly jobs. " "Using .saturday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "saturday" return self.weeks
1,744
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.sunday
(self)
return self.weeks
444
452
def sunday(self): if self.interval != 1: raise IntervalError( "Scheduling .sunday() jobs is only allowed for weekly jobs. " "Using .sunday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "sunday" return self.weeks
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L444-L452
4
[ 0, 1, 2, 7, 8 ]
55.555556
[]
0
false
99.724518
9
2
100
0
def sunday(self): if self.interval != 1: raise IntervalError( "Scheduling .sunday() jobs is only allowed for weekly jobs. " "Using .sunday() on a job scheduled to run every 2 or more weeks " "is not supported." ) self.start_day = "sunday" return self.weeks
1,745
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.tag
(self, *tags: Hashable)
return self
Tags the job with one or more unique identifiers. Tags must be hashable. Duplicate tags are discarded. :param tags: A unique list of ``Hashable`` tags. :return: The invoked job instance
Tags the job with one or more unique identifiers.
454
466
def tag(self, *tags: Hashable): """ Tags the job with one or more unique identifiers. Tags must be hashable. Duplicate tags are discarded. :param tags: A unique list of ``Hashable`` tags. :return: The invoked job instance """ if not all(isinstance(tag, Hashable) for tag in tags): raise TypeError("Tags must be hashable") self.tags.update(tags) return self
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L454-L466
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ]
100
[]
0
true
99.724518
13
2
100
6
def tag(self, *tags: Hashable): if not all(isinstance(tag, Hashable) for tag in tags): raise TypeError("Tags must be hashable") self.tags.update(tags) return self
1,746
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.at
(self, time_str: str, tz: str = None)
return self
Specify a particular time that the job should be run at. :param time_str: A string in one of the following formats: - For daily jobs -> `HH:MM:SS` or `HH:MM` - For hourly jobs -> `MM:SS` or `:MM` - For minute jobs -> `:SS` The format must make sense given how often the job is repeating; for example, a job that repeats every minute should not be given a string in the form `HH:MM:SS`. The difference between `:MM` and `:SS` is inferred from the selected time-unit (e.g. `every().hour.at(':30')` vs. `every().minute.at(':30')`). :param tz: The timezone that this timestamp refers to. Can be a string that can be parsed by pytz.timezone(), or a pytz.BaseTzInfo object :return: The invoked job instance
Specify a particular time that the job should be run at.
468
557
def at(self, time_str: str, tz: str = None): """ Specify a particular time that the job should be run at. :param time_str: A string in one of the following formats: - For daily jobs -> `HH:MM:SS` or `HH:MM` - For hourly jobs -> `MM:SS` or `:MM` - For minute jobs -> `:SS` The format must make sense given how often the job is repeating; for example, a job that repeats every minute should not be given a string in the form `HH:MM:SS`. The difference between `:MM` and `:SS` is inferred from the selected time-unit (e.g. `every().hour.at(':30')` vs. `every().minute.at(':30')`). :param tz: The timezone that this timestamp refers to. Can be a string that can be parsed by pytz.timezone(), or a pytz.BaseTzInfo object :return: The invoked job instance """ if self.unit not in ("days", "hours", "minutes") and not self.start_day: raise ScheduleValueError( "Invalid unit (valid units are `days`, `hours`, and `minutes`)" ) if tz is not None: import pytz if isinstance(tz, str): self.at_time_zone = pytz.timezone(tz) # type: ignore elif isinstance(tz, pytz.BaseTzInfo): self.at_time_zone = tz else: raise ScheduleValueError( "Timezone must be string or pytz.timezone object" ) if not isinstance(time_str, str): raise TypeError("at() should be passed a string") if self.unit == "days" or self.start_day: if not re.match(r"^[0-2]\d:[0-5]\d(:[0-5]\d)?$", time_str): raise ScheduleValueError( "Invalid time format for a daily job (valid format is HH:MM(:SS)?)" ) if self.unit == "hours": if not re.match(r"^([0-5]\d)?:[0-5]\d$", time_str): raise ScheduleValueError( "Invalid time format for an hourly job (valid format is (MM)?:SS)" ) if self.unit == "minutes": if not re.match(r"^:[0-5]\d$", time_str): raise ScheduleValueError( "Invalid time format for a minutely job (valid format is :SS)" ) time_values = time_str.split(":") hour: Union[str, int] minute: Union[str, int] second: Union[str, int] if len(time_values) == 3: hour, minute, second = time_values elif len(time_values) == 2 and self.unit == "minutes": hour = 0 minute = 0 _, second = time_values elif len(time_values) == 2 and self.unit == "hours" and len(time_values[0]): hour = 0 minute, second = time_values else: hour, minute = time_values second = 0 if self.unit == "days" or self.start_day: hour = int(hour) if not (0 <= hour <= 23): raise ScheduleValueError( "Invalid number of hours ({} is not between 0 and 23)" ) elif self.unit == "hours": hour = 0 elif self.unit == "minutes": hour = 0 minute = 0 hour = int(hour) minute = int(minute) second = int(second) self.at_time = datetime.time(hour, minute, second) return self
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L468-L557
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 ]
100
[]
0
true
99.724518
90
25
100
19
def at(self, time_str: str, tz: str = None): if self.unit not in ("days", "hours", "minutes") and not self.start_day: raise ScheduleValueError( "Invalid unit (valid units are `days`, `hours`, and `minutes`)" ) if tz is not None: import pytz if isinstance(tz, str): self.at_time_zone = pytz.timezone(tz) # type: ignore elif isinstance(tz, pytz.BaseTzInfo): self.at_time_zone = tz else: raise ScheduleValueError( "Timezone must be string or pytz.timezone object" ) if not isinstance(time_str, str): raise TypeError("at() should be passed a string") if self.unit == "days" or self.start_day: if not re.match(r"^[0-2]\d:[0-5]\d(:[0-5]\d)?$", time_str): raise ScheduleValueError( "Invalid time format for a daily job (valid format is HH:MM(:SS)?)" ) if self.unit == "hours": if not re.match(r"^([0-5]\d)?:[0-5]\d$", time_str): raise ScheduleValueError( "Invalid time format for an hourly job (valid format is (MM)?:SS)" ) if self.unit == "minutes": if not re.match(r"^:[0-5]\d$", time_str): raise ScheduleValueError( "Invalid time format for a minutely job (valid format is :SS)" ) time_values = time_str.split(":") hour: Union[str, int] minute: Union[str, int] second: Union[str, int] if len(time_values) == 3: hour, minute, second = time_values elif len(time_values) == 2 and self.unit == "minutes": hour = 0 minute = 0 _, second = time_values elif len(time_values) == 2 and self.unit == "hours" and len(time_values[0]): hour = 0 minute, second = time_values else: hour, minute = time_values second = 0 if self.unit == "days" or self.start_day: hour = int(hour) if not (0 <= hour <= 23): raise ScheduleValueError( "Invalid number of hours ({} is not between 0 and 23)" ) elif self.unit == "hours": hour = 0 elif self.unit == "minutes": hour = 0 minute = 0 hour = int(hour) minute = int(minute) second = int(second) self.at_time = datetime.time(hour, minute, second) return self
1,747
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.to
(self, latest: int)
return self
Schedule the job to run at an irregular (randomized) interval. The job's interval will randomly vary from the value given to `every` to `latest`. The range defined is inclusive on both ends. For example, `every(A).to(B).seconds` executes the job function every N seconds such that A <= N <= B. :param latest: Maximum interval between randomized job runs :return: The invoked job instance
Schedule the job to run at an irregular (randomized) interval.
559
572
def to(self, latest: int): """ Schedule the job to run at an irregular (randomized) interval. The job's interval will randomly vary from the value given to `every` to `latest`. The range defined is inclusive on both ends. For example, `every(A).to(B).seconds` executes the job function every N seconds such that A <= N <= B. :param latest: Maximum interval between randomized job runs :return: The invoked job instance """ self.latest = latest return self
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L559-L572
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 ]
100
[]
0
true
99.724518
14
1
100
9
def to(self, latest: int): self.latest = latest return self
1,748
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.until
( self, until_time: Union[datetime.datetime, datetime.timedelta, datetime.time, str], )
return self
Schedule job to run until the specified moment. The job is canceled whenever the next run is calculated and it turns out the next run is after the until_time. The job is also canceled right before it runs, if the current time is after until_time. This latter case can happen when the the job was scheduled to run before until_time, but runs after until_time. If until_time is a moment in the past, ScheduleValueError is thrown. :param until_time: A moment in the future representing the latest time a job can be run. If only a time is supplied, the date is set to today. The following formats are accepted: - datetime.datetime - datetime.timedelta - datetime.time - String in one of the following formats: "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%d", "%H:%M:%S", "%H:%M" as defined by strptime() behaviour. If an invalid string format is passed, ScheduleValueError is thrown. :return: The invoked job instance
Schedule job to run until the specified moment.
574
640
def until(
    self,
    until_time: Union[datetime.datetime, datetime.timedelta, datetime.time, str],
):
    """
    Schedule job to run until the specified moment.

    The job is canceled whenever the next run is calculated and it turns out
    the next run is after the until_time. The job is also canceled right
    before it runs, if the current time is after until_time. This latter case
    can happen when the the job was scheduled to run before until_time, but
    runs after until_time.

    If until_time is a moment in the past, ScheduleValueError is thrown.

    :param until_time: A moment in the future representing the latest time a
        job can be run. If only a time is supplied, the date is set to today.
        The following formats are accepted:

        - datetime.datetime
        - datetime.timedelta
        - datetime.time
        - String in one of the following formats: "%Y-%m-%d %H:%M:%S",
          "%Y-%m-%d %H:%M", "%Y-%m-%d", "%H:%M:%S", "%H:%M" as defined by
          strptime() behaviour. If an invalid string format is passed,
          ScheduleValueError is thrown.

    :return: The invoked job instance
    """
    if isinstance(until_time, datetime.datetime):
        self.cancel_after = until_time
    elif isinstance(until_time, datetime.timedelta):
        self.cancel_after = datetime.datetime.now() + until_time
    elif isinstance(until_time, datetime.time):
        self.cancel_after = datetime.datetime.combine(
            datetime.datetime.now(), until_time
        )
    elif isinstance(until_time, str):
        cancel_after = self._decode_datetimestr(
            until_time,
            [
                "%Y-%m-%d %H:%M:%S",
                "%Y-%m-%d %H:%M",
                "%Y-%m-%d",
                "%H:%M:%S",
                "%H:%M",
            ],
        )
        if cancel_after is None:
            raise ScheduleValueError("Invalid string format for until()")
        if "-" not in until_time:
            # the until_time is a time-only format. Set the date to today
            now = datetime.datetime.now()
            cancel_after = cancel_after.replace(
                year=now.year, month=now.month, day=now.day
            )
        self.cancel_after = cancel_after
    else:
        raise TypeError(
            "until() takes a string, datetime.datetime, datetime.timedelta, "
            "datetime.time parameter"
        )
    if self.cancel_after < datetime.datetime.now():
        raise ScheduleValueError(
            "Cannot schedule a job to run until a time in the past"
        )
    return self
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L574-L640
4
[ 0, 28, 29, 30, 31, 32, 33, 34, 37, 38, 48, 49, 50, 51, 52, 53, 56, 58, 62, 63, 66 ]
31.343284
[]
0
false
99.724518
67
8
100
22
def until(
    self,
    until_time: Union[datetime.datetime, datetime.timedelta, datetime.time, str],
):
    if isinstance(until_time, datetime.datetime):
        self.cancel_after = until_time
    elif isinstance(until_time, datetime.timedelta):
        self.cancel_after = datetime.datetime.now() + until_time
    elif isinstance(until_time, datetime.time):
        self.cancel_after = datetime.datetime.combine(
            datetime.datetime.now(), until_time
        )
    elif isinstance(until_time, str):
        cancel_after = self._decode_datetimestr(
            until_time,
            [
                "%Y-%m-%d %H:%M:%S",
                "%Y-%m-%d %H:%M",
                "%Y-%m-%d",
                "%H:%M:%S",
                "%H:%M",
            ],
        )
        if cancel_after is None:
            raise ScheduleValueError("Invalid string format for until()")
        if "-" not in until_time:
            # the until_time is a time-only format. Set the date to today
            now = datetime.datetime.now()
            cancel_after = cancel_after.replace(
                year=now.year, month=now.month, day=now.day
            )
        self.cancel_after = cancel_after
    else:
        raise TypeError(
            "until() takes a string, datetime.datetime, datetime.timedelta, "
            "datetime.time parameter"
        )
    if self.cancel_after < datetime.datetime.now():
        raise ScheduleValueError(
            "Cannot schedule a job to run until a time in the past"
        )
    return self
1,749
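A hedged sketch (not from the record above) of until(); the deadline values and job function are invented, and the accepted formats follow the docstring shown in the record.

import datetime
import schedule

def job():
    print("working")

schedule.every(1).hours.until("2030-01-01 18:30").do(job)           # absolute string deadline
schedule.every(1).hours.until(datetime.timedelta(hours=8)).do(job)  # relative deadline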
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.do
(self, job_func: Callable, *args, **kwargs)
return self
Specifies the job_func that should be called every time the job runs. Any additional arguments are passed on to job_func when the job runs. :param job_func: The function to be scheduled :return: The invoked job instance
Specifies the job_func that should be called every time the job runs.
642
662
def do(self, job_func: Callable, *args, **kwargs): """ Specifies the job_func that should be called every time the job runs. Any additional arguments are passed on to job_func when the job runs. :param job_func: The function to be scheduled :return: The invoked job instance """ self.job_func = functools.partial(job_func, *args, **kwargs) functools.update_wrapper(self.job_func, job_func) self._schedule_next_run() if self.scheduler is None: raise ScheduleError( "Unable to a add job to schedule. " "Job is not associated with an scheduler" ) self.scheduler.jobs.append(self) return self
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L642-L662
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 18, 19, 20 ]
85.714286
[ 15 ]
4.761905
false
99.724518
21
2
95.238095
8
def do(self, job_func: Callable, *args, **kwargs):
    self.job_func = functools.partial(job_func, *args, **kwargs)
    functools.update_wrapper(self.job_func, job_func)
    self._schedule_next_run()
    if self.scheduler is None:
        raise ScheduleError(
            "Unable to a add job to schedule. "
            "Job is not associated with an scheduler"
        )
    self.scheduler.jobs.append(self)
    return self
1,750
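A hedged sketch (not from the record above) of do() forwarding extra arguments to the job function, together with the usual run_pending() polling loop; the function and argument names are invented.

import time
import schedule

def greet(name, punctuation="!"):
    print("Hello", name + punctuation)

schedule.every(10).seconds.do(greet, "world", punctuation="?")

while True:          # typical polling loop; blocks forever
    schedule.run_pending()
    time.sleep(1)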
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.should_run
(self)
return datetime.datetime.now() >= self.next_run
:return: ``True`` if the job should be run now.
:return: ``True`` if the job should be run now.
665
670
def should_run(self) -> bool: """ :return: ``True`` if the job should be run now. """ assert self.next_run is not None, "must run _schedule_next_run before" return datetime.datetime.now() >= self.next_run
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L665-L670
4
[ 0, 1, 2, 3, 4, 5 ]
100
[]
0
true
99.724518
6
2
100
1
def should_run(self) -> bool:
    assert self.next_run is not None, "must run _schedule_next_run before"
    return datetime.datetime.now() >= self.next_run
1,751
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job.run
(self)
return ret
Run the job and immediately reschedule it. If the job's deadline is reached (configured using .until()), the job is not run and CancelJob is returned immediately. If the next scheduled run exceeds the job's deadline, CancelJob is returned after the execution. In this latter case CancelJob takes priority over any other returned value. :return: The return value returned by the `job_func`, or CancelJob if the job's deadline is reached.
Run the job and immediately reschedule it. If the job's deadline is reached (configured using .until()), the job is not run and CancelJob is returned immediately. If the next scheduled run exceeds the job's deadline, CancelJob is returned after the execution. In this latter case CancelJob takes priority over any other returned value.
672
696
def run(self): """ Run the job and immediately reschedule it. If the job's deadline is reached (configured using .until()), the job is not run and CancelJob is returned immediately. If the next scheduled run exceeds the job's deadline, CancelJob is returned after the execution. In this latter case CancelJob takes priority over any other returned value. :return: The return value returned by the `job_func`, or CancelJob if the job's deadline is reached. """ if self._is_overdue(datetime.datetime.now()): logger.debug("Cancelling job %s", self) return CancelJob logger.debug("Running job %s", self) ret = self.job_func() self.last_run = datetime.datetime.now() self._schedule_next_run() if self._is_overdue(self.next_run): logger.debug("Cancelling job %s", self) return CancelJob return ret
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L672-L696
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 ]
100
[]
0
true
99.724518
25
3
100
8
def run(self):
    if self._is_overdue(datetime.datetime.now()):
        logger.debug("Cancelling job %s", self)
        return CancelJob

    logger.debug("Running job %s", self)
    ret = self.job_func()
    self.last_run = datetime.datetime.now()
    self._schedule_next_run()

    if self._is_overdue(self.next_run):
        logger.debug("Cancelling job %s", self)
        return CancelJob
    return ret
1,752
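A hedged sketch (not from the record above) of a job unscheduling itself by returning CancelJob, which the run() logic above checks for; the counter and job name are invented.

import schedule

runs = {"count": 0}

def limited_job():
    runs["count"] += 1
    if runs["count"] >= 3:
        return schedule.CancelJob  # job is removed after the third run

schedule.every().second.do(limited_job)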
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job._schedule_next_run
(self)
Compute the instant when this job should run next.
Compute the instant when this job should run next.
698
781
def _schedule_next_run(self) -> None: """ Compute the instant when this job should run next. """ if self.unit not in ("seconds", "minutes", "hours", "days", "weeks"): raise ScheduleValueError( "Invalid unit (valid units are `seconds`, `minutes`, `hours`, " "`days`, and `weeks`)" ) if self.latest is not None: if not (self.latest >= self.interval): raise ScheduleError("`latest` is greater than `interval`") interval = random.randint(self.interval, self.latest) else: interval = self.interval self.period = datetime.timedelta(**{self.unit: interval}) self.next_run = datetime.datetime.now() + self.period if self.start_day is not None: if self.unit != "weeks": raise ScheduleValueError("`unit` should be 'weeks'") weekdays = ( "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday", ) if self.start_day not in weekdays: raise ScheduleValueError( "Invalid start day (valid start days are {})".format(weekdays) ) weekday = weekdays.index(self.start_day) days_ahead = weekday - self.next_run.weekday() if days_ahead <= 0: # Target day already happened this week days_ahead += 7 self.next_run += datetime.timedelta(days_ahead) - self.period if self.at_time is not None: if self.unit not in ("days", "hours", "minutes") and self.start_day is None: raise ScheduleValueError("Invalid unit without specifying start day") kwargs = {"second": self.at_time.second, "microsecond": 0} if self.unit == "days" or self.start_day is not None: kwargs["hour"] = self.at_time.hour if self.unit in ["days", "hours"] or self.start_day is not None: kwargs["minute"] = self.at_time.minute self.next_run = self.next_run.replace(**kwargs) # type: ignore if self.at_time_zone is not None: # Convert next_run from the expected timezone into the local time # self.next_run is a naive datetime so after conversion remove tzinfo self.next_run = ( self.at_time_zone.localize(self.next_run) .astimezone() .replace(tzinfo=None) ) # Make sure we run at the specified time *today* (or *this hour*) # as well. This accounts for when a job takes so long it finished # in the next period. if not self.last_run or (self.next_run - self.last_run) > self.period: now = datetime.datetime.now() if ( self.unit == "days" and self.at_time > now.time() and self.interval == 1 ): self.next_run = self.next_run - datetime.timedelta(days=1) elif self.unit == "hours" and ( self.at_time.minute > now.minute or ( self.at_time.minute == now.minute and self.at_time.second > now.second ) ): self.next_run = self.next_run - datetime.timedelta(hours=1) elif self.unit == "minutes" and self.at_time.second > now.second: self.next_run = self.next_run - datetime.timedelta(minutes=1) if self.start_day is not None and self.at_time is not None: # Let's see if we will still make that time we specified today if (self.next_run - datetime.datetime.now()).days >= 7: self.next_run -= self.period
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L698-L781
4
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83 ]
100
[]
0
true
99.724518
84
30
100
1
def _schedule_next_run(self) -> None:
    if self.unit not in ("seconds", "minutes", "hours", "days", "weeks"):
        raise ScheduleValueError(
            "Invalid unit (valid units are `seconds`, `minutes`, `hours`, "
            "`days`, and `weeks`)"
        )
    if self.latest is not None:
        if not (self.latest >= self.interval):
            raise ScheduleError("`latest` is greater than `interval`")
        interval = random.randint(self.interval, self.latest)
    else:
        interval = self.interval

    self.period = datetime.timedelta(**{self.unit: interval})
    self.next_run = datetime.datetime.now() + self.period
    if self.start_day is not None:
        if self.unit != "weeks":
            raise ScheduleValueError("`unit` should be 'weeks'")
        weekdays = (
            "monday",
            "tuesday",
            "wednesday",
            "thursday",
            "friday",
            "saturday",
            "sunday",
        )
        if self.start_day not in weekdays:
            raise ScheduleValueError(
                "Invalid start day (valid start days are {})".format(weekdays)
            )
        weekday = weekdays.index(self.start_day)
        days_ahead = weekday - self.next_run.weekday()
        if days_ahead <= 0:  # Target day already happened this week
            days_ahead += 7
        self.next_run += datetime.timedelta(days_ahead) - self.period

    if self.at_time is not None:
        if self.unit not in ("days", "hours", "minutes") and self.start_day is None:
            raise ScheduleValueError("Invalid unit without specifying start day")
        kwargs = {"second": self.at_time.second, "microsecond": 0}
        if self.unit == "days" or self.start_day is not None:
            kwargs["hour"] = self.at_time.hour
        if self.unit in ["days", "hours"] or self.start_day is not None:
            kwargs["minute"] = self.at_time.minute
        self.next_run = self.next_run.replace(**kwargs)  # type: ignore

        if self.at_time_zone is not None:
            # Convert next_run from the expected timezone into the local time
            # self.next_run is a naive datetime so after conversion remove tzinfo
            self.next_run = (
                self.at_time_zone.localize(self.next_run)
                .astimezone()
                .replace(tzinfo=None)
            )

        # Make sure we run at the specified time *today* (or *this hour*)
        # as well. This accounts for when a job takes so long it finished
        # in the next period.
        if not self.last_run or (self.next_run - self.last_run) > self.period:
            now = datetime.datetime.now()
            if (
                self.unit == "days"
                and self.at_time > now.time()
                and self.interval == 1
            ):
                self.next_run = self.next_run - datetime.timedelta(days=1)
            elif self.unit == "hours" and (
                self.at_time.minute > now.minute
                or (
                    self.at_time.minute == now.minute
                    and self.at_time.second > now.second
                )
            ):
                self.next_run = self.next_run - datetime.timedelta(hours=1)
            elif self.unit == "minutes" and self.at_time.second > now.second:
                self.next_run = self.next_run - datetime.timedelta(minutes=1)
    if self.start_day is not None and self.at_time is not None:
        # Let's see if we will still make that time we specified today
        if (self.next_run - datetime.datetime.now()).days >= 7:
            self.next_run -= self.period
1,753
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job._is_overdue
(self, when: datetime.datetime)
return self.cancel_after is not None and when > self.cancel_after
783
784
def _is_overdue(self, when: datetime.datetime):
    return self.cancel_after is not None and when > self.cancel_after
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L783-L784
4
[ 0, 1 ]
100
[]
0
true
99.724518
2
2
100
0
def _is_overdue(self, when: datetime.datetime):
    return self.cancel_after is not None and when > self.cancel_after
1,754
dbader/schedule
3eac646a8d2658929587d7454bd2c85696df254e
schedule/__init__.py
Job._decode_datetimestr
( self, datetime_str: str, formats: List[str] )
return None
786
794
def _decode_datetimestr(
    self, datetime_str: str, formats: List[str]
) -> Optional[datetime.datetime]:
    for f in formats:
        try:
            return datetime.datetime.strptime(datetime_str, f)
        except ValueError:
            pass
    return None
https://github.com/dbader/schedule/blob/3eac646a8d2658929587d7454bd2c85696df254e/project4/schedule/__init__.py#L786-L794
4
[ 0, 3, 4, 5, 6, 7, 8 ]
77.777778
[]
0
false
99.724518
9
3
100
0
def _decode_datetimestr(
    self, datetime_str: str, formats: List[str]
) -> Optional[datetime.datetime]:
    for f in formats:
        try:
            return datetime.datetime.strptime(datetime_str, f)
        except ValueError:
            pass
    return None
1,755
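A hedged, standalone illustration (not from the record above) of the multi-format strptime fallback used by _decode_datetimestr; the helper name and candidate formats are chosen to mirror what until() passes in.

import datetime

def decode(datetime_str, formats):
    # try each format in order; return the first successful parse, else None
    for f in formats:
        try:
            return datetime.datetime.strptime(datetime_str, f)
        except ValueError:
            pass
    return None

print(decode("2030-01-01 18:30", ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M"]))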
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
new_serial
(inst)
return inst.serial
1,017
1,021
def new_serial(inst):
    if inst.serial == maxint:
        inst.serial = -1
    inst.serial += 1
    return inst.serial
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L1017-L1021
6
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.370079
5
2
100
0
def new_serial(inst):
    if inst.serial == maxint:
        inst.serial = -1
    inst.serial += 1
    return inst.serial
4,074
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
FastCGISubprocess.__init__
(self, config)
723
725
def __init__(self, config):
    Subprocess.__init__(self, config)
    self.fcgi_sock = None
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L723-L725
6
[ 0, 1, 2 ]
100
[]
0
true
99.370079
3
1
100
0
def __init__(self, config):
    Subprocess.__init__(self, config)
    self.fcgi_sock = None
4,075
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
FastCGISubprocess.before_spawn
(self)
The FastCGI socket needs to be created by the parent before we fork
The FastCGI socket needs to be created by the parent before we fork
727
736
def before_spawn(self): """ The FastCGI socket needs to be created by the parent before we fork """ if self.group is None: raise NotImplementedError('No group set for FastCGISubprocess') if not hasattr(self.group, 'socket_manager'): raise NotImplementedError('No SocketManager set for ' '%s:%s' % (self.group, dir(self.group))) self.fcgi_sock = self.group.socket_manager.get_socket()
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L727-L736
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ]
100
[]
0
true
99.370079
10
3
100
1
def before_spawn(self):
    if self.group is None:
        raise NotImplementedError('No group set for FastCGISubprocess')
    if not hasattr(self.group, 'socket_manager'):
        raise NotImplementedError('No SocketManager set for '
                                  '%s:%s' % (self.group, dir(self.group)))
    self.fcgi_sock = self.group.socket_manager.get_socket()
4,076
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
FastCGISubprocess.spawn
(self)
return pid
Overrides Subprocess.spawn() so we can hook in before it happens
Overrides Subprocess.spawn() so we can hook in before it happens
738
747
def spawn(self): """ Overrides Subprocess.spawn() so we can hook in before it happens """ self.before_spawn() pid = Subprocess.spawn(self) if pid is None: #Remove object reference to decrement the reference count on error self.fcgi_sock = None return pid
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L738-L747
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ]
100
[]
0
true
99.370079
10
2
100
1
def spawn(self):
    self.before_spawn()
    pid = Subprocess.spawn(self)
    if pid is None:
        #Remove object reference to decrement the reference count on error
        self.fcgi_sock = None
    return pid
4,077
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
FastCGISubprocess.after_finish
(self)
Releases reference to FastCGI socket when process is reaped
Releases reference to FastCGI socket when process is reaped
749
754
def after_finish(self): """ Releases reference to FastCGI socket when process is reaped """ #Remove object reference to decrement the reference count self.fcgi_sock = None
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L749-L754
6
[ 0, 1, 2, 3, 4, 5 ]
100
[]
0
true
99.370079
6
1
100
1
def after_finish(self):
    #Remove object reference to decrement the reference count
    self.fcgi_sock = None
4,078
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
FastCGISubprocess.finish
(self, pid, sts)
return retval
Overrides Subprocess.finish() so we can hook in after it happens
Overrides Subprocess.finish() so we can hook in after it happens
756
762
def finish(self, pid, sts): """ Overrides Subprocess.finish() so we can hook in after it happens """ retval = Subprocess.finish(self, pid, sts) self.after_finish() return retval
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L756-L762
6
[ 0, 1, 2, 3, 4, 5, 6 ]
100
[]
0
true
99.370079
7
1
100
1
def finish(self, pid, sts):
    retval = Subprocess.finish(self, pid, sts)
    self.after_finish()
    return retval
4,079
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
FastCGISubprocess._prepare_child_fds
(self)
Overrides Subprocess._prepare_child_fds() The FastCGI socket needs to be set to file descriptor 0 in the child
Overrides Subprocess._prepare_child_fds() The FastCGI socket needs to be set to file descriptor 0 in the child
764
779
def _prepare_child_fds(self): """ Overrides Subprocess._prepare_child_fds() The FastCGI socket needs to be set to file descriptor 0 in the child """ sock_fd = self.fcgi_sock.fileno() options = self.config.options options.dup2(sock_fd, 0) options.dup2(self.pipes['child_stdout'], 1) if self.config.redirect_stderr: options.dup2(self.pipes['child_stdout'], 2) else: options.dup2(self.pipes['child_stderr'], 2) for i in range(3, options.minfds): options.close_fd(i)
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L764-L779
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ]
100
[]
0
true
99.370079
16
3
100
2
def _prepare_child_fds(self):
    sock_fd = self.fcgi_sock.fileno()

    options = self.config.options
    options.dup2(sock_fd, 0)
    options.dup2(self.pipes['child_stdout'], 1)
    if self.config.redirect_stderr:
        options.dup2(self.pipes['child_stdout'], 2)
    else:
        options.dup2(self.pipes['child_stderr'], 2)
    for i in range(3, options.minfds):
        options.close_fd(i)
4,080
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
ProcessGroup.transition
(self)
845
847
def transition(self):
    for proc in self.processes.values():
        proc.transition()
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L845-L847
6
[ 0, 1, 2 ]
100
[]
0
true
99.370079
3
2
100
0
def transition(self):
    for proc in self.processes.values():
        proc.transition()
4,081
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
FastCGIProcessGroup.__init__
(self, config, **kwargs)
851
864
def __init__(self, config, **kwargs):
    ProcessGroup.__init__(self, config)
    sockManagerKlass = kwargs.get('socketManager', SocketManager)
    self.socket_manager = sockManagerKlass(config.socket_config,
                                           logger=config.options.logger)
    # It's not required to call get_socket() here but we want
    # to fail early during start up if there is a config error
    try:
        self.socket_manager.get_socket()
    except Exception as e:
        raise ValueError(
            'Could not create FastCGI socket %s: %s' % (
                self.socket_manager.config(), e)
        )
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L851-L864
6
[ 0, 1, 2, 3, 6, 7, 8, 9, 10 ]
64.285714
[]
0
false
99.370079
14
2
100
0
def __init__(self, config, **kwargs):
    ProcessGroup.__init__(self, config)
    sockManagerKlass = kwargs.get('socketManager', SocketManager)
    self.socket_manager = sockManagerKlass(config.socket_config,
                                           logger=config.options.logger)
    # It's not required to call get_socket() here but we want
    # to fail early during start up if there is a config error
    try:
        self.socket_manager.get_socket()
    except Exception as e:
        raise ValueError(
            'Could not create FastCGI socket %s: %s' % (
                self.socket_manager.config(), e)
        )
4,082
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
EventListenerPool.__init__
(self, config)
867
873
def __init__(self, config):
    ProcessGroupBase.__init__(self, config)
    self.event_buffer = []
    self.serial = -1
    self.last_dispatch = 0
    self.dispatch_throttle = 0 # in seconds: .00195 is an interesting one
    self._subscribe()
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L867-L873
6
[ 0, 1, 2, 3, 4, 5, 6 ]
100
[]
0
true
99.370079
7
1
100
0
def __init__(self, config):
    ProcessGroupBase.__init__(self, config)
    self.event_buffer = []
    self.serial = -1
    self.last_dispatch = 0
    self.dispatch_throttle = 0 # in seconds: .00195 is an interesting one
    self._subscribe()
4,083
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
EventListenerPool.handle_rejected
(self, event)
875
880
def handle_rejected(self, event):
    process = event.process
    procs = self.processes.values()
    if process in procs: # this is one of our processes
        # rebuffer the event
        self._acceptEvent(event.event, head=True)
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L875-L880
6
[ 0, 1, 2, 3, 4, 5 ]
100
[]
0
true
99.370079
6
2
100
0
def handle_rejected(self, event):
    process = event.process
    procs = self.processes.values()
    if process in procs: # this is one of our processes
        # rebuffer the event
        self._acceptEvent(event.event, head=True)
4,084
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
EventListenerPool.transition
(self)
882
903
def transition(self):
    processes = self.processes.values()
    dispatch_capable = False
    for process in processes:
        process.transition()

        # this is redundant, we do it in _dispatchEvent too, but we
        # want to reduce function call overhead
        if process.state == ProcessStates.RUNNING:
            if process.listener_state == EventListenerStates.READY:
                dispatch_capable = True
    if dispatch_capable:
        if self.dispatch_throttle:
            now = time.time()
            if now < self.last_dispatch:
                # The system clock appears to have moved backward
                # Reset self.last_dispatch accordingly
                self.last_dispatch = now;
            if now - self.last_dispatch < self.dispatch_throttle:
                return
        self.dispatch()
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L882-L903
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 ]
100
[]
0
true
99.370079
22
8
100
0
def transition(self):
    processes = self.processes.values()
    dispatch_capable = False
    for process in processes:
        process.transition()

        # this is redundant, we do it in _dispatchEvent too, but we
        # want to reduce function call overhead
        if process.state == ProcessStates.RUNNING:
            if process.listener_state == EventListenerStates.READY:
                dispatch_capable = True
    if dispatch_capable:
        if self.dispatch_throttle:
            now = time.time()
            if now < self.last_dispatch:
                # The system clock appears to have moved backward
                # Reset self.last_dispatch accordingly
                self.last_dispatch = now;
            if now - self.last_dispatch < self.dispatch_throttle:
                return
        self.dispatch()
4,085
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
EventListenerPool.before_remove
(self)
905
906
def before_remove(self):
    self._unsubscribe()
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L905-L906
6
[ 0, 1 ]
100
[]
0
true
99.370079
2
1
100
0
def before_remove(self):
    self._unsubscribe()
4,086
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
EventListenerPool.dispatch
(self)
908
918
def dispatch(self):
    while self.event_buffer:
        # dispatch the oldest event
        event = self.event_buffer.pop(0)
        ok = self._dispatchEvent(event)
        if not ok:
            # if we can't dispatch an event, rebuffer it and stop trying
            # to process any further events in the buffer
            self._acceptEvent(event, head=True)
            break
    self.last_dispatch = time.time()
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L908-L918
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
100
[]
0
true
99.370079
11
3
100
0
def dispatch(self):
    while self.event_buffer:
        # dispatch the oldest event
        event = self.event_buffer.pop(0)
        ok = self._dispatchEvent(event)
        if not ok:
            # if we can't dispatch an event, rebuffer it and stop trying
            # to process any further events in the buffer
            self._acceptEvent(event, head=True)
            break
    self.last_dispatch = time.time()
4,087
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
EventListenerPool._acceptEvent
(self, event, head=False)
920
947
def _acceptEvent(self, event, head=False):
    # events are required to be instances
    # this has a side effect to fail with an attribute error on 'old style'
    # classes
    processname = as_string(self.config.name)
    if not hasattr(event, 'serial'):
        event.serial = new_serial(GlobalSerial)
    if not hasattr(event, 'pool_serials'):
        event.pool_serials = {}
    if self.config.name not in event.pool_serials:
        event.pool_serials[self.config.name] = new_serial(self)
    else:
        self.config.options.logger.debug(
            'rebuffering event %s for pool %s (buf size=%d, max=%d)' % (
            (event.serial, processname, len(self.event_buffer),
            self.config.buffer_size)))

    if len(self.event_buffer) >= self.config.buffer_size:
        if self.event_buffer:
            # discard the oldest event
            discarded_event = self.event_buffer.pop(0)
            self.config.options.logger.error(
                'pool %s event buffer overflowed, discarding event %s' % (
                (processname, discarded_event.serial)))
    if head:
        self.event_buffer.insert(0, event)
    else:
        self.event_buffer.append(event)
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L920-L947
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 17, 18, 19, 20, 21, 24, 25, 27 ]
75
[]
0
false
99.370079
28
7
100
0
def _acceptEvent(self, event, head=False):
    # events are required to be instances
    # this has a side effect to fail with an attribute error on 'old style'
    # classes
    processname = as_string(self.config.name)
    if not hasattr(event, 'serial'):
        event.serial = new_serial(GlobalSerial)
    if not hasattr(event, 'pool_serials'):
        event.pool_serials = {}
    if self.config.name not in event.pool_serials:
        event.pool_serials[self.config.name] = new_serial(self)
    else:
        self.config.options.logger.debug(
            'rebuffering event %s for pool %s (buf size=%d, max=%d)' % (
            (event.serial, processname, len(self.event_buffer),
            self.config.buffer_size)))

    if len(self.event_buffer) >= self.config.buffer_size:
        if self.event_buffer:
            # discard the oldest event
            discarded_event = self.event_buffer.pop(0)
            self.config.options.logger.error(
                'pool %s event buffer overflowed, discarding event %s' % (
                (processname, discarded_event.serial)))
    if head:
        self.event_buffer.insert(0, event)
    else:
        self.event_buffer.append(event)
4,088
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
EventListenerPool._dispatchEvent
(self, event)
return False
949
981
def _dispatchEvent(self, event):
    pool_serial = event.pool_serials[self.config.name]

    for process in self.processes.values():
        if process.state != ProcessStates.RUNNING:
            continue
        if process.listener_state == EventListenerStates.READY:
            processname = as_string(process.config.name)
            payload = event.payload()
            try:
                event_type = event.__class__
                serial = event.serial
                envelope = self._eventEnvelope(event_type, serial,
                                               pool_serial, payload)
                process.write(as_bytes(envelope))
            except OSError as why:
                if why.args[0] != errno.EPIPE:
                    raise

                self.config.options.logger.debug(
                    'epipe occurred while sending event %s '
                    'to listener %s, listener state unchanged' % (
                    event.serial, processname))
                continue

            process.listener_state = EventListenerStates.BUSY
            process.event = event
            self.config.options.logger.debug(
                'event %s sent to listener %s' % (
                event.serial, processname))
            return True
    return False
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L949-L981
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 16, 17, 18, 19, 23, 24, 25, 26, 27, 30, 31, 32 ]
81.818182
[]
0
false
99.370079
33
6
100
0
def _dispatchEvent(self, event):
    pool_serial = event.pool_serials[self.config.name]

    for process in self.processes.values():
        if process.state != ProcessStates.RUNNING:
            continue
        if process.listener_state == EventListenerStates.READY:
            processname = as_string(process.config.name)
            payload = event.payload()
            try:
                event_type = event.__class__
                serial = event.serial
                envelope = self._eventEnvelope(event_type, serial,
                                               pool_serial, payload)
                process.write(as_bytes(envelope))
            except OSError as why:
                if why.args[0] != errno.EPIPE:
                    raise

                self.config.options.logger.debug(
                    'epipe occurred while sending event %s '
                    'to listener %s, listener state unchanged' % (
                    event.serial, processname))
                continue

            process.listener_state = EventListenerStates.BUSY
            process.event = event
            self.config.options.logger.debug(
                'event %s sent to listener %s' % (
                event.serial, processname))
            return True
    return False
4,089
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
EventListenerPool._eventEnvelope
(self, event_type, serial, pool_serial, payload)
return ('ver:%(ver)s server:%(sid)s serial:%(serial)s ' 'pool:%(pool_name)s poolserial:%(pool_serial)s ' 'eventname:%(event_name)s len:%(len)s\n%(payload)s' % D)
983
998
def _eventEnvelope(self, event_type, serial, pool_serial, payload):
    event_name = events.getEventNameByType(event_type)
    payload_len = len(payload)
    D = {
        'ver':'3.0',
        'sid':self.config.options.identifier,
        'serial':serial,
        'pool_name':self.config.name,
        'pool_serial':pool_serial,
        'event_name':event_name,
        'len':payload_len,
        'payload':payload,
        }
    return ('ver:%(ver)s server:%(sid)s serial:%(serial)s '
            'pool:%(pool_name)s poolserial:%(pool_serial)s '
            'eventname:%(event_name)s len:%(len)s\n%(payload)s' % D)
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L983-L998
6
[ 0, 1, 2, 3, 13 ]
31.25
[]
0
false
99.370079
16
1
100
0
def _eventEnvelope(self, event_type, serial, pool_serial, payload):
    event_name = events.getEventNameByType(event_type)
    payload_len = len(payload)
    D = {
        'ver':'3.0',
        'sid':self.config.options.identifier,
        'serial':serial,
        'pool_name':self.config.name,
        'pool_serial':pool_serial,
        'event_name':event_name,
        'len':payload_len,
        'payload':payload,
        }
    return ('ver:%(ver)s server:%(sid)s serial:%(serial)s '
            'pool:%(pool_name)s poolserial:%(pool_serial)s '
            'eventname:%(event_name)s len:%(len)s\n%(payload)s' % D)
4,090
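A hedged illustration (not part of the record above) of the envelope string _eventEnvelope builds: a single header line carrying the payload length, followed by the payload itself. The server identifier, serial numbers, event name and payload are invented.

payload = 'processname:cat groupname:cat from_state:STARTING'
header = ('ver:3.0 server:supervisor serial:21 pool:listener poolserial:10 '
          'eventname:PROCESS_STATE_RUNNING len:%d\n' % len(payload))
envelope = header + payload
print(envelope)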
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
EventListenerPool._subscribe
(self)
1,000
1,003
def _subscribe(self):
    for event_type in self.config.pool_events:
        events.subscribe(event_type, self._acceptEvent)
    events.subscribe(events.EventRejectedEvent, self.handle_rejected)
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L1000-L1003
6
[ 0, 1, 2, 3 ]
100
[]
0
true
99.370079
4
2
100
0
def _subscribe(self):
    for event_type in self.config.pool_events:
        events.subscribe(event_type, self._acceptEvent)
    events.subscribe(events.EventRejectedEvent, self.handle_rejected)
4,091
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
EventListenerPool._unsubscribe
(self)
1,005
1,008
def _unsubscribe(self):
    for event_type in self.config.pool_events:
        events.unsubscribe(event_type, self._acceptEvent)
    events.unsubscribe(events.EventRejectedEvent, self.handle_rejected)
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L1005-L1008
6
[ 0, 1, 2, 3 ]
100
[]
0
true
99.370079
4
2
100
0
def _unsubscribe(self):
    for event_type in self.config.pool_events:
        events.unsubscribe(event_type, self._acceptEvent)
    events.unsubscribe(events.EventRejectedEvent, self.handle_rejected)
4,092
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/process.py
GlobalSerial.__init__
(self)
1,012
1,013
def __init__(self):
    self.serial = -1
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/process.py#L1012-L1013
6
[ 0, 1 ]
100
[]
0
true
99.370079
2
1
100
0
def __init__(self):
    self.serial = -1
4,093
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/confecho.py
main
(out=sys.stdout)
5
7
def main(out=sys.stdout):
    config = pkg_resources.resource_string(__name__, 'skel/sample.conf')
    out.write(as_string(config))
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/confecho.py#L5-L7
6
[ 0, 1, 2 ]
100
[]
0
true
100
3
1
100
0
def main(out=sys.stdout):
    config = pkg_resources.resource_string(__name__, 'skel/sample.conf')
    out.write(as_string(config))
4,094
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
getFaultDescription
(code)
return 'UNKNOWN'
46
50
def getFaultDescription(code):
    for faultname in Faults.__dict__:
        if getattr(Faults, faultname) == code:
            return faultname
    return 'UNKNOWN'
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L46-L50
6
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.741602
5
3
100
0
def getFaultDescription(code):
    for faultname in Faults.__dict__:
        if getattr(Faults, faultname) == code:
            return faultname
    return 'UNKNOWN'
4,095
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
xmlrpc_marshal
(value)
return body
144
152
def xmlrpc_marshal(value):
    ismethodresponse = not isinstance(value, xmlrpclib.Fault)
    if ismethodresponse:
        if not isinstance(value, tuple):
            value = (value,)
        body = xmlrpclib.dumps(value, methodresponse=ismethodresponse)
    else:
        body = xmlrpclib.dumps(value)
    return body
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L144-L152
6
[ 0, 1, 2, 3, 4, 5, 7, 8 ]
88.888889
[]
0
false
99.741602
9
3
100
0
def xmlrpc_marshal(value):
    ismethodresponse = not isinstance(value, xmlrpclib.Fault)
    if ismethodresponse:
        if not isinstance(value, tuple):
            value = (value,)
        body = xmlrpclib.dumps(value, methodresponse=ismethodresponse)
    else:
        body = xmlrpclib.dumps(value)
    return body
4,096
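A hedged illustration (not from the record above) of the marshalling split in xmlrpc_marshal: Fault instances are dumped as-is, anything else is wrapped in a one-element tuple and dumped as a method response. It uses the standard library xmlrpc.client directly; the values are invented.

from xmlrpc.client import Fault, dumps

ok_body = dumps(("RUNNING",), methodresponse=True)   # successful method response
fault_body = dumps(Fault(1, "UNKNOWN_METHOD"))       # fault response
print(ok_body)
print(fault_body)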
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
capped_int
(value)
return i
315
321
def capped_int(value):
    i = int(value)
    if i < xmlrpclib.MININT:
        i = xmlrpclib.MININT
    elif i > xmlrpclib.MAXINT:
        i = xmlrpclib.MAXINT
    return i
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L315-L321
6
[ 0, 1, 2, 3, 4, 5, 6 ]
100
[]
0
true
99.741602
7
3
100
0
def capped_int(value):
    i = int(value)
    if i < xmlrpclib.MININT:
        i = xmlrpclib.MININT
    elif i > xmlrpclib.MAXINT:
        i = xmlrpclib.MAXINT
    return i
4,097
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
make_datetime
(text)
return datetime.datetime( *time.strptime(text, "%Y%m%dT%H:%M:%S")[:6] )
323
326
def make_datetime(text):
    return datetime.datetime(
        *time.strptime(text, "%Y%m%dT%H:%M:%S")[:6]
    )
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L323-L326
6
[ 0, 1 ]
50
[]
0
false
99.741602
4
1
100
0
def make_datetime(text):
    return datetime.datetime(
        *time.strptime(text, "%Y%m%dT%H:%M:%S")[:6]
    )
4,098
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
traverse
(ob, method, params)
448
471
def traverse(ob, method, params):
    dotted_parts = method.split('.')

    # security (CVE-2017-11610, don't allow object traversal)
    if len(dotted_parts) != 2:
        raise RPCError(Faults.UNKNOWN_METHOD)
    namespace, method = dotted_parts

    # security (don't allow methods that start with an underscore to
    # be called remotely)
    if method.startswith('_'):
        raise RPCError(Faults.UNKNOWN_METHOD)

    rpcinterface = getattr(ob, namespace, None)
    if rpcinterface is None:
        raise RPCError(Faults.UNKNOWN_METHOD)

    func = getattr(rpcinterface, method, None)
    if not isinstance(func, types.MethodType):
        raise RPCError(Faults.UNKNOWN_METHOD)

    try:
        return func(*params)
    except TypeError:
        raise RPCError(Faults.INCORRECT_PARAMETERS)
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L448-L471
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 ]
100
[]
0
true
99.741602
24
6
100
0
def traverse(ob, method, params):
    dotted_parts = method.split('.')

    # security (CVE-2017-11610, don't allow object traversal)
    if len(dotted_parts) != 2:
        raise RPCError(Faults.UNKNOWN_METHOD)
    namespace, method = dotted_parts

    # security (don't allow methods that start with an underscore to
    # be called remotely)
    if method.startswith('_'):
        raise RPCError(Faults.UNKNOWN_METHOD)

    rpcinterface = getattr(ob, namespace, None)
    if rpcinterface is None:
        raise RPCError(Faults.UNKNOWN_METHOD)

    func = getattr(rpcinterface, method, None)
    if not isinstance(func, types.MethodType):
        raise RPCError(Faults.UNKNOWN_METHOD)

    try:
        return func(*params)
    except TypeError:
        raise RPCError(Faults.INCORRECT_PARAMETERS)
4,099
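A hedged, self-contained sketch (not from the record above) of the namespace traversal idea in traverse(): a dotted method name like 'supervisor.getState' is split into a namespace attribute and a method name. The DummyNamespace and Root classes are invented for illustration.

class DummyNamespace:
    def getState(self):
        return {"statename": "RUNNING"}

class Root:
    supervisor = DummyNamespace()

# 'supervisor.getState' -> namespace 'supervisor', method 'getState'
namespace, method = "supervisor.getState".split(".")
print(getattr(getattr(Root(), namespace), method)())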
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
gettags
(comment)
return tags
Parse documentation strings into JavaDoc-like tokens
Parse documentation strings into JavaDoc-like tokens
559
600
def gettags(comment):
    """ Parse documentation strings into JavaDoc-like tokens """
    tags = []

    tag = None
    datatype = None
    name = None
    tag_lineno = lineno = 0
    tag_text = []

    for line in comment.split('\n'):
        line = line.strip()
        if line.startswith("@"):
            tags.append((tag_lineno, tag, datatype, name, '\n'.join(tag_text)))
            parts = line.split(None, 3)
            if len(parts) == 1:
                datatype = ''
                name = ''
                tag_text = []
            elif len(parts) == 2:
                datatype = parts[1]
                name = ''
                tag_text = []
            elif len(parts) == 3:
                datatype = parts[1]
                name = parts[2]
                tag_text = []
            elif len(parts) == 4:
                datatype = parts[1]
                name = parts[2]
                tag_text = [parts[3].lstrip()]
            tag = parts[0][1:]
            tag_lineno = lineno
        else:
            if line:
                tag_text.append(line)
        lineno += 1

    tags.append((tag_lineno, tag, datatype, name, '\n'.join(tag_text)))

    return tags
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L559-L600
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41 ]
100
[]
0
true
99.741602
42
8
100
1
def gettags(comment):
    tags = []

    tag = None
    datatype = None
    name = None
    tag_lineno = lineno = 0
    tag_text = []

    for line in comment.split('\n'):
        line = line.strip()
        if line.startswith("@"):
            tags.append((tag_lineno, tag, datatype, name, '\n'.join(tag_text)))
            parts = line.split(None, 3)
            if len(parts) == 1:
                datatype = ''
                name = ''
                tag_text = []
            elif len(parts) == 2:
                datatype = parts[1]
                name = ''
                tag_text = []
            elif len(parts) == 3:
                datatype = parts[1]
                name = parts[2]
                tag_text = []
            elif len(parts) == 4:
                datatype = parts[1]
                name = parts[2]
                tag_text = [parts[3].lstrip()]
            tag = parts[0][1:]
            tag_lineno = lineno
        else:
            if line:
                tag_text.append(line)
        lineno += 1

    tags.append((tag_lineno, tag, datatype, name, '\n'.join(tag_text)))

    return tags
4,100
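A hedged example (not from the record above) of the JavaDoc-style tags gettags() extracts; the docstring text is invented, and the expected tuples were worked out by hand from the parsing logic shown in the record.

comment = """Start a process.

@param string name  Process name
@return boolean result  Always true unless error
"""
# gettags(comment) would yield tuples of
# (lineno, tag, datatype, name, text), roughly:
#   (0, None, None, None, 'Start a process.')
#   (2, 'param', 'string', 'name', 'Process name')
#   (3, 'return', 'boolean', 'result', 'Always true unless error')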
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
RPCError.__init__
(self, code, extra=None)
53
57
def __init__(self, code, extra=None):
    self.code = code
    self.text = getFaultDescription(code)
    if extra is not None:
        self.text = '%s: %s' % (self.text, extra)
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L53-L57
6
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.741602
5
2
100
0
def __init__(self, code, extra=None):
    self.code = code
    self.text = getFaultDescription(code)
    if extra is not None:
        self.text = '%s: %s' % (self.text, extra)
4,101
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
RPCError.__str__
(self)
return 'code=%r, text=%r' % (self.code, self.text)
59
60
def __str__(self):
    return 'code=%r, text=%r' % (self.code, self.text)
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L59-L60
6
[ 0, 1 ]
100
[]
0
true
99.741602
2
1
100
0
def __str__(self):
    return 'code=%r, text=%r' % (self.code, self.text)
4,102
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
DeferredXMLRPCResponse.__init__
(self, request, callback)
67
71
def __init__(self, request, callback):
    self.callback = callback
    self.request = request
    self.finished = False
    self.delay = float(callback.delay)
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L67-L71
6
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.741602
5
1
100
0
def __init__(self, request, callback):
    self.callback = callback
    self.request = request
    self.finished = False
    self.delay = float(callback.delay)
4,103
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
DeferredXMLRPCResponse.more
(self)
73
96
def more(self):
    if self.finished:
        return ''
    try:
        try:
            value = self.callback()
            if value is NOT_DONE_YET:
                return NOT_DONE_YET
        except RPCError as err:
            value = xmlrpclib.Fault(err.code, err.text)

        body = xmlrpc_marshal(value)

        self.finished = True

        return self.getresponse(body)

    except:
        tb = traceback.format_exc()
        self.request.channel.server.logger.log(
            "XML-RPC response callback error", tb
            )
        self.finished = True
        self.request.error(500)
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L73-L96
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 22, 23 ]
91.666667
[]
0
false
99.741602
24
5
100
0
def more(self):
    if self.finished:
        return ''
    try:
        try:
            value = self.callback()
            if value is NOT_DONE_YET:
                return NOT_DONE_YET
        except RPCError as err:
            value = xmlrpclib.Fault(err.code, err.text)

        body = xmlrpc_marshal(value)

        self.finished = True

        return self.getresponse(body)

    except:
        tb = traceback.format_exc()
        self.request.channel.server.logger.log(
            "XML-RPC response callback error", tb
            )
        self.finished = True
        self.request.error(500)
4,104
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
DeferredXMLRPCResponse.getresponse
(self, body)
98
142
def getresponse(self, body):
    self.request['Content-Type'] = 'text/xml'
    self.request['Content-Length'] = len(body)
    self.request.push(body)

    connection = get_header(self.CONNECTION, self.request.header)

    close_it = 0

    if self.request.version == '1.0':
        if connection == 'keep-alive':
            self.request['Connection'] = 'Keep-Alive'
        else:
            close_it = 1
    elif self.request.version == '1.1':
        if connection == 'close':
            close_it = 1
    elif self.request.version is None:
        close_it = 1

    outgoing_header = producers.simple_producer (
        self.request.build_reply_header())

    if close_it:
        self.request['Connection'] = 'close'

    # prepend the header
    self.request.outgoing.insert(0, outgoing_header)
    outgoing_producer = producers.composite_producer(self.request.outgoing)

    # apply a few final transformations to the output
    self.request.channel.push_with_producer (
        # globbing gives us large packets
        producers.globbing_producer (
            # hooking lets us log the number of bytes sent
            producers.hooked_producer (
                outgoing_producer,
                self.request.log
                )
            )
        )

    self.request.channel.current_request = None

    if close_it:
        self.request.channel.close_when_done()
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L98-L142
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 40, 41, 42, 43, 44 ]
77.777778
[]
0
false
99.741602
45
8
100
0
def getresponse(self, body):
    self.request['Content-Type'] = 'text/xml'
    self.request['Content-Length'] = len(body)
    self.request.push(body)

    connection = get_header(self.CONNECTION, self.request.header)

    close_it = 0

    if self.request.version == '1.0':
        if connection == 'keep-alive':
            self.request['Connection'] = 'Keep-Alive'
        else:
            close_it = 1
    elif self.request.version == '1.1':
        if connection == 'close':
            close_it = 1
    elif self.request.version is None:
        close_it = 1

    outgoing_header = producers.simple_producer (
        self.request.build_reply_header())

    if close_it:
        self.request['Connection'] = 'close'

    # prepend the header
    self.request.outgoing.insert(0, outgoing_header)
    outgoing_producer = producers.composite_producer(self.request.outgoing)

    # apply a few final transformations to the output
    self.request.channel.push_with_producer (
        # globbing gives us large packets
        producers.globbing_producer (
            # hooking lets us log the number of bytes sent
            producers.hooked_producer (
                outgoing_producer,
                self.request.log
                )
            )
        )

    self.request.channel.current_request = None

    if close_it:
        self.request.channel.close_when_done()
4,105
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
SystemNamespaceRPCInterface.__init__
(self, namespaces)
155
159
def __init__(self, namespaces):
    self.namespaces = {}
    for name, inst in namespaces:
        self.namespaces[name] = inst
    self.namespaces['system'] = self
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L155-L159
6
[ 0, 1, 2, 3, 4 ]
100
[]
0
true
99.741602
5
2
100
0
def __init__(self, namespaces):
    self.namespaces = {}
    for name, inst in namespaces:
        self.namespaces[name] = inst
    self.namespaces['system'] = self
4,106
Supervisor/supervisor
a7cb60d58b5eb610feb76c675208f87501d4bc4b
supervisor/xmlrpc.py
SystemNamespaceRPCInterface._listMethods
(self)
return methods
161
173
def _listMethods(self):
    methods = {}
    for ns_name in self.namespaces:
        namespace = self.namespaces[ns_name]
        for method_name in namespace.__class__.__dict__:
            # introspect; any methods that don't start with underscore
            # are published
            func = getattr(namespace, method_name)
            if callable(func):
                if not method_name.startswith('_'):
                    sig = '%s.%s' % (ns_name, method_name)
                    methods[sig] = str(func.__doc__)
    return methods
https://github.com/Supervisor/supervisor/blob/a7cb60d58b5eb610feb76c675208f87501d4bc4b/project6/supervisor/xmlrpc.py#L161-L173
6
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ]
100
[]
0
true
99.741602
13
5
100
0
def _listMethods(self):
    methods = {}
    for ns_name in self.namespaces:
        namespace = self.namespaces[ns_name]
        for method_name in namespace.__class__.__dict__:
            # introspect; any methods that don't start with underscore
            # are published
            func = getattr(namespace, method_name)
            if callable(func):
                if not method_name.startswith('_'):
                    sig = '%s.%s' % (ns_name, method_name)
                    methods[sig] = str(func.__doc__)
    return methods
4,107
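A hedged client-side sketch (not from the record above) of how the published method inventory built by _listMethods is typically queried over XML-RPC. The URL assumes a locally running supervisord with its HTTP interface enabled on port 9001; adjust to your configuration.

from xmlrpc.client import ServerProxy

server = ServerProxy("http://localhost:9001/RPC2")
print(server.system.listMethods())                     # e.g. ['supervisor.addProcessGroup', ...]
print(server.system.methodHelp("supervisor.getState")) # docstring for one published method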