| Column | Dtype | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 843k |
| repository_name | string | 7 | 55 |
| file_path | string | 9 | 332 |
| class_name | string | 3 | 290 |
| human_written_code | string | 12 | 4.36M |
| class_skeleton | string | 19 | 2.2M |
| total_program_units | int64 | 1 | 9.57k |
| total_doc_str | int64 | 0 | 4.2k |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 300 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 176 |
| CountClassBase | float64 | 0 | 48 |
| CountClassCoupled | float64 | 0 | 589 |
| CountClassCoupledModified | float64 | 0 | 581 |
| CountClassDerived | float64 | 0 | 5.37k |
| CountDeclInstanceMethod | float64 | 0 | 4.2k |
| CountDeclInstanceVariable | float64 | 0 | 299 |
| CountDeclMethod | float64 | 0 | 4.2k |
| CountDeclMethodAll | float64 | 0 | 4.2k |
| CountLine | float64 | 1 | 115k |
| CountLineBlank | float64 | 0 | 9.01k |
| CountLineCode | float64 | 0 | 94.4k |
| CountLineCodeDecl | float64 | 0 | 46.1k |
| CountLineCodeExe | float64 | 0 | 91.3k |
| CountLineComment | float64 | 0 | 27k |
| CountStmt | float64 | 1 | 93.2k |
| CountStmtDecl | float64 | 0 | 46.1k |
| CountStmtExe | float64 | 0 | 90.2k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 6k |

For the string columns, Min and Max are string lengths; for the numeric columns, they are value ranges across the dataset. Each record below lists its fields in column order: `id`, `repository_name`, `file_path`, and `class_name` on the header line, then the full `human_written_code`, the `class_skeleton`, and finally the 29 numeric metric columns as a trailing pipe-separated list.
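To work with rows of this shape programmatically, a minimal sketch is shown below, assuming the Hugging Face `datasets` library; `org/dataset-name` is a hypothetical placeholder, since the actual dataset id is not shown on this page.

```python
# Minimal sketch: load a dataset with the schema above and inspect one record.
# "org/dataset-name" is a placeholder, not the real dataset id.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")
row = ds[0]

print(row["repository_name"], row["file_path"], row["class_name"])
print(row["human_written_code"][:200])             # first 200 characters of the class source
print(row["class_skeleton"][:200])                 # signature-and-docstring skeleton
print(row["CountLineCode"], row["SumCyclomatic"])  # two of the numeric metric columns
```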
6,800 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/html.py | wpull.scraper.html.ElementWalker |
class ElementWalker(object):
LINK_ATTRIBUTES = frozenset([
'action', 'archive', 'background', 'cite', 'classid',
'codebase', 'data', 'href', 'longdesc', 'profile', 'src',
'usemap',
'dynsrc', 'lowsrc',
])
'''HTML element attributes that may contain links.'''
ATTR_INLINE = 1
'''Flag for embedded objects (like images, stylesheets) in documents.'''
ATTR_HTML = 2
'''Flag for links that point to other documents.'''
TAG_ATTRIBUTES = {
'a': {'href': ATTR_HTML},
'applet': {'code': ATTR_INLINE},
'area': {'href': ATTR_HTML},
'bgsound': {'src': ATTR_INLINE},
'body': {'background': ATTR_INLINE},
'embed': {'href': ATTR_HTML, 'src': ATTR_INLINE | ATTR_HTML},
'fig': {'src': ATTR_INLINE},
'form': {'action': ATTR_HTML},
'frame': {'src': ATTR_INLINE | ATTR_HTML},
'iframe': {'src': ATTR_INLINE | ATTR_HTML},
'img': {
'href': ATTR_INLINE, 'lowsrc': ATTR_INLINE, 'src': ATTR_INLINE},
'input': {'src': ATTR_INLINE},
'layer': {'src': ATTR_INLINE | ATTR_HTML},
'object': {'data': ATTR_INLINE},
'overlay': {'src': ATTR_INLINE | ATTR_HTML},
'script': {'src': ATTR_INLINE},
'table': {'background': ATTR_INLINE},
'td': {'background': ATTR_INLINE},
'th': {'background': ATTR_INLINE},
}
'''Mapping of element tag names to attributes containing links.'''
DYNAMIC_ATTRIBUTES = ('onkey', 'oncli', 'onmou')
'''Attributes that contain JavaScript.'''
OPEN_GRAPH_MEDIA_NAMES = (
'og:image', 'og:audio', 'og:video',
'twitter:image:src', 'twitter:image0', 'twitter:image1',
'twitter:image2', 'twitter:image3', 'twitter:player:stream',
)
OPEN_GRAPH_LINK_NAMES = (
'og:url', 'twitter:player'
)
'''Iterate elements looking for links.
Args:
css_scraper (:class:`.scraper.css.CSSScraper`): Optional CSS scraper.
        javascript_scraper (:class:`.scraper.javascript.JavaScriptScraper`):
Optional JavaScript scraper.
'''
def __init__(self, css_scraper=None, javascript_scraper=None):
self.css_scraper = css_scraper
self.javascript_scraper = javascript_scraper
def iter_links(self, elements):
'''Iterate the document root for links.
Returns:
            iterable: An iterator of :class:`LinkInfo`.
'''
for element in elements:
if not isinstance(element, Element):
continue
for link_infos in self.iter_links_element(element):
yield link_infos
def iter_links_element(self, element):
        '''Iterate an HTML element.'''
# reference: lxml.html.HtmlMixin.iterlinks()
attrib = element.attrib
tag = element.tag
if tag == 'link':
iterable = self.iter_links_link_element(element)
elif tag == 'meta':
iterable = self.iter_links_meta_element(element)
elif tag in ('object', 'applet'):
iterable = self.iter_links_object_element(element)
elif tag == 'param':
iterable = self.iter_links_param_element(element)
elif tag == 'style':
iterable = self.iter_links_style_element(element)
elif tag == 'script':
iterable = self.iter_links_script_element(element)
else:
iterable = self.iter_links_plain_element(element)
# RSS/Atom
if tag in ('link', 'url', 'icon'):
iterable = itertools.chain(
iterable, self.iter_links_element_text(element)
)
for link_info in iterable:
yield link_info
if 'style' in attrib and self.css_scraper:
for link in self.css_scraper.scrape_links(attrib['style']):
yield LinkInfo(
element=element, tag=element.tag, attrib='style',
link=link,
inline=True, linked=False,
base_link=None,
value_type='css',
link_type=LinkType.media,
)
@classmethod
def iter_links_element_text(cls, element):
'''Get the element text as a link.'''
if element.text:
link_type = identify_link_type(element.text)
yield LinkInfo(
element=element, tag=element.tag, attrib=None,
link=element.text,
inline=False, linked=True,
base_link=None,
value_type='plain',
link_type=link_type
)
def iter_links_link_element(self, element):
'''Iterate a ``link`` for URLs.
This function handles stylesheets and icons in addition to
standard scraping rules.
'''
rel = element.attrib.get('rel', '')
stylesheet = 'stylesheet' in rel
icon = 'icon' in rel
inline = stylesheet or icon
if stylesheet:
link_type = LinkType.css
elif icon:
link_type = LinkType.media
else:
link_type = None
for attrib_name, link in self.iter_links_by_attrib(element):
yield LinkInfo(
element=element, tag=element.tag, attrib=attrib_name,
link=link,
inline=inline, linked=not inline,
base_link=None,
value_type='plain',
link_type=link_type
)
@classmethod
def iter_links_meta_element(cls, element):
'''Iterate the ``meta`` element for links.
This function handles refresh URLs.
'''
if element.attrib.get('http-equiv', '').lower() == 'refresh':
content_value = element.attrib.get('content')
if content_value:
link = parse_refresh(content_value)
if link:
yield LinkInfo(
element=element, tag=element.tag, attrib='http-equiv',
link=link,
inline=False, linked=True,
base_link=None,
value_type='refresh',
link_type=None # treat it as a redirect
)
else:
for link_info in cls.iter_links_open_graph_meta(element):
yield link_info
@classmethod
def iter_links_open_graph_meta(cls, element):
name = element.attrib.get('property', '').lower()
if name in cls.OPEN_GRAPH_LINK_NAMES or \
name in cls.OPEN_GRAPH_MEDIA_NAMES:
link = element.attrib.get('content')
if link:
if name in cls.OPEN_GRAPH_MEDIA_NAMES:
link_type = LinkType.media
else:
link_type = None
yield LinkInfo(
element=element, tag=element.tag, attrib='property',
link=link,
inline=False, linked=True,
base_link=None,
value_type='plain',
link_type=link_type
)
@classmethod
def iter_links_object_element(cls, element):
'''Iterate ``object`` and ``embed`` elements.
This function also looks at ``codebase`` and ``archive`` attributes.
'''
base_link = element.attrib.get('codebase', None)
if base_link:
# lxml returns codebase as inline
            link_type = identify_link_type(base_link)
yield LinkInfo(
element=element, tag=element.tag, attrib='codebase',
link=base_link,
inline=True, linked=False,
base_link=None,
value_type='plain',
link_type=link_type
)
for attribute in ('code', 'src', 'classid', 'data'):
if attribute in element.attrib:
link_type = identify_link_type(element.attrib.get(attribute))
yield LinkInfo(
element=element, tag=element.tag, attrib=attribute,
link=element.attrib.get(attribute),
inline=True, linked=False,
base_link=base_link,
value_type='plain',
link_type=link_type
)
if 'archive' in element.attrib:
for match in re.finditer(r'[^ ]+', element.attrib.get('archive')):
value = match.group(0)
link_type = identify_link_type(value)
yield LinkInfo(
element=element, tag=element.tag, attrib='archive',
link=value,
inline=True, linked=False,
base_link=base_link,
value_type='list',
link_type=link_type
)
@classmethod
def iter_links_param_element(cls, element):
'''Iterate a ``param`` element.'''
valuetype = element.attrib.get('valuetype', '')
if valuetype.lower() == 'ref' and 'value' in element.attrib:
link_type = identify_link_type(element.attrib.get('value'))
yield LinkInfo(
element=element, tag=element.tag, attrib='value',
link=element.attrib.get('value'),
inline=True, linked=False,
base_link=None,
value_type='plain',
link_type=link_type
)
def iter_links_style_element(self, element):
'''Iterate a ``style`` element.'''
if self.css_scraper and element.text:
link_iter = self.css_scraper.scrape_links(element.text,
context=True)
for link, context in link_iter:
if context == 'import':
link_type = LinkType.css
else:
link_type = LinkType.media
yield LinkInfo(
element=element, tag=element.tag, attrib=None,
link=link,
inline=True, linked=False,
base_link=None,
value_type='css',
link_type=link_type
)
def iter_links_script_element(self, element):
'''Iterate a ``script`` element.'''
if self.javascript_scraper and element.text:
link_iter = self.javascript_scraper.scrape_links(element.text,
context=True)
for link, context in link_iter:
inline = is_likely_inline(link)
if context is True:
link_type = None
else:
link_type = context
yield LinkInfo(
element=element, tag=element.tag, attrib=None,
link=link,
inline=inline, linked=not inline,
base_link=None,
value_type='script',
link_type=link_type
)
for link in self.iter_links_plain_element(element):
yield link
def iter_links_plain_element(self, element):
'''Iterate any element for links using generic rules.'''
for attrib_name, link in self.iter_links_by_attrib(element):
if attrib_name in self.LINK_ATTRIBUTES:
inline = self.is_link_inline(element.tag, attrib_name)
linked = self.is_html_link(element.tag, attrib_name)
else:
inline = is_likely_inline(link)
linked = not inline
link_type = identify_link_type(link)
yield LinkInfo(
element=element, tag=element.tag, attrib=attrib_name,
link=link,
inline=inline, linked=linked,
base_link=None,
value_type='plain',
link_type=link_type
)
def iter_links_by_attrib(self, element):
'''Iterate an element by looking at its attributes for links.'''
for attrib_name in element.attrib.keys():
attrib_value = element.attrib.get(attrib_name)
if attrib_name in self.LINK_ATTRIBUTES:
if self.javascript_scraper and \
attrib_value.lstrip().startswith('javascript:'):
for link in self.iter_links_by_js_attrib(
attrib_name, percent_decode(attrib_value)):
yield link
else:
yield attrib_name, attrib_value
elif self.javascript_scraper and \
attrib_name[:5] in self.DYNAMIC_ATTRIBUTES:
for link in self.iter_links_by_js_attrib(attrib_name,
attrib_value):
yield link
elif attrib_name.startswith('data-'):
if is_likely_link(attrib_value) \
and not is_unlikely_link(attrib_value):
yield attrib_name, attrib_value
elif attrib_name == 'srcset':
items = self.iter_links_by_srcset_attrib(
attrib_name, attrib_value)
for item in items:
yield item
def iter_links_by_js_attrib(self, attrib_name, attrib_value):
'''Iterate links of a JavaScript pseudo-link attribute.'''
links = self.javascript_scraper.scrape_links(attrib_value)
for link in links:
yield attrib_name, link
@classmethod
def iter_links_by_srcset_attrib(cls, attrib_name, attrib_value):
images = attrib_value.split(',')
links = [value.lstrip().split(' ', 1)[0] for value in images]
for link in links:
yield attrib_name, link
@classmethod
def is_link_inline(cls, tag, attribute):
        '''Return whether the link is likely to be an inline object.'''
if tag in cls.TAG_ATTRIBUTES \
and attribute in cls.TAG_ATTRIBUTES[tag]:
attr_flags = cls.TAG_ATTRIBUTES[tag][attribute]
return attr_flags & cls.ATTR_INLINE
return attribute != 'href'
@classmethod
def is_html_link(cls, tag, attribute):
        '''Return whether the link is likely to be an external object.'''
if tag in cls.TAG_ATTRIBUTES \
and attribute in cls.TAG_ATTRIBUTES[tag]:
attr_flags = cls.TAG_ATTRIBUTES[tag][attribute]
return attr_flags & cls.ATTR_HTML
return attribute == 'href'
@classmethod
def robots_cannot_follow(cls, element):
'''Return whether we cannot follow links due to robots.txt directives.
'''
return (
element.tag == 'meta'
and element.attrib.get('name', '').lower() == 'robots'
and 'nofollow' in element.attrib.get('value', '').lower()
)
|
class ElementWalker(object):
def __init__(self, css_scraper=None, javascript_scraper=None):
pass
def iter_links(self, elements):
'''Iterate the document root for links.
Returns:
            iterable: An iterator of :class:`LinkInfo`.
'''
pass
def iter_links_element(self, element):
        '''Iterate an HTML element.'''
pass
@classmethod
def iter_links_element_text(cls, element):
'''Get the element text as a link.'''
pass
def iter_links_link_element(self, element):
'''Iterate a ``link`` for URLs.
This function handles stylesheets and icons in addition to
standard scraping rules.
'''
pass
@classmethod
def iter_links_meta_element(cls, element):
'''Iterate the ``meta`` element for links.
This function handles refresh URLs.
'''
pass
@classmethod
def iter_links_open_graph_meta(cls, element):
pass
@classmethod
def iter_links_object_element(cls, element):
'''Iterate ``object`` and ``embed`` elements.
This function also looks at ``codebase`` and ``archive`` attributes.
'''
pass
@classmethod
def iter_links_param_element(cls, element):
'''Iterate a ``param`` element.'''
pass
def iter_links_style_element(self, element):
'''Iterate a ``style`` element.'''
pass
def iter_links_script_element(self, element):
'''Iterate a ``script`` element.'''
pass
def iter_links_plain_element(self, element):
'''Iterate any element for links using generic rules.'''
pass
def iter_links_by_attrib(self, element):
'''Iterate an element by looking at its attributes for links.'''
pass
def iter_links_by_js_attrib(self, attrib_name, attrib_value):
'''Iterate links of a JavaScript pseudo-link attribute.'''
pass
@classmethod
def iter_links_by_srcset_attrib(cls, attrib_name, attrib_value):
pass
@classmethod
def is_link_inline(cls, tag, attribute):
        '''Return whether the link is likely to be an inline object.'''
pass
@classmethod
def is_html_link(cls, tag, attribute):
        '''Return whether the link is likely to be an external object.'''
pass
@classmethod
def robots_cannot_follow(cls, element):
'''Return whether we cannot follow links due to robots.txt directives.
'''
pass
| 28 | 15 | 18 | 2 | 15 | 2 | 4 | 0.13 | 1 | 3 | 2 | 0 | 9 | 2 | 18 | 18 | 406 | 56 | 310 | 87 | 282 | 41 | 151 | 78 | 132 | 11 | 1 | 4 | 71 |
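The unlabeled pipe-separated numbers that close each record, such as the 29 values ending row 6,800 above, correspond in order to the metric columns of the schema. A small Python sketch of pairing them up, assuming that ordering:

```python
# Pair row 6,800's trailing metric values with the metric column names from the
# schema table (the order is assumed to match the column order shown there).
METRIC_COLUMNS = [
    "total_program_units", "total_doc_str", "AvgCountLine", "AvgCountLineBlank",
    "AvgCountLineCode", "AvgCountLineComment", "AvgCyclomatic", "CommentToCodeRatio",
    "CountClassBase", "CountClassCoupled", "CountClassCoupledModified",
    "CountClassDerived", "CountDeclInstanceMethod", "CountDeclInstanceVariable",
    "CountDeclMethod", "CountDeclMethodAll", "CountLine", "CountLineBlank",
    "CountLineCode", "CountLineCodeDecl", "CountLineCodeExe", "CountLineComment",
    "CountStmt", "CountStmtDecl", "CountStmtExe", "MaxCyclomatic",
    "MaxInheritanceTree", "MaxNesting", "SumCyclomatic",
]

raw = ("28 | 15 | 18 | 2 | 15 | 2 | 4 | 0.13 | 1 | 3 | 2 | 0 | 9 | 2 | 18 | 18 | "
       "406 | 56 | 310 | 87 | 282 | 41 | 151 | 78 | 132 | 11 | 1 | 4 | 71")
values = [float(v) for v in raw.split("|")]
assert len(values) == len(METRIC_COLUMNS)

metrics = dict(zip(METRIC_COLUMNS, values))
print(metrics["CountLineCode"], metrics["MaxCyclomatic"], metrics["SumCyclomatic"])
# 310.0 11.0 71.0
```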
6,801 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/html.py | wpull.scraper.html.HTMLScraper |
class HTMLScraper(HTMLReader, BaseHTMLScraper):
'''Scraper for HTML documents.
Args:
        html_parser (:class:`.document.htmlparse.base.BaseParser`): An
            HTML parser such as the lxml or html5lib one.
        element_walker (:class:`ElementWalker`): HTML element walker.
followed_tags: A list of tags that should be scraped
ignored_tags: A list of tags that should not be scraped
robots: If True, discard any links if they cannot be followed
only_relative: If True, discard any links that are not absolute paths
'''
def __init__(self, html_parser, element_walker,
followed_tags=None, ignored_tags=None,
robots=False,
only_relative=False, encoding_override=None):
super().__init__(html_parser)
self._element_walker = element_walker
self._robots = robots
self._only_relative = only_relative
self._encoding_override = encoding_override
if followed_tags is not None:
self._followed_tags = frozenset(
[tag.lower() for tag in followed_tags])
else:
self._followed_tags = None
if ignored_tags is not None:
self._ignored_tags = frozenset(
[tag.lower() for tag in ignored_tags])
else:
self._ignored_tags = None
def scrape(self, request, response, link_type=None):
if not self.is_supported(request=request, response=response):
return
if link_type and link_type != LinkType.html:
return
base_url = request.url_info.url
content_file = response.body
encoding = self._encoding_override \
or detect_response_encoding(response, is_html=True)
link_contexts = set()
try:
with wpull.util.reset_file_offset(content_file):
elements = self.iter_elements(content_file, encoding=encoding)
result_meta_info = self._process_elements(
elements, response, base_url, link_contexts
)
except (UnicodeError, self._html_parser.parser_error) as error:
_logger.warning(
_('Failed to read document at ‘{url}’: {error}'),
url=request.url_info.url, error=error
)
result_meta_info = {}
if result_meta_info.get('robots_no_follow'):
link_contexts.discard(frozenset(
context for context in link_contexts if context.linked
))
scrape_result = ScrapeResult(link_contexts, encoding)
scrape_result['base_url'] = base_url
return scrape_result
def _process_elements(self, elements, response, base_url, link_contexts):
robots_check_needed = self._robots
robots_no_follow = False
inject_refresh = True
doc_base_url = None
for element in elements:
if not isinstance(element, Element):
continue
if robots_check_needed and ElementWalker.robots_cannot_follow(element):
robots_check_needed = False
robots_no_follow = True
if not doc_base_url and element.tag == 'base':
doc_base_url = urljoin_safe(
base_url, clean_link_soup(element.attrib.get('href', ''))
)
link_infos = self._element_walker.iter_links_element(element)
if inject_refresh and 'Refresh' in response.fields:
link = parse_refresh(response.fields['Refresh'])
if link:
link_info = LinkInfo(
element=None, tag='_refresh', attrib=None,
link=link,
inline=False, linked=True,
base_link=None, value_type='refresh',
link_type=None # treat it as a redirect
)
link_infos = itertools.chain(link_infos, [link_info])
inject_refresh = False
else:
inject_refresh = False
for link_info in link_infos:
if self._only_relative:
if link_info.base_link or '://' in link_info.link:
continue
if not self._is_accepted(link_info.tag):
continue
element_base_url = doc_base_url or base_url
if link_info.base_link:
clean_base_url = clean_link_soup(link_info.base_link)
if clean_base_url:
element_base_url = urljoin_safe(
base_url, clean_base_url
) or base_url
cleaned_url = clean_link_soup(link_info.link)
if not cleaned_url:
continue
url = urljoin_safe(
element_base_url,
cleaned_url,
allow_fragments=False
)
if url:
link_contexts.add(LinkContext(
url,
inline=link_info.inline,
linked=link_info.linked,
link_type=link_info.link_type,
extra=link_info,
))
return {'robots_no_follow': robots_no_follow}
def scrape_file(self, file, encoding=None, base_url=None):
'''Scrape a file for links.
See :meth:`scrape` for the return value.
'''
elements = self.iter_elements(file, encoding=encoding)
link_contexts = set()
link_infos = self._element_walker.iter_links(elements)
for link_info in link_infos:
element_base_url = base_url
if link_info.base_link:
clean_base_url = clean_link_soup(link_info.base_link)
if element_base_url and base_url:
element_base_url = urljoin_safe(
base_url, clean_base_url
) or base_url
if element_base_url:
url = urljoin_safe(
element_base_url,
clean_link_soup(link_info.link),
allow_fragments=False
)
else:
url = clean_link_soup(link_info.link)
if url:
link_contexts.add(LinkContext(
url,
inline=link_info.inline,
linked=link_info.linked,
link_type=link_info.link_type,
extra=link_info
))
scrape_result = ScrapeResult(link_contexts, encoding)
scrape_result['base_url'] = base_url
return scrape_result
def _is_accepted(self, element_tag):
'''Return if the link is accepted by the filters.'''
element_tag = element_tag.lower()
if self._ignored_tags is not None \
and element_tag in self._ignored_tags:
return False
if self._followed_tags is not None:
return element_tag in self._followed_tags
else:
return True
|
class HTMLScraper(HTMLReader, BaseHTMLScraper):
'''Scraper for HTML documents.
Args:
        html_parser (:class:`.document.htmlparse.base.BaseParser`): An
            HTML parser such as the lxml or html5lib one.
        element_walker (:class:`ElementWalker`): HTML element walker.
followed_tags: A list of tags that should be scraped
ignored_tags: A list of tags that should not be scraped
robots: If True, discard any links if they cannot be followed
only_relative: If True, discard any links that are not absolute paths
'''
def __init__(self, html_parser, element_walker,
followed_tags=None, ignored_tags=None,
robots=False,
only_relative=False, encoding_override=None):
pass
def scrape(self, request, response, link_type=None):
pass
def _process_elements(self, elements, response, base_url, link_contexts):
pass
def scrape_file(self, file, encoding=None, base_url=None):
'''Scrape a file for links.
See :meth:`scrape` for the return value.
'''
pass
def _is_accepted(self, element_tag):
'''Return if the link is accepted by the filters.'''
pass
| 6 | 3 | 38 | 7 | 30 | 1 | 6 | 0.1 | 2 | 9 | 4 | 1 | 5 | 6 | 5 | 38 | 205 | 42 | 149 | 43 | 140 | 15 | 98 | 39 | 92 | 15 | 5 | 4 | 32 |
6,802 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/html.py | wpull.scraper.html.LinkInfo |
class LinkInfo(_BaseLinkInfo):
def __hash__(self):
return self.link.__hash__()
|
class LinkInfo(_BaseLinkInfo):
def __hash__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
6,803 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/html_test.py | wpull.scraper.html_test.TestHTML5LibHTMLScraper |
class TestHTML5LibHTMLScraper(Mixin, unittest.TestCase):
def get_html_parser(self):
return HTML5LibHTMLParser()
|
class TestHTML5LibHTMLScraper(Mixin, unittest.TestCase):
def get_html_parser(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 2 | 0 | 0 | 0 | 1 | 0 | 1 | 88 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
6,804 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/html_test.py | wpull.scraper.html_test.TestLxmlHTMLScraper |
class TestLxmlHTMLScraper(Mixin, unittest.TestCase):
def get_html_parser(self):
return LxmlHTMLParser()
|
class TestLxmlHTMLScraper(Mixin, unittest.TestCase):
def get_html_parser(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 2 | 0 | 0 | 0 | 1 | 0 | 1 | 88 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
6,805 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/javascript.py | wpull.scraper.javascript.JavaScriptScraper |
class JavaScriptScraper(JavaScriptReader, BaseTextStreamScraper):
'''Scrapes JavaScript documents.'''
def __init__(self, encoding_override=None):
super().__init__()
self._encoding_override = encoding_override
def iter_processed_text(self, file, encoding=None, base_url=None):
for text, is_link in self.iter_text(file, encoding):
if is_link:
try:
new_text = json.loads('"{0}"'.format(text))
except ValueError:
yield (text, False)
continue
if is_unlikely_link(new_text) or not is_likely_link(new_text):
yield (text, False)
continue
if base_url:
new_link = urljoin_safe(base_url, new_text,
allow_fragments=False)
else:
new_link = new_text
if new_link:
yield (new_link, identify_link_type(new_link) or True)
else:
yield (text, False)
else:
yield (text, False)
def scrape(self, request, response, link_type=None):
if not self.is_supported(request=request, response=response):
return
if link_type and link_type != LinkType.javascript:
return
link_contexts = set()
base_url = request.url_info.url
encoding = self._encoding_override or \
detect_response_encoding(response)
try:
with wpull.util.reset_file_offset(response.body):
for link, context in self.iter_processed_links(
response.body, encoding, base_url, context=True):
inline = is_likely_inline(link)
if context is True:
link_type = None
else:
link_type = context
link_contexts.add(
LinkContext(link, inline=inline, linked=not inline,
link_type=link_type)
)
except UnicodeError as error:
_logger.warning(
_('Failed to read document at ‘{url}’: {error}'),
url=request.url_info.url, error=error
)
return ScrapeResult(link_contexts, encoding)
|
class JavaScriptScraper(JavaScriptReader, BaseTextStreamScraper):
'''Scrapes JavaScript documents.'''
def __init__(self, encoding_override=None):
pass
def iter_processed_text(self, file, encoding=None, base_url=None):
pass
def scrape(self, request, response, link_type=None):
pass
| 4 | 1 | 21 | 3 | 18 | 0 | 5 | 0.02 | 2 | 6 | 2 | 0 | 3 | 1 | 3 | 40 | 66 | 11 | 54 | 14 | 50 | 1 | 41 | 13 | 37 | 7 | 5 | 4 | 14 |
6,806 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/javascript_test.py | wpull.scraper.javascript_test.TestJavascript |
class TestJavascript(unittest.TestCase):
def test_javascript_scraper(self):
scraper = JavaScriptScraper()
request = Request('http://example.com/script.js')
response = Response(200, 'OK')
response.body = Body()
with wpull.util.reset_file_offset(response.body):
html_file_path = os.path.join(ROOT_PATH,
'testing', 'samples', 'script.js')
with open(html_file_path, 'rb') as in_file:
shutil.copyfileobj(in_file, response.body)
scrape_result = scraper.scrape(request, response)
inline_urls = scrape_result.inline_links
linked_urls = scrape_result.linked_links
self.assertEqual({
'http://example.com/script_variable.png',
'http://example.com/dragonquery.js',
},
inline_urls
)
self.assertEqual({
'http://example.com/document_write.html',
'http://example.com/http_document_write.html',
'http://example.com/http_document_write2.html',
'http://example.com/http document write.html',
'http://example.com/script_variable.html',
'http://example.com/http_script_variable.html',
'https://example.com/https_script_variable.html',
'ftp://example.com/ftp_script_variable.html',
'http://example.com/end_dir_script_variable/',
'http://example.com/start_dir_script_variable',
'http://example.com/../relative_dir_script_variable'
if sys.version_info < (3, 5) else
'http://example.com/relative_dir_script_variable',
'http://example.com/script_json.html',
'http://example.com/http_script_json.html?a=b',
},
linked_urls
)
def test_javascript_reject_type(self):
scraper = JavaScriptScraper()
request = Request('http://example.com/script.js')
response = Response(200, 'OK')
response.body = Body()
with wpull.util.reset_file_offset(response.body):
html_file_path = os.path.join(ROOT_PATH,
'testing', 'samples', 'script.js')
with open(html_file_path, 'rb') as in_file:
shutil.copyfileobj(in_file, response.body)
scrape_result = scraper.scrape(request, response,
link_type=LinkType.css)
self.assertFalse(scrape_result)
def test_javascript_heavy_inline_monstrosity(self):
scraper = JavaScriptScraper()
request = Request('http://example.com/test.js')
response = Response(200, 'OK')
response.body = Body()
with wpull.util.reset_file_offset(response.body):
html_file_path = os.path.join(ROOT_PATH,
'testing', 'samples',
'twitchplayspokemonfirered.html')
with open(html_file_path, 'rb') as in_file:
in_file.seek(0x147)
shutil.copyfileobj(in_file, response.body)
scrape_result = scraper.scrape(request, response)
inline_urls = scrape_result.inline_links
linked_urls = scrape_result.linked_links
self.assertIn(
'http://cdn.bulbagarden.net/upload/archive/a/a4/'
'20090718115357%21195Quagsire.png',
inline_urls
)
self.assertIn(
'http://www.google.com/url?q=http%3A%2F%2Fwww.reddit.com%2F'
'user%2FGoldenSandslash15&sa=D&sntz=1&'
'usg=AFQjCNElFBxZYdNm5mWoRSncf5tbdIJQ-A',
linked_urls
)
print('\n'.join(inline_urls))
print('\n'.join(linked_urls))
|
class TestJavascript(unittest.TestCase):
def test_javascript_scraper(self):
pass
def test_javascript_reject_type(self):
pass
def test_javascript_heavy_inline_monstrosity(self):
pass
| 4 | 0 | 29 | 3 | 26 | 0 | 1 | 0 | 1 | 5 | 5 | 0 | 3 | 0 | 3 | 75 | 91 | 11 | 80 | 26 | 76 | 0 | 43 | 23 | 39 | 2 | 2 | 2 | 4 |
6,807 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/css.py | wpull.scraper.css.CSSScraper |
class CSSScraper(CSSReader, BaseTextStreamScraper):
'''Scrapes CSS stylesheet documents.'''
def __init__(self, encoding_override=None):
super().__init__()
self._encoding_override = encoding_override
def iter_processed_text(self, file, encoding=None, base_url=None):
links = super().iter_processed_text(
file, encoding=encoding, base_url=base_url)
for text, is_link in links:
if is_link and len(text) < 500:
yield (text, is_link)
elif not is_link:
yield (text, False)
def scrape(self, request, response, link_type=None):
if not self.is_supported(request=request, response=response):
return
if link_type and link_type != LinkType.css:
return
link_contexts = set()
base_url = request.url_info.url
encoding = self._encoding_override or \
detect_response_encoding(response)
try:
with wpull.util.reset_file_offset(response.body):
for link, context in self.iter_processed_links(
response.body, encoding, base_url, context=True):
if context == 'import':
link_type = LinkType.css
else:
link_type = LinkType.media
link_contexts.add(LinkContext(link, inline=True, link_type=link_type))
except UnicodeError as error:
_logger.warning(__(
_('Failed to read document at ‘{url}’: {error}'),
url=request.url_info.url, error=error
))
return ScrapeResult(link_contexts, encoding)
|
class CSSScraper(CSSReader, BaseTextStreamScraper):
'''Scrapes CSS stylesheet documents.'''
def __init__(self, encoding_override=None):
pass
def iter_processed_text(self, file, encoding=None, base_url=None):
pass
def scrape(self, request, response, link_type=None):
pass
| 4 | 1 | 14 | 2 | 12 | 0 | 4 | 0.03 | 2 | 5 | 2 | 1 | 3 | 1 | 3 | 39 | 45 | 8 | 36 | 12 | 32 | 1 | 28 | 11 | 24 | 6 | 5 | 4 | 11 |
6,808 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/sitemap.py | wpull.scraper.sitemap.SitemapScraper |
class SitemapScraper(SitemapReader, BaseExtractiveScraper):
'''Scrape Sitemaps'''
def __init__(self, html_parser, encoding_override=None):
super().__init__(html_parser)
self._encoding_override = encoding_override
def scrape(self, request, response, link_type=None):
if not self.is_supported(request=request, response=response):
return
if link_type and link_type != LinkType.sitemap:
return
base_url = request.url_info.url
encoding = self._encoding_override \
or detect_response_encoding(response)
link_contexts = set()
try:
with wpull.util.reset_file_offset(response.body):
link_iter = self.iter_processed_links(response.body, encoding,
base_url)
for link in link_iter:
link_contexts.add(LinkContext(link, linked=True))
except (UnicodeError, self._html_parser.parser_error) as error:
_logger.warning(
_('Failed to read document at ‘{url}’: {error}'),
url=request.url_info.url, error=error
)
return ScrapeResult(link_contexts, encoding)
|
class SitemapScraper(SitemapReader, BaseExtractiveScraper):
'''Scrape Sitemaps'''
def __init__(self, html_parser, encoding_override=None):
pass
def scrape(self, request, response, link_type=None):
pass
| 3 | 1 | 14 | 2 | 12 | 0 | 3 | 0.04 | 2 | 5 | 2 | 0 | 2 | 1 | 2 | 36 | 31 | 5 | 25 | 10 | 22 | 1 | 20 | 9 | 17 | 5 | 5 | 3 | 6 |
6,809 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/sitemap_test.py | wpull.scraper.sitemap_test.TestLxmlSitemap |
class TestLxmlSitemap(Mixin, unittest.TestCase):
def get_html_parser(self):
return LxmlHTMLParser()
|
class TestLxmlSitemap(Mixin, unittest.TestCase):
def get_html_parser(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 2 | 0 | 0 | 0 | 1 | 0 | 1 | 80 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
6,810 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/util_test.py | wpull.scraper.util_test.TestUtil |
class TestUtil(unittest.TestCase):
def test_clean_link_soup(self):
self.assertEqual(
'http://example.com',
clean_link_soup('http://example.com ')
)
self.assertEqual(
'http://example.com/',
clean_link_soup('\n\r\thttp://example.com\n\r\r\r\n\t/')
)
self.assertEqual(
'http://example.com/ something',
clean_link_soup('http://example.com\n\t / something \n\r\t')
)
self.assertEqual(
'http://example.com/dog cat/',
clean_link_soup('http://example.com/\n dog \tcat\r/\n')
)
self.assertEqual(
'ßðf ¤Jáßðff ßðfœ³²œ¤ œë ßfœ',
clean_link_soup('ß\tðf ¤Jáßðf\n f ßðfœ³²œ¤ œë ßfœ ')
)
def test_parse_refresh(self):
self.assertEqual(
'http://example.com', parse_refresh('10;url="http://example.com"')
)
self.assertEqual(
'http://example.com', parse_refresh('10;url= http://example.com ')
)
self.assertEqual(
'example.com', parse_refresh("url =' example.com '")
)
self.assertFalse(
parse_refresh('url=')
)
self.assertFalse(
parse_refresh('url = ')
)
def test_is_likely_link(self):
self.assertTrue(is_likely_link('image.png'))
self.assertTrue(is_likely_link('video.mp4'))
self.assertTrue(is_likely_link('/directory'))
self.assertTrue(is_likely_link('directory/'))
self.assertTrue(is_likely_link('/directory/'))
self.assertTrue(is_likely_link('../directory/'))
self.assertTrue(is_likely_link('http://example.com/'))
self.assertTrue(is_likely_link('https://example.com/'))
self.assertTrue(is_likely_link('ftp://example.com'))
self.assertTrue(is_likely_link('directory/index.html'))
self.assertFalse(is_likely_link('directory/another_directory'))
self.assertTrue(is_likely_link('application/windows.exe'))
self.assertTrue(is_likely_link('//example.com/admin'))
self.assertFalse(is_likely_link('12.0'))
self.assertFalse(is_likely_link('7'))
self.assertFalse(is_likely_link('horse'))
self.assertFalse(is_likely_link(''))
self.assertFalse(is_likely_link('setTimeout(myTimer, 1000)'))
self.assertFalse(is_likely_link('comment.delete'))
self.assertFalse(is_likely_link('example.com'))
self.assertFalse(is_likely_link('example.net'))
self.assertFalse(is_likely_link('example.org'))
self.assertFalse(is_likely_link('example.edu'))
def test_is_unlikely_link(self):
self.assertTrue(is_unlikely_link('example.com+'))
self.assertTrue(is_unlikely_link('www.'))
self.assertTrue(is_unlikely_link(':example.com'))
self.assertTrue(is_unlikely_link(',example.com'))
self.assertTrue(is_unlikely_link('http:'))
self.assertTrue(is_unlikely_link('.example.com'))
self.assertTrue(is_unlikely_link('doc[0]'))
self.assertTrue(is_unlikely_link('/'))
self.assertTrue(is_unlikely_link('//'))
self.assertTrue(is_unlikely_link('application/json'))
self.assertTrue(is_unlikely_link('application/javascript'))
self.assertTrue(is_unlikely_link('text/javascript'))
self.assertTrue(is_unlikely_link('text/plain'))
self.assertTrue(is_unlikely_link('/\\/'))
self.assertTrue(is_unlikely_link('a.help'))
self.assertTrue(is_unlikely_link('div.menu'))
self.assertTrue(is_unlikely_link('apikey={YOUR_API_KEY_HERE}'))
self.assertFalse(is_unlikely_link('http://'))
self.assertFalse(is_unlikely_link('example'))
self.assertFalse(is_unlikely_link('example.com'))
self.assertFalse(is_unlikely_link('//example.com/assets/image.css'))
self.assertFalse(is_unlikely_link('./image.css'))
self.assertFalse(is_unlikely_link('../image.css'))
self.assertFalse(is_unlikely_link('index.html'))
self.assertFalse(is_unlikely_link('body.html'))
def test_identifiy_link_type(self):
self.assertEqual(LinkType.javascript, identify_link_type('hello.js'))
self.assertEqual(LinkType.css, identify_link_type('hello.css'))
self.assertEqual(LinkType.html, identify_link_type('hello.html'))
self.assertEqual(LinkType.media, identify_link_type('hello.mp3'))
self.assertEqual(LinkType.media, identify_link_type('hello.png'))
self.assertEqual(LinkType.media, identify_link_type('hello.flv'))
self.assertFalse(identify_link_type('hello.exe'))
|
class TestUtil(unittest.TestCase):
def test_clean_link_soup(self):
pass
def test_parse_refresh(self):
pass
def test_is_likely_link(self):
pass
def test_is_unlikely_link(self):
pass
def test_identifiy_link_type(self):
pass
| 6 | 0 | 19 | 0 | 19 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 5 | 0 | 5 | 77 | 100 | 4 | 96 | 6 | 90 | 0 | 71 | 6 | 65 | 1 | 2 | 0 | 5 |
6,811 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/stats.py | wpull.stats.Statistics |
class Statistics(object):
'''Statistics.
Attributes:
start_time (float): Timestamp when the engine started.
stop_time (float): Timestamp when the engine stopped.
files (int): Number of files downloaded.
size (int): Size of files in bytes.
errors: a Counter mapping error types to integer.
quota (int): Threshold of number of bytes when the download quota is
exceeded.
bandwidth_meter (:class:`.network.BandwidthMeter`): The bandwidth
meter.
'''
def __init__(self, url_table: Optional[BaseURLTable]=None):
self.start_time = None
self.stop_time = None
self.files = 0
self.size = 0
self.errors = Counter()
self.quota = None
self.bandwidth_meter = BandwidthMeter()
self._url_table = url_table
def start(self):
'''Record the start time.'''
self.start_time = time.time()
self.bandwidth_meter.feed(1)
def stop(self):
'''Record the stop time.'''
self.stop_time = time.time()
@property
def duration(self) -> float:
        '''Return the duration of the interval in seconds.'''
return self.stop_time - self.start_time
def increment(self, size: int):
'''Increment the number of files downloaded.
Args:
size: The size of the file
'''
assert size >= 0, size
self.files += 1
self.size += size
self.bandwidth_meter.feed(size)
@property
def is_quota_exceeded(self) -> bool:
'''Return whether the quota is exceeded.'''
if self.quota and self._url_table is not None:
return self.size >= self.quota and \
self._url_table.get_root_url_todo_count() == 0
def increment_error(self, error: Exception):
'''Increment the error counter preferring base exceptions.'''
_logger.debug('Increment error %s', error)
for error_class in ERROR_PRIORITIES:
if isinstance(error, error_class):
self.errors[error_class] += 1
return
self.errors[type(error)] += 1
|
class Statistics(object):
'''Statistics.
Attributes:
start_time (float): Timestamp when the engine started.
stop_time (float): Timestamp when the engine stopped.
files (int): Number of files downloaded.
size (int): Size of files in bytes.
errors: a Counter mapping error types to integer.
quota (int): Threshold of number of bytes when the download quota is
exceeded.
bandwidth_meter (:class:`.network.BandwidthMeter`): The bandwidth
meter.
'''
def __init__(self, url_table: Optional[BaseURLTable]=None):
pass
def start(self):
'''Record the start time.'''
pass
def stop(self):
'''Record the stop time.'''
pass
@property
def duration(self) -> float:
        '''Return the duration of the interval in seconds.'''
pass
def increment(self, size: int):
'''Increment the number of files downloaded.
Args:
size: The size of the file
'''
pass
@property
def is_quota_exceeded(self) -> bool:
'''Return whether the quota is exceeded.'''
pass
def increment_error(self, error: Exception):
'''Increment the error counter preferring base exceptions.'''
pass
| 10 | 7 | 7 | 1 | 5 | 1 | 1 | 0.6 | 1 | 7 | 2 | 0 | 7 | 8 | 7 | 7 | 68 | 12 | 35 | 19 | 25 | 21 | 32 | 17 | 24 | 3 | 1 | 2 | 10 |
6,812 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/string_test.py | wpull.string_test.TestString |
class TestString(unittest.TestCase):
def test_to_bytes(self):
self.assertEqual(b'hi', to_bytes('hi'))
self.assertEqual([b'hi'], to_bytes(['hi']))
self.assertEqual((b'hi', b'hello'), to_bytes(('hi', 'hello')))
self.assertEqual({b'hi': b'hello'}, to_bytes({'hi': 'hello'}))
object1 = object()
self.assertEqual(object1, to_bytes(object1))
def test_to_str(self):
self.assertEqual('hi', to_str(b'hi'))
self.assertEqual(['hi'], to_str([b'hi']))
self.assertEqual(('hi', 'hello'), to_str((b'hi', b'hello')))
self.assertEqual({'hi': 'hello'}, to_str({b'hi': b'hello'}))
object1 = object()
self.assertEqual(object1, to_str(object1))
def test_detect_encoding(self):
mojibake = b'\x95\xb6\x8e\x9a\x89\xbb\x82\xaf'
krakozyabry = b'\xeb\xd2\xc1\xcb\xcf\xda\xd1\xc2\xd2\xd9'
self.assertEqual(
'shift_jis',
detect_encoding(mojibake, 'shift_jis')
)
self.assertEqual(
'koi8-r',
detect_encoding(krakozyabry, 'koi8-r')
)
self.assertEqual(
'shift_jis',
detect_encoding((mojibake * 10)[:-1], 'shift_jis')
)
self.assertEqual(
'koi8-r',
detect_encoding((krakozyabry * 10)[:-1], 'koi8-r')
)
self.assertEqual(
'iso8859-1',
detect_encoding(b'\xff\xff\xff\x81')
)
self.assertRaises(
ValueError,
detect_encoding, b'\xff\xff\xff\x81',
'utf8', fallback=()
)
self.assertEqual(
'utf-8',
detect_encoding(
b'<html><meta charset="dog_breath"><body>',
is_html=True
)
)
self.assertEqual(
'utf-8',
detect_encoding(
b'<html><meta content="text/html; charset=cat-meows><body>',
is_html=True
)
)
self.assertEqual(
'utf-16-le',
detect_encoding(
codecs.BOM_UTF16_LE +
'Let’s hope no one uses UTF-36'.encode('utf_16_le')[:-1]
)
)
# Check for no crash
detect_encoding(
b'<?xml version="1.0" encoding="UTF-\xdb" ?>'
)
for length in range(1, 2):
iterable = itertools.permutations(
[bytes(i) for i in range(256)], length
)
for data in iterable:
detect_encoding(b''.join(data))
def test_printable_bytes(self):
self.assertEqual(
b' 1234abc XYZ~',
printable_bytes(b' 1234\x00abc XYZ\xff~')
)
def test_printable_str(self):
self.assertEqual(
'asdf',
printable_str('asdf')
)
self.assertEqual(
'asdf\\n',
printable_str('asdf\n')
)
self.assertEqual(
'asdf\n',
printable_str('asdf\n', keep_newlines=True)
)
self.assertEqual(
'as\\x1bdf',
printable_str('as\x1bdf')
)
def test_normalize_codec_name(self):
self.assertEqual('utf-8', normalize_codec_name('UTF-8'))
self.assertEqual('utf-8', normalize_codec_name('uTF_8'))
self.assertEqual('utf-8', normalize_codec_name('Utf8'))
self.assertEqual('shift_jis', normalize_codec_name('x-sjis'))
self.assertFalse(normalize_codec_name('\x00'))
self.assertFalse(normalize_codec_name('wolf-howl'))
self.assertFalse(normalize_codec_name('dragon-flatulence'))
def test_format_size(self):
# Check for no crash
nums = itertools.chain(
(-93334, -1, 0, 1, 100, 1023, 1024, 1025,),
[10 ** expo for expo in range(1, 16)]
)
for num in nums:
format_size(num)
|
class TestString(unittest.TestCase):
def test_to_bytes(self):
pass
def test_to_str(self):
pass
def test_detect_encoding(self):
pass
def test_printable_bytes(self):
pass
def test_printable_str(self):
pass
def test_normalize_codec_name(self):
pass
def test_format_size(self):
pass
| 8 | 0 | 18 | 2 | 16 | 0 | 1 | 0.02 | 1 | 5 | 0 | 0 | 7 | 0 | 7 | 79 | 130 | 18 | 110 | 17 | 102 | 2 | 51 | 17 | 43 | 3 | 2 | 2 | 10 |
6,813 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/testing/async.py | wpull.testing.async.AsyncTestCase |
class AsyncTestCase(unittest.TestCase):
def setUp(self):
self.event_loop = asyncio.new_event_loop()
self.event_loop.set_debug(True)
asyncio.set_event_loop(self.event_loop)
def tearDown(self):
self.event_loop.stop()
self.event_loop.close()
|
class AsyncTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 11 | 2 | 1 | 2 | 74 | 9 | 1 | 8 | 4 | 5 | 0 | 8 | 4 | 5 | 1 | 2 | 0 | 2 |
6,814 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/testing/async.py | wpull.testing.async.TornadoAsyncIOLoop |
class TornadoAsyncIOLoop(BaseAsyncIOLoop):
def initialize(self, event_loop):
super().initialize(event_loop, close_loop=False)
|
class TornadoAsyncIOLoop(BaseAsyncIOLoop):
def initialize(self, event_loop):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 64 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 4 | 0 | 1 |
6,815 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/testing/util.py | wpull.testing.util.TempDirMixin |
class TempDirMixin:
def set_up_temp_dir(self):
assert not getattr(self, 'original_dir', None), self.original_dir
self.original_dir = os.getcwd()
self.temp_dir = TemporaryDirectory()
os.chdir(self.temp_dir.name)
_logger.debug('Switch to %s', self.temp_dir.name)
def tear_down_temp_dir(self):
os.chdir(self.original_dir)
self.temp_dir.cleanup()
self.original_dir = None
@contextlib.contextmanager
def cd_tempdir(self):
original_dir = os.getcwd()
with TemporaryDirectory() as temp_dir:
try:
os.chdir(temp_dir)
yield temp_dir
finally:
os.chdir(original_dir)
|
class TempDirMixin:
def set_up_temp_dir(self):
pass
def tear_down_temp_dir(self):
pass
@contextlib.contextmanager
def cd_tempdir(self):
pass
| 5 | 0 | 7 | 1 | 6 | 0 | 1 | 0 | 0 | 1 | 0 | 12 | 3 | 2 | 3 | 3 | 24 | 4 | 20 | 9 | 15 | 0 | 18 | 7 | 14 | 1 | 0 | 2 | 3 |
6,816 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/scraper/sitemap_test.py | wpull.scraper.sitemap_test.TestHTML5LibSitemap |
class TestHTML5LibSitemap(Mixin, unittest.TestCase):
def get_html_parser(self):
return HTML5LibHTMLParser()
|
class TestHTML5LibSitemap(Mixin, unittest.TestCase):
def get_html_parser(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 2 | 0 | 0 | 0 | 1 | 0 | 1 | 80 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
6,817 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/protocol/ftp/request.py | wpull.protocol.ftp.request.Request |
class Request(BaseRequest, URLPropertyMixin):
'''FTP request for a file.
Attributes:
address (tuple): Address of control connection.
data_address (tuple): Address of data connection.
username (str, None): Username for login.
password (str, None): Password for login.
restart_value (int, None): Optional value for ``REST`` command.
file_path (str): Path of the file.
'''
def __init__(self, url):
super().__init__()
self.url = url
self.address = None
self.data_address = None
self.username = None
self.password = None
self.restart_value = None
@property
def file_path(self):
return urllib.parse.unquote(self.url_info.path)
def to_dict(self):
return {
'protocol': 'ftp',
'url': self.url,
'url_info': self.url_info.to_dict() if self.url_info else None,
'username': self.username,
'password': self.password,
'restart_value': self.restart_value,
'file_path': self.file_path,
}
def set_continue(self, offset):
'''Modify the request into a restart request.'''
assert offset >= 0, offset
self.restart_value = offset
|
class Request(BaseRequest, URLPropertyMixin):
'''FTP request for a file.
Attributes:
address (tuple): Address of control connection.
data_address (tuple): Address of data connection.
username (str, None): Username for login.
password (str, None): Password for login.
restart_value (int, None): Optional value for ``REST`` command.
file_path (str): Path of the file.
'''
def __init__(self, url):
pass
@property
def file_path(self):
pass
def to_dict(self):
pass
def set_continue(self, offset):
'''Modify the request into a restart request.'''
pass
| 6 | 2 | 6 | 0 | 6 | 0 | 1 | 0.4 | 2 | 1 | 0 | 0 | 4 | 6 | 4 | 10 | 39 | 4 | 25 | 12 | 19 | 10 | 16 | 11 | 11 | 2 | 3 | 0 | 5 |
6,818 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/protocol/ftp/util.py | wpull.protocol.ftp.util.ReplyCodes |
class ReplyCodes(object):
command_okay = 200
syntax_error_command_unrecognized = 500
syntax_error_in_parameters_or_arguments = 501
command_not_implemented_superfluous_at_this_site = 202
command_not_implemented = 502
bad_sequence_of_commands = 503
command_not_implemented_for_that_parameter = 504
restart_marker_reply = 110
system_status_or_system_help_reply = 211
directory_status = 212
file_status = 213
help_message = 214
name_system_type = 215
service_ready_in_nnn_minutes = 120
service_ready_for_new_user = 220
service_closing_control_connection = 221
service_not_available_closing_control_connection = 421
data_connection_already_open_transfer_starting = 125
data_connection_open_no_transfer_in_progress = 225
cant_open_data_connection = 425
closing_data_connection = 226
connection_closed_transfer_aborted = 426
entering_passive_mode = 227
user_logged_in_proceed = 230
not_logged_in = 530
user_name_okay_need_password = 331
need_account_for_login = 332
need_account_for_storing_files = 532
file_status_okay_about_to_open_data_connection = 150
requested_file_action_okay_completed = 250
pathname_created = 257
requested_file_action_pending_further_information = 350
requested_file_action_not_taken = 450
requested_action_not_taken_file_unavailable = 550
requested_action_aborted_local_error_in_processing = 451
requested_action_aborted_page_type_unknown = 551
requested_action_not_taken_insufficient_storage_space = 452
requested_file_action_aborted = 552
requested_action_not_taken_file_name_not_allowed = 553
|
class ReplyCodes(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0 | 40 | 40 | 39 | 0 | 40 | 40 | 39 | 0 | 1 | 0 | 0 |
6,819 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/protocol/ftp/request.py | wpull.protocol.ftp.request.ListingResponse |
class ListingResponse(Response):
'''FTP response for a file listing.
Attributes:
files (list): A list of :class:`.ftp.ls.listing.FileEntry`
'''
def __init__(self):
super().__init__()
self.files = []
def to_dict(self):
dict_obj = super().to_dict()
dict_obj['files'] = self.files
return dict_obj
|
class ListingResponse(Response):
'''FTP response for a file listing.
Attributes:
files (list): A list of :class:`.ftp.ls.listing.FileEntry`
'''
def __init__(self):
pass
def to_dict(self):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 1 | 0.5 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 14 | 14 | 2 | 8 | 5 | 5 | 4 | 8 | 5 | 5 | 1 | 4 | 0 | 2 |
6,820 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/processor/base.py | wpull.processor.base.BaseProcessorSession |
class BaseProcessorSession(object, metaclass=abc.ABCMeta):
'''Base class for processor sessions.'''
def _log_error(self, request, error):
'''Log exceptions during a fetch.'''
_logger.error(
_('Fetching ‘{url}’ encountered an error: {error}'),
url=request.url, error=error
)
|
class BaseProcessorSession(object, metaclass=abc.ABCMeta):
'''Base class for processor sessions.'''
def _log_error(self, request, error):
'''Log exceptions during a fetch.'''
pass
| 2 | 2 | 6 | 0 | 5 | 1 | 1 | 0.33 | 2 | 0 | 0 | 2 | 1 | 0 | 1 | 21 | 9 | 1 | 6 | 2 | 4 | 2 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
6,821 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/processor/base.py | wpull.processor.base.BaseProcessor |
class BaseProcessor(object, metaclass=abc.ABCMeta):
'''Base class for processors.
Processors contain the logic for processing requests.
'''
@asyncio.coroutine
def process(self, item_session: ItemSession):
'''Process an URL Item.
Args:
item_session: The URL item.
This function handles the logic for processing a single
URL item.
It must call one of :meth:`.engine.URLItem.set_status` or
:meth:`.engine.URLItem.skip`.
Coroutine.
'''
def close(self):
'''Run any clean up actions.'''
|
class BaseProcessor(object, metaclass=abc.ABCMeta):
'''Base class for processors.
Processors contain the logic for processing requests.
'''
@asyncio.coroutine
def process(self, item_session: ItemSession):
'''Process an URL Item.
Args:
item_session: The URL item.
This function handles the logic for processing a single
URL item.
It must call one of :meth:`.engine.URLItem.set_status` or
:meth:`.engine.URLItem.skip`.
Coroutine.
'''
pass
def close(self):
'''Run any clean up actions.'''
pass
| 4 | 3 | 8 | 2 | 1 | 5 | 1 | 3.25 | 2 | 1 | 1 | 3 | 2 | 0 | 2 | 22 | 23 | 6 | 4 | 4 | 0 | 13 | 3 | 3 | 0 | 1 | 3 | 0 | 2 |
6,822 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/pipeline/session.py | wpull.pipeline.session.URLItemSource |
class URLItemSource(ItemSource[ItemSession]):
def __init__(self, app_session: AppSession):
self._app_session = app_session
@asyncio.coroutine
def get_item(self) -> Optional[ItemSession]:
try:
url_record = self._app_session.factory['URLTable'].check_out(Status.todo)
except NotFound:
try:
url_record = self._app_session.factory['URLTable'].check_out(Status.error)
except NotFound:
return None
item_session = ItemSession(self._app_session, url_record)
return item_session
|
class URLItemSource(ItemSource[ItemSession]):
def __init__(self, app_session: AppSession):
pass
@asyncio.coroutine
def get_item(self) -> Optional[ItemSession]:
pass
| 4 | 0 | 7 | 1 | 6 | 0 | 2 | 0 | 1 | 4 | 4 | 0 | 2 | 1 | 2 | 25 | 16 | 2 | 14 | 7 | 10 | 0 | 13 | 6 | 10 | 3 | 4 | 2 | 4 |
6,823 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/pipeline/session.py | wpull.pipeline.session.ItemSession |
class ItemSession(object):
    '''Item for a URL that needs to be processed.'''
def __init__(self, app_session: AppSession, url_record: URLRecord):
self.app_session = app_session
self.url_record = url_record
self._processed = False
self._try_count_incremented = False
self._add_url_batch = []
self._request = None
self._response = None
@property
def is_virtual(self) -> bool:
return False
@property
def is_processed(self):
'''Return whether the item has been processed.'''
return self._processed
@property
def request(self) -> BaseRequest:
return self._request
@request.setter
def request(self, request: BaseRequest):
self._request = request
@property
def response(self) -> BaseResponse:
return self._response
@response.setter
def response(self, response: BaseResponse):
self._response = response
def skip(self):
'''Mark the item as processed without download.'''
_logger.debug(__(_('Skipping ‘{url}’.'), url=self.url_record.url))
self.app_session.factory['URLTable'].check_in(self.url_record.url, Status.skipped)
self._processed = True
def set_status(self, status: Status, increment_try_count: bool=True,
filename: str=None):
'''Mark the item with the given status.
Args:
status: a value from :class:`Status`.
increment_try_count: if True, increment the ``try_count``
value
'''
url = self.url_record.url
assert not self._try_count_incremented, (url, status)
if increment_try_count:
self._try_count_incremented = True
_logger.debug(__('Marking URL {0} status {1}.', url, status))
url_result = URLResult()
url_result.filename = filename
self.app_session.factory['URLTable'].check_in(
url,
status,
increment_try_count=increment_try_count,
url_result=url_result,
)
self._processed = True
def add_url(self, url: str, url_properites: Optional[URLProperties]=None,
url_data: Optional[URLData]=None):
url_info = parse_url_or_log(url)
if not url_info:
return
url_properties = url_properites or URLProperties()
url_data = url_data or URLData()
add_url_info = AddURLInfo(url, url_properties, url_data)
self._add_url_batch.append(add_url_info)
if len(self._add_url_batch) >= 1000:
self.app_session.factory['URLTable'].add_many(self._add_url_batch)
self._add_url_batch.clear()
def add_child_url(self, url: str, inline: bool=False,
link_type: Optional[LinkType]=None,
post_data: Optional[str]=None,
level: Optional[int]=None,
replace: bool=False):
'''Add links scraped from the document with automatic values.
Args:
url: A full URL. (It can't be a relative path.)
inline: Whether the URL is an embedded object.
link_type: Expected link type.
post_data: URL encoded form data. The request will be made using
POST. (Don't use this to upload files.)
level: The child depth of this URL.
replace: Whether to replace the existing entry in the database
table so it will be redownloaded again.
This function provides values automatically for:
* ``inline``
* ``level``
        * ``parent``: The referring page.
* ``root``
See also :meth:`add_url`.
'''
url_properties = URLProperties()
url_properties.level = self.url_record.level + 1 if level is None else level
url_properties.inline_level = (self.url_record.inline_level or 0) + 1 if inline else None
url_properties.parent_url = self.url_record.url
url_properties.root_url = self.url_record.root_url or self.url_record.url
url_properties.link_type = link_type
url_data = URLData()
url_data.post_data = post_data
if replace:
self.app_session.factory['URLTable'].remove_many([url])
self.add_url(url, url_properties, url_data)
def child_url_record(self, url: str, inline: bool=False,
link_type: Optional[LinkType]=None,
post_data: Optional[str]=None,
level: Optional[int]=None):
'''Return a child URLRecord.
This function is useful for testing filters before adding to table.
'''
url_record = URLRecord()
url_record.url = url
url_record.status = Status.todo
url_record.try_count = 0
url_record.level = self.url_record.level + 1 if level is None else level
url_record.root_url = self.url_record.root_url or self.url_record.url
url_record.parent_url = self.url_record.url
url_record.inline_level = (self.url_record.inline_level or 0) + 1 if inline else 0
url_record.link_type = link_type
url_record.post_data = post_data
return url_record
def finish(self):
self.app_session.factory['URLTable'].add_many(self._add_url_batch)
self._add_url_batch.clear()
def update_record_value(self, **kwargs):
self.app_session.factory['URLTable'].update_one(self.url_record.url, **kwargs)
for key, value in kwargs.items():
setattr(self.url_record, key, value)
|
class ItemSession(object):
    '''Item for a URL that needs to be processed.'''
def __init__(self, app_session: AppSession, url_record: URLRecord):
pass
@property
def is_virtual(self) -> bool:
pass
@property
def is_processed(self):
'''Return whether the item has been processed.'''
pass
@property
def request(self) -> BaseRequest:
pass
@request.setter
def request(self) -> BaseRequest:
pass
@property
def response(self) -> BaseResponse:
pass
@response.setter
def response(self) -> BaseResponse:
pass
def skip(self):
'''Mark the item as processed without download.'''
pass
def set_status(self, status: Status, increment_try_count: bool=True,
filename: str=None):
'''Mark the item with the given status.
Args:
status: a value from :class:`Status`.
increment_try_count: if True, increment the ``try_count``
value
'''
pass
def add_url(self, url: str, url_properites: Optional[URLProperties]=None,
url_data: Optional[URLData]=None):
pass
def add_child_url(self, url: str, inline: bool=False,
link_type: Optional[LinkType]=None,
post_data: Optional[str]=None,
level: Optional[int]=None,
replace: bool=False):
'''Add links scraped from the document with automatic values.
Args:
url: A full URL. (It can't be a relative path.)
inline: Whether the URL is an embedded object.
link_type: Expected link type.
post_data: URL encoded form data. The request will be made using
POST. (Don't use this to upload files.)
level: The child depth of this URL.
replace: Whether to replace the existing entry in the database
table so it will be redownloaded again.
This function provides values automatically for:
* ``inline``
* ``level``
* ``parent``: The referring page.
* ``root``
See also :meth:`add_url`.
'''
pass
def child_url_record(self, url: str, inline: bool=False,
link_type: Optional[LinkType]=None,
post_data: Optional[str]=None,
level: Optional[int]=None):
'''Return a child URLRecord.
This function is useful for testing filters before adding to the table.
'''
pass
def finish(self):
pass
def update_record_value(self, **kwargs):
pass
| 21 | 6 | 10 | 1 | 6 | 2 | 2 | 0.3 | 1 | 12 | 9 | 1 | 14 | 7 | 14 | 14 | 158 | 32 | 97 | 46 | 67 | 29 | 77 | 31 | 62 | 4 | 1 | 1 | 23 |
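A minimal usage sketch for the ItemSession.add_child_url and child_url_record methods shown above; item_session stands for an ItemSession handed to a processing hook, and the helper name and URLs are illustrative.

from wpull.pipeline.item import LinkType

def queue_scraped_links(item_session, links):
    # Embedded objects are flagged inline so inline_level is incremented
    # automatically; level, parent and root are also filled in for us.
    for link in links:
        item_session.add_child_url(link, inline=True, link_type=LinkType.media)

    # child_url_record() builds the same URLRecord without touching the
    # URL table, which is handy for checking filters first.
    record = item_session.child_url_record('http://example.com/page2')
    assert record.level == item_session.url_record.level + 1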
6,824 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/progress_test.py
|
wpull.pipeline.progress_test.TestProgress
|
class TestProgress(unittest.TestCase):
def test_progress_dot(self):
progress = DotProgress(stream=sys.stdout, draw_interval=0)
progress.max_value = 100
progress.min_value = 0
progress.update()
for dummy in range(100):
progress.current_value += 1
progress.update()
def test_progress_bar_integer(self):
progress = BarProgress(stream=sys.stdout, draw_interval=0)
progress.max_value = 100
progress.min_value = 0
progress.current_value = 10
progress.update()
for dummy in range(100):
progress.current_value += 1
progress.update()
def test_progress_bar_bytes(self):
progress = BarProgress(stream=sys.stdout, draw_interval=0)
progress.max_value = 100
progress.min_value = 0
progress.current_value = 10
progress.measurement = Measurement.bytes
progress.update()
for dummy in range(100):
progress.current_value += 1
progress.update()
def test_progress_http(self):
progress = ProgressPrinter(stream=sys.stdout)
request = HTTPRequest('http://example.com')
response = HTTPResponse(206, 'OK')
response.fields['Content-Size'] = '1024'
response.fields['Content-Range'] = 'bytes 10-/2048'
progress.update_from_begin_request(request)
progress.update_from_begin_response(response)
for dummy in range(100):
progress.update_with_data(b'abc')
progress.update_from_end_response(response)
def test_progress_ftp(self):
progress = ProgressPrinter(stream=sys.stdout)
request = FTPRequest('ftp://example.com/example.txt')
response = FTPResponse()
response.reply = FTPReply(226, 'Closing data connection')
response.file_transfer_size = 2048
response.restart_value = 10
progress.update_from_begin_request(request)
progress.update_from_begin_response(response)
for dummy in range(100):
progress.update_with_data(b'abc')
progress.update_from_end_response(response)
|
class TestProgress(unittest.TestCase):
def test_progress_dot(self):
pass
def test_progress_bar_integer(self):
pass
def test_progress_bar_bytes(self):
pass
def test_progress_http(self):
pass
def test_progress_ftp(self):
pass
| 6 | 0 | 13 | 3 | 10 | 0 | 2 | 0 | 1 | 5 | 4 | 0 | 5 | 0 | 5 | 77 | 72 | 21 | 51 | 20 | 45 | 0 | 51 | 20 | 45 | 2 | 2 | 1 | 10 |
6,825 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/progress.py
|
wpull.pipeline.progress.ProtocolProgress
|
class ProtocolProgress(Progress):
class State(enum.Enum):
idle = 'idle'
sending_request = 'sending_request'
sending_body = 'sending_body'
receiving_response = 'receiving_response'
receiving_body = 'receiving_body'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._state = self.State.sending_request
def update_from_begin_request(self, request: BaseRequest):
self._state = self.State.sending_request
self.reset()
self.measurement = Measurement.bytes
def update_from_begin_response(self, response: BaseResponse):
self._state = self.State.receiving_body
self._process_response_sizes(response)
def update_from_end_response(self, response: BaseResponse):
self._state = self.State.idle
def _process_response_sizes(self, response: BaseResponse):
if hasattr(response, 'fields'):
content_length = response.fields.get('Content-Length')
elif hasattr(response, 'file_transfer_size'):
content_length = response.file_transfer_size
else:
content_length = None
if content_length:
try:
self.max_value = int(content_length)
except ValueError:
pass
if response.protocol == 'http':
response = cast(HTTPResponse, response)
if not response.status_code == http.client.PARTIAL_CONTENT:
return
match = re.search(
r'bytes +([0-9]+)-([0-9]+)/([0-9]+)',
response.fields.get('Content-Range', '')
)
if match:
self.continue_value = int(match.group(1))
self.max_value = int(match.group(3))
elif response.protocol == 'ftp':
response = cast(FTPResponse, response)
if response.restart_value:
self.continue_value = response.restart_value
def update_with_data(self, data):
if self._state == self.State.receiving_body:
self.current_value += len(data)
self.update()
|
class ProtocolProgress(Progress):
class State(enum.Enum):
def __init__(self, *args, **kwargs):
pass
def update_from_begin_request(self, request: BaseRequest):
pass
def update_from_begin_response(self, response: BaseResponse):
pass
def update_from_end_response(self, response: BaseResponse):
pass
def _process_response_sizes(self, response: BaseResponse):
pass
def update_with_data(self, data):
pass
| 8 | 0 | 9 | 2 | 7 | 0 | 3 | 0 | 1 | 7 | 4 | 1 | 6 | 4 | 6 | 11 | 65 | 15 | 50 | 19 | 42 | 0 | 44 | 19 | 36 | 10 | 3 | 2 | 16 |
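A standalone sketch of the Content-Range handling mirrored from _process_response_sizes above; the header value is an example, not taken from a real response.

import re

header = 'bytes 1024-2047/4096'
match = re.search(r'bytes +([0-9]+)-([0-9]+)/([0-9]+)', header)
if match:
    continue_value = int(match.group(1))  # 1024: offset already downloaded
    max_value = int(match.group(3))       # 4096: total size of the resource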
6,826 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/progress.py
|
wpull.pipeline.progress.ProgressPrinter
|
class ProgressPrinter(ProtocolProgress):
def _print(self, *args):
'''Convenience function for the print function.
This function prints no newline.
'''
string = ' '.join([str(arg) for arg in args])
print(string, end='', file=self._stream)
def _println(self, *args):
'''Convenience function for the print function.'''
string = ' '.join([str(arg) for arg in args])
print(string, file=self._stream)
def _flush(self):
'''Flush the print stream.'''
self._stream.flush()
def update_from_end_response(self, response: BaseResponse):
super().update_from_end_response(response)
self._println()
|
class ProgressPrinter(ProtocolProgress):
def _print(self, *args):
'''Convenience function for the print function.
This function prints no newline.
'''
pass
def _println(self, *args):
'''Convenience function for the print function.'''
pass
def _flush(self):
'''Flush the print stream.'''
pass
def update_from_end_response(self, response: BaseResponse):
pass
| 5 | 3 | 5 | 1 | 3 | 1 | 1 | 0.42 | 1 | 3 | 1 | 2 | 4 | 0 | 4 | 15 | 22 | 5 | 12 | 7 | 7 | 5 | 12 | 7 | 7 | 1 | 4 | 0 | 4 |
6,827 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/progress.py
|
wpull.pipeline.progress.Progress
|
class Progress(HookableMixin):
'''Print file download progress as dots or a bar.
Args:
bar_style (bool): If True, print as a progress bar. If False,
print dots every few seconds.
stream: A file object. Default is usually stderr.
human_format (bool): If True, format sizes in units. Otherwise, output
bits only.
'''
def __init__(self, stream: IO[str]=sys.stderr):
super().__init__()
self._stream = stream
self.min_value = 0
self.max_value = None
self.current_value = 0
self.continue_value = None
self.measurement = Measurement.integer
self.event_dispatcher.register('update')
def update(self):
self.event_dispatcher.notify('update', self)
def reset(self):
self.min_value = 0
self.max_value = None
self.current_value = 0
self.continue_value = None
self.measurement = Measurement.integer
|
class Progress(HookableMixin):
'''Print file download progress as dots or a bar.
Args:
bar_style (bool): If True, print as a progress bar. If False,
print dots every few seconds.
stream: A file object. Default is usually stderr.
human_format (bool): If True, format sizes in units. Otherwise, output
bits only.
'''
def __init__(self, stream: IO[str]=sys.stderr):
pass
def update(self):
pass
def reset(self):
pass
| 4 | 1 | 7 | 1 | 6 | 0 | 1 | 0.44 | 1 | 4 | 1 | 1 | 3 | 6 | 3 | 5 | 33 | 7 | 18 | 10 | 14 | 8 | 18 | 10 | 14 | 1 | 2 | 0 | 3 |
6,828 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/progress.py
|
wpull.pipeline.progress.Measurement
|
class Measurement(enum.Enum):
integer = 'integer'
bytes = 'bytes'
|
class Measurement(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 4 | 0 | 0 |
6,829 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/ftp/request.py
|
wpull.protocol.ftp.request.Reply
|
class Reply(SerializableMixin, DictableMixin):
'''FTP reply.
Encoding is always UTF-8.
Attributes:
code (int): Reply code.
text (str): Reply message.
'''
def __init__(self, code=None, text=None):
self.code = code
self.text = text
def parse(self, data):
for line in data.splitlines(False):
match = re.match(br'(\d{3}|^)([ -]?)(.*)', line)
if not match:
raise ProtocolError('Failed to parse reply.')
if match.group(1) and match.group(2) == b' ':
assert self.code is None
self.code = int(match.group(1))
if self.text is None:
self.text = match.group(3).decode('utf-8',
errors='surrogateescape')
else:
self.text += '\r\n{0}'.format(match.group(3).decode(
'utf-8', errors='surrogateescape'))
def to_bytes(self):
assert self.code is not None
assert self.text is not None
text_lines = self.text.splitlines(False)
lines = []
for row_num in range(len(text_lines)):
line = text_lines[row_num]
if row_num == len(text_lines) - 1:
lines.append('{0} {1}\r\n'.format(self.code, line))
else:
lines.append('{0}-{1}\r\n'.format(self.code, line))
return ''.join(lines).encode('utf-8', errors='surrogateescape')
def to_dict(self):
return {
'code': self.code,
'text': self.text
}
def code_tuple(self):
'''Return a tuple of the reply code.'''
return wpull.protocol.ftp.util.reply_code_tuple(self.code)
|
class Reply(SerializableMixin, DictableMixin):
'''FTP reply.
Encoding is always UTF-8.
Attributes:
code (int): Reply code.
text (str): Reply message.
'''
def __init__(self, code=None, text=None):
pass
def parse(self, data):
pass
def to_bytes(self):
pass
def to_dict(self):
pass
def code_tuple(self):
'''Return a tuple of the reply code.'''
pass
| 6 | 2 | 9 | 1 | 7 | 0 | 2 | 0.19 | 2 | 3 | 1 | 0 | 5 | 2 | 5 | 9 | 57 | 13 | 37 | 14 | 31 | 7 | 30 | 14 | 24 | 5 | 2 | 2 | 11 |
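A brief round-trip sketch for the Reply class above; the reply text is illustrative.

from wpull.protocol.ftp.request import Reply

reply = Reply()
reply.parse(b'230-Welcome\r\n230 User logged in\r\n')
# Multi-line replies keep a single code and join the text with CRLF.
assert reply.code == 230
assert reply.text == 'Welcome\r\nUser logged in'
assert reply.to_bytes() == b'230-Welcome\r\n230 User logged in\r\n'
reply.code_tuple()  # splits the code into digits, e.g. (2, 3, 0)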
6,830 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/progress.py
|
wpull.pipeline.progress.DotProgress
|
class DotProgress(ProgressPrinter):
def __init__(self, *args, draw_interval: float=2.0, **kwargs):
super().__init__(*args, **kwargs)
self._draw_interval = draw_interval
self._last_draw_time = 0
def update(self):
super().update()
if self._state != self.State.receiving_body:
return
time_now = time.time()
if time_now - self._last_draw_time > self._draw_interval:
self._print_dots()
self._flush()
self._last_draw_time = time_now
def _print_dots(self):
'''Print a dot.'''
self._print('.')
|
class DotProgress(ProgressPrinter):
def __init__(self, *args, draw_interval: float=2.0, **kwargs):
pass
def update(self):
pass
def _print_dots(self):
'''Print a dot.'''
pass
| 4 | 1 | 7 | 2 | 5 | 0 | 2 | 0.06 | 1 | 2 | 0 | 0 | 3 | 2 | 3 | 18 | 24 | 7 | 16 | 7 | 12 | 1 | 16 | 7 | 12 | 3 | 5 | 1 | 5 |
6,831 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/progress.py
|
wpull.pipeline.progress.BarProgress
|
class BarProgress(ProgressPrinter):
def __init__(self, *args, draw_interval: float=0.5, bar_width: int=25,
human_format: bool=True, **kwargs):
super().__init__(*args, **kwargs)
self._draw_interval = draw_interval
self._bar_width = bar_width
self._human_format = human_format
self._throbber_index = 0
self._throbber_iter = itertools.cycle(
itertools.chain(
range(bar_width), reversed(range(1, bar_width - 1))
))
self._bandwidth_meter = BandwidthMeter()
self._previous_value = 0
self._last_draw_time = 0
self._start_time = time.time()
def update(self):
super().update()
if self._state != self.State.receiving_body:
return
difference = self.current_value - self._previous_value
self._previous_value = self.current_value
self._bandwidth_meter.feed(difference)
time_now = time.time()
if time_now - self._last_draw_time > self._draw_interval or self.current_value == self.max_value:
self._print_status()
self._flush()
self._last_draw_time = time_now
def _print_status(self):
'''Print an entire status line including bar and stats.'''
self._clear_line()
self._print(' ')
if self.max_value:
self._print_percent()
self._print(' ')
self._print_bar()
else:
self._print_throbber()
self._print(' ')
if self.measurement == Measurement.bytes:
self._print_size_downloaded()
else:
self._print(self.current_value)
self._print(' ')
self._print_duration()
self._print(' ')
if self.measurement == Measurement.bytes:
self._print_speed()
self._flush()
def _clear_line(self):
'''Print ANSI code to clear the current line.'''
self._print('\x1b[1G')
self._print('\x1b[2K')
def _print_throbber(self):
'''Print an indefinite progress bar.'''
self._print('[')
for position in range(self._bar_width):
self._print('O' if position == self._throbber_index else ' ')
self._print(']')
self._throbber_index = next(self._throbber_iter)
def _print_bar(self):
'''Print a progress bar.'''
self._print('[')
for position in range(self._bar_width):
position_fraction = position / (self._bar_width - 1)
position_bytes = position_fraction * self.max_value
if position_bytes < (self.continue_value or 0):
self._print('+')
elif position_bytes <= (self.continue_value or 0) + self.current_value:
self._print('=')
else:
self._print(' ')
self._print(']')
def _print_size_downloaded(self):
'''Print the bytes downloaded.'''
self._print(wpull.string.format_size(self.current_value))
def _print_duration(self):
'''Print the elapsed download time.'''
duration = int(time.time() - self._start_time)
self._print(datetime.timedelta(seconds=duration))
def _print_speed(self):
'''Print the current speed.'''
if self._bandwidth_meter.num_samples:
speed = self._bandwidth_meter.speed()
if self._human_format:
file_size_str = wpull.string.format_size(speed)
else:
file_size_str = '{:.1f} b'.format(speed * 8)
speed_str = _('{preformatted_file_size}/s').format(
preformatted_file_size=file_size_str
)
else:
speed_str = _('-- B/s')
self._print(speed_str)
def _print_percent(self):
'''Print how much is done in percentage.'''
fraction_done = (((self.continue_value or 0) + self.current_value) /
self.max_value)
self._print('{fraction_done:.1%}'.format(fraction_done=fraction_done))
|
class BarProgress(ProgressPrinter):
def __init__(self, *args, draw_interval: float=0.5, bar_width: int=25,
human_format: bool=True, **kwargs):
pass
def update(self):
pass
def _print_status(self):
'''Print an entire status line including bar and stats.'''
pass
def _clear_line(self):
'''Print ANSI code to clear the current line.'''
pass
def _print_throbber(self):
'''Print an indefinite progress bar.'''
pass
def _print_bar(self):
'''Print a progress bar.'''
pass
def _print_size_downloaded(self):
'''Print the bytes downloaded.'''
pass
def _print_duration(self):
'''Print the elapsed download time.'''
pass
def _print_speed(self):
'''Print the current speed.'''
pass
def _print_percent(self):
'''Print how much is done in percentage.'''
pass
| 11 | 8 | 12 | 3 | 9 | 1 | 2 | 0.09 | 1 | 11 | 2 | 0 | 10 | 9 | 10 | 25 | 133 | 34 | 91 | 32 | 79 | 8 | 78 | 31 | 67 | 4 | 5 | 2 | 22 |
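Worked numbers for the percentage printed by _print_percent above, assuming a resumed download; the byte counts are illustrative.

continue_value = 512   # bytes already on disk before resuming
current_value = 256    # bytes received in this session
max_value = 2048       # total size reported by the server
fraction_done = ((continue_value or 0) + current_value) / max_value
print('{fraction_done:.1%}'.format(fraction_done=fraction_done))  # 37.5%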
6,832 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/pipeline.py
|
wpull.pipeline.pipeline.Worker
|
class Worker(object):
def __init__(self, item_queue: ItemQueue, tasks: Sequence[ItemTask]):
self._item_queue = item_queue
self._tasks = tasks
self._worker_id_counter = 0
@asyncio.coroutine
def process_one(self, _worker_id=None):
item = yield from self._item_queue.get()
if item == POISON_PILL:
return item
_logger.debug(__('Worker id {} Processing item {}', _worker_id, item))
for task in self._tasks:
yield from task.process(item)
_logger.debug(__('Worker id {} Processed item {}', _worker_id, item))
yield from self._item_queue.item_done()
return item
@asyncio.coroutine
def process(self):
worker_id = self._worker_id_counter
self._worker_id_counter += 1
_logger.debug('Worker process id=%s', worker_id)
while True:
item = yield from self.process_one(_worker_id=worker_id)
if item == POISON_PILL:
_logger.debug('Worker quitting.')
break
|
class Worker(object):
def __init__(self, item_queue: ItemQueue, tasks: Sequence[ItemTask]):
pass
@asyncio.coroutine
def process_one(self, _worker_id=None):
pass
@asyncio.coroutine
def process(self):
pass
| 6 | 0 | 11 | 3 | 8 | 0 | 2 | 0 | 1 | 2 | 2 | 0 | 3 | 3 | 3 | 3 | 37 | 11 | 26 | 13 | 20 | 0 | 24 | 11 | 20 | 3 | 1 | 2 | 7 |
6,833 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/pipeline.py
|
wpull.pipeline.pipeline.Producer
|
class Producer(object):
def __init__(self, item_source: ItemSource, item_queue: ItemQueue):
self._item_source = item_source
self._item_queue = item_queue
self._running = False
@asyncio.coroutine
def process_one(self):
_logger.debug('Get item from source')
item = yield from self._item_source.get_item()
if item:
yield from self._item_queue.put_item(item)
return item
@asyncio.coroutine
def process(self):
self._running = True
while self._running:
item = yield from self.process_one()
if not item and self._item_queue.unfinished_items == 0:
self.stop()
break
elif not item:
yield from self._item_queue.wait_for_worker()
def stop(self):
if self._running:
_logger.debug('Producer stopping.')
self._running = False
|
class Producer(object):
def __init__(self, item_source: ItemSource, item_queue: ItemQueue):
pass
@asyncio.coroutine
def process_one(self):
pass
@asyncio.coroutine
def process(self):
pass
def stop(self):
pass
| 7 | 0 | 7 | 1 | 6 | 0 | 2 | 0 | 1 | 2 | 2 | 0 | 4 | 3 | 4 | 4 | 32 | 6 | 26 | 12 | 19 | 0 | 23 | 10 | 18 | 4 | 1 | 2 | 9 |
6,834 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/coprocessor/phantomjs.py
|
wpull.processor.coprocessor.phantomjs.PhantomJSCoprocessor
|
class PhantomJSCoprocessor(object):
'''PhantomJS coprocessor.
Args:
phantomjs_driver_factory: Callback function that accepts ``params``
argument and returns PhantomJSDriver
processing_rule: Processing rule.
warc_recorder: WARC recorder.
root_path (str): Root directory path for temp files.
'''
def __init__(self, phantomjs_driver_factory: Callable[..., PhantomJSDriver],
processing_rule: ProcessingRule,
phantomjs_params: PhantomJSParams,
warc_recorder=None, root_path='.'):
self._phantomjs_driver_factory = phantomjs_driver_factory
self._processing_rule = processing_rule
self._phantomjs_params = phantomjs_params
self._warc_recorder = warc_recorder
self._root_path = root_path
self._file_writer_session = None
@asyncio.coroutine
def process(self, item_session: ItemSession, request, response, file_writer_session):
'''Process PhantomJS.
Coroutine.
'''
if response.status_code != 200:
return
if not HTMLReader.is_supported(request=request, response=response):
return
_logger.debug('Starting PhantomJS processing.')
self._file_writer_session = file_writer_session
# FIXME: this is a quick hack for crashes. See #137.
attempts = int(os.environ.get('WPULL_PHANTOMJS_TRIES', 5))
for dummy in range(attempts):
try:
yield from self._run_driver(item_session, request, response)
except asyncio.TimeoutError:
_logger.warning(_('Waiting for page load timed out.'))
break
except PhantomJSCrashed as error:
_logger.exception(__('PhantomJS crashed: {}', error))
else:
break
else:
_logger.warning(__(
_('PhantomJS failed to fetch ‘{url}’. I am sorry.'),
url=request.url_info.url
))
@asyncio.coroutine
def _run_driver(self, item_session: ItemSession, request, response):
'''Start PhantomJS processing.'''
_logger.debug('Started PhantomJS processing.')
session = PhantomJSCoprocessorSession(
self._phantomjs_driver_factory, self._root_path,
self._processing_rule, self._file_writer_session,
request, response,
item_session, self._phantomjs_params, self._warc_recorder
)
with contextlib.closing(session):
yield from session.run()
_logger.debug('Ended PhantomJS processing.')
|
class PhantomJSCoprocessor(object):
'''PhantomJS coprocessor.
Args:
phantomjs_driver_factory: Callback function that accepts ``params``
argument and returns PhantomJSDriver
processing_rule: Processing rule.
warc_recorder: WARC recorder.
root_path (str): Root directory path for temp files.
'''
def __init__(self, phantomjs_driver_factory: Callable[..., PhantomJSDriver],
processing_rule: ProcessingRule,
phantomjs_params: PhantomJSParams,
warc_recorder=None, root_path='.'):
pass
@asyncio.coroutine
def process(self, item_session: ItemSession, request, response, file_writer_session):
'''Process PhantomJS.
Coroutine.
'''
pass
@asyncio.coroutine
def _run_driver(self, item_session: ItemSession, request, response):
'''Start PhantomJS processing.'''
pass
| 6 | 3 | 20 | 3 | 15 | 2 | 3 | 0.3 | 1 | 10 | 6 | 0 | 3 | 6 | 3 | 3 | 74 | 13 | 47 | 19 | 38 | 14 | 34 | 13 | 30 | 6 | 1 | 2 | 8 |
6,835 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/pipeline.py
|
wpull.pipeline.pipeline.PipelineState
|
class PipelineState(enum.Enum):
stopped = 'stopped'
running = 'running'
stopping = 'stopping'
|
class PipelineState(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 4 | 0 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 4 | 0 | 0 |
6,836 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/pipeline.py
|
wpull.pipeline.pipeline.Pipeline
|
class Pipeline(object):
def __init__(self, item_source: ItemSource, tasks: Sequence[ItemTask],
item_queue: Optional[ItemQueue]=None):
self._item_queue = item_queue or ItemQueue()
self._tasks = tasks
self._producer = Producer(item_source, self._item_queue)
self._worker = Worker(self._item_queue, tasks)
self._state = PipelineState.stopped
self._concurrency = 1
self._producer_task = None
self._worker_tasks = set()
self._unpaused_event = asyncio.Event()
self.skippable = False
@property
def tasks(self):
return self._tasks
@asyncio.coroutine
def process(self):
if self._state == PipelineState.stopped:
self._state = PipelineState.running
self._producer_task = asyncio.get_event_loop().create_task(self._run_producer_wrapper())
self._unpaused_event.set()
while self._state == PipelineState.running:
yield from self._process_one_worker()
yield from self._shutdown_processing()
@asyncio.coroutine
def _process_one_worker(self):
assert self._state == PipelineState.running, self._state
while len(self._worker_tasks) < self._concurrency:
_logger.debug('Creating worker')
worker_task = asyncio.get_event_loop().create_task(self._worker.process())
self._worker_tasks.add(worker_task)
if self._worker_tasks:
wait_coroutine = asyncio.wait(
self._worker_tasks, return_when=asyncio.FIRST_COMPLETED)
done_tasks = (yield from wait_coroutine)[0]
_logger.debug('%d worker tasks completed', len(done_tasks))
for task in done_tasks:
task.result()
self._worker_tasks.remove(task)
else:
yield from self._unpaused_event.wait()
@asyncio.coroutine
def _shutdown_processing(self):
assert self._state == PipelineState.stopping
_logger.debug('Exited workers loop.')
if self._worker_tasks:
_logger.debug('Waiting for workers to stop.')
yield from asyncio.wait(self._worker_tasks)
_logger.debug('Waiting for producer to stop.')
self._worker_tasks.clear()
yield from self._producer_task
self._state = PipelineState.stopped
def stop(self):
if self._state == PipelineState.running:
self._state = PipelineState.stopping
self._producer.stop()
self._kill_workers()
@asyncio.coroutine
def _run_producer_wrapper(self):
'''Run the producer, if exception, stop engine.'''
try:
yield from self._producer.process()
except Exception as error:
if not isinstance(error, StopIteration):
# Stop the workers so the producer exception will be handled
# when we finally yield from this coroutine
_logger.debug('Producer died.', exc_info=True)
self.stop()
raise
else:
self.stop()
def _kill_workers(self):
for dummy in range(len(self._worker_tasks)):
_logger.debug('Put poison pill.')
self._item_queue.put_poison_nowait()
@property
def concurrency(self) -> int:
return self._concurrency
@concurrency.setter
def concurrency(self, new_concurrency: int):
if new_concurrency < 0:
raise ValueError('Concurrency cannot be negative')
change = new_concurrency - self._concurrency
self._concurrency = new_concurrency
if self._state != PipelineState.running:
return
if change < 0:
for dummy in range(abs(change)):
_logger.debug('Put poison pill for less workers.')
self._item_queue.put_poison_nowait()
elif change > 0:
_logger.debug('Put 1 poison pill to trigger more workers.')
self._item_queue.put_poison_nowait()
if self._concurrency:
self._unpaused_event.set()
else:
self._unpaused_event.clear()
def _warn_discarded_items(self):
_logger.warning(__(
gettext.ngettext(
'Discarding {num} unprocessed item.',
'Discarding {num} unprocessed items.',
self._item_queue.unfinished_items
),
num=self._item_queue.unfinished_items
))
|
class Pipeline(object):
def __init__(self, item_source: ItemSource, tasks: Sequence[ItemTask],
item_queue: Optional[ItemQueue]=None):
pass
@property
def tasks(self):
pass
@asyncio.coroutine
def process(self):
pass
@asyncio.coroutine
def _process_one_worker(self):
pass
@asyncio.coroutine
def _shutdown_processing(self):
pass
def stop(self):
pass
@asyncio.coroutine
def _run_producer_wrapper(self):
'''Run the producer, if exception, stop engine.'''
pass
def _kill_workers(self):
pass
@property
def concurrency(self) -> int:
pass
@concurrency.setter
def concurrency(self) -> int:
pass
def _warn_discarded_items(self):
pass
| 19 | 1 | 11 | 2 | 9 | 0 | 2 | 0.03 | 1 | 12 | 6 | 0 | 11 | 10 | 11 | 11 | 135 | 28 | 104 | 38 | 84 | 3 | 85 | 29 | 73 | 7 | 1 | 2 | 27 |
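A minimal end-to-end sketch of the producer/worker pipeline, written in the same pre-async/await coroutine style as the module. It assumes the ItemSource and ItemTask base classes in wpull.pipeline.pipeline only require get_item() and process() to be overridden; WordSource and PrintTask are made-up names.

import asyncio
from wpull.pipeline.pipeline import ItemSource, ItemTask, Pipeline

class WordSource(ItemSource):
    def __init__(self, words):
        self._words = list(words)

    @asyncio.coroutine
    def get_item(self):
        # Returning a falsy value tells the Producer the source is drained.
        return self._words.pop(0) if self._words else None

class PrintTask(ItemTask):
    @asyncio.coroutine
    def process(self, work_item):
        print('processing', work_item)

pipeline = Pipeline(WordSource(['a', 'b', 'c']), [PrintTask()])
pipeline.concurrency = 2
asyncio.get_event_loop().run_until_complete(pipeline.process())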
6,837 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/pipeline.py
|
wpull.pipeline.pipeline.ItemQueue
|
class ItemQueue(Generic[WorkItemT]):
def __init__(self):
self._queue = asyncio.PriorityQueue()
self._unfinished_items = 0
self._worker_ready_condition = asyncio.Condition()
self._entry_count = 0
@asyncio.coroutine
def put_item(self, item: WorkItemT):
while self._queue.qsize() > 0:
yield from self._worker_ready_condition.acquire()
yield from self._worker_ready_condition.wait()
self._worker_ready_condition.release()
self._unfinished_items += 1
self._queue.put_nowait((ITEM_PRIORITY, self._entry_count, item))
self._entry_count += 1
def put_poison_nowait(self):
self._queue.put_nowait((POISON_PRIORITY, self._entry_count, POISON_PILL))
self._entry_count += 1
@asyncio.coroutine
def get(self) -> WorkItemT:
priority, entry_count, item = yield from self._queue.get()
yield from self._worker_ready_condition.acquire()
self._worker_ready_condition.notify_all()
self._worker_ready_condition.release()
return item
@asyncio.coroutine
def item_done(self):
self._unfinished_items -= 1
assert self._unfinished_items >= 0
yield from self._worker_ready_condition.acquire()
self._worker_ready_condition.notify_all()
self._worker_ready_condition.release()
@property
def unfinished_items(self) -> int:
return self._unfinished_items
@asyncio.coroutine
def wait_for_worker(self):
yield from self._worker_ready_condition.acquire()
yield from self._worker_ready_condition.wait()
self._worker_ready_condition.release()
|
class ItemQueue(Generic[WorkItemT]):
def __init__(self):
pass
@asyncio.coroutine
def put_item(self, item: WorkItemT):
pass
def put_poison_nowait(self):
pass
@asyncio.coroutine
def get(self) -> WorkItemT:
pass
@asyncio.coroutine
def item_done(self):
pass
@property
def unfinished_items(self) -> int:
pass
@asyncio.coroutine
def wait_for_worker(self):
pass
| 13 | 0 | 5 | 1 | 5 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 7 | 4 | 7 | 9 | 50 | 10 | 40 | 18 | 27 | 0 | 35 | 13 | 27 | 2 | 1 | 1 | 8 |
6,838 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/item.py
|
wpull.pipeline.item.URLResult
|
class URLResult(URLDatabaseMixin):
'''Data associated with the fetched URL.
status_code (int): The HTTP or FTP status code.
filename (str): The path to where the file was saved.
'''
database_attributes = ('status_code', 'filename')
def __init__(self):
self.status_code = None
self.filename = None
|
class URLResult(URLDatabaseMixin):
'''Data associated with the fetched URL.
status_code (int): The HTTP or FTP status code.
filename (str): The path to where the file was saved.
'''
def __init__(self):
pass
| 2 | 1 | 3 | 0 | 3 | 0 | 1 | 0.8 | 1 | 0 | 0 | 1 | 1 | 2 | 1 | 2 | 11 | 2 | 5 | 5 | 3 | 4 | 5 | 5 | 3 | 1 | 1 | 0 | 1 |
6,839 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/item.py
|
wpull.pipeline.item.URLRecord
|
class URLRecord(URLProperties, URLData, URLResult):
'''An entry in the URL table describing a URL to be downloaded.
Attributes:
url (str): The URL.
'''
def __init__(self):
super().__init__()
self.url = None
@property
def url_info(self) -> URLInfo:
'''Return URL Info for this URL'''
return URLInfo.parse(self.url)
|
class URLRecord(URLProperties, URLData, URLResult):
'''An entry in the URL table describing a URL to be downloaded.
Attributes:
url (str): The URL.
'''
def __init__(self):
pass
@property
def url_info(self) -> URLInfo:
'''Return URL Info for this URL'''
pass
| 4 | 2 | 3 | 0 | 3 | 1 | 1 | 0.71 | 3 | 2 | 1 | 0 | 2 | 1 | 2 | 8 | 14 | 2 | 7 | 5 | 3 | 5 | 6 | 4 | 3 | 1 | 2 | 0 | 2 |
6,840 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/item.py
|
wpull.pipeline.item.URLProperties
|
class URLProperties(URLDatabaseMixin):
'''URL properties that determine whether a URL is fetched.
Attributes:
parent_url (str): The parent or referral URL that linked to this URL.
root_url (str): The earliest ancestor URL of this URL. This URL
is typically the URL supplied at the start of the program.
status (Status): Processing status of this URL.
try_count (int): The number of attempts on this URL.
level (int): The recursive depth of this URL. A level of ``0``
indicates the URL was initially supplied to the program (the
top URL).
Level ``1`` means the URL was linked from the top URL.
inline_level (int): Whether this URL was an embedded object (such as an
image or a stylesheet) of the parent URL.
The value represents the recursive depth of the object. For
example, an iframe is depth 1 and the images in the iframe
is depth 2.
link_type (LinkType): Describes the expected document type.
'''
database_attributes = ('parent_url', 'root_url', 'status', 'try_count',
'level', 'inline_level', 'link_type', 'priority')
def __init__(self):
self.parent_url = None
self.root_url = None
self.status = None
self.try_count = None
self.level = None
self.inline_level = None
self.link_type = None
self.priority = None
@property
def parent_url_info(self):
'''Return URL Info for the parent URL'''
return URLInfo.parse(self.parent_url)
@property
def root_url_info(self):
'''Return URL Info for the root URL'''
return URLInfo.parse(self.root_url)
|
class URLProperties(URLDatabaseMixin):
'''URL properties that determine whether a URL is fetched.
Attributes:
parent_url (str): The parent or referral URL that linked to this URL.
root_url (str): The earliest ancestor URL of this URL. This URL
is typically the URL supplied at the start of the program.
status (Status): Processing status of this URL.
try_count (int): The number of attempts on this URL.
level (int): The recursive depth of this URL. A level of ``0``
indicates the URL was initially supplied to the program (the
top URL).
Level ``1`` means the URL was linked from the top URL.
inline_level (int): Whether this URL was an embedded object (such as an
image or a stylesheet) of the parent URL.
The value represents the recursive depth of the object. For
example, an iframe is depth 1 and the images in the iframe
is depth 2.
link_type (LinkType): Describes the expected document type.
'''
def __init__(self):
pass
@property
def parent_url_info(self):
'''Return URL Info for the parent URL'''
pass
@property
def root_url_info(self):
'''Return URL Info for the root URL'''
pass
| 6 | 3 | 5 | 0 | 4 | 1 | 1 | 1.11 | 1 | 1 | 1 | 1 | 3 | 8 | 3 | 4 | 43 | 5 | 18 | 15 | 12 | 20 | 15 | 13 | 11 | 1 | 1 | 0 | 3 |
6,841 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/item.py
|
wpull.pipeline.item.URLDatabaseMixin
|
class URLDatabaseMixin:
def database_items(self):
for name in self.database_attributes:
value = getattr(self, name)
if value is not None:
yield name, value
|
class URLDatabaseMixin:
def database_items(self):
pass
| 2 | 0 | 6 | 1 | 5 | 0 | 3 | 0 | 0 | 0 | 0 | 3 | 1 | 0 | 1 | 1 | 7 | 1 | 6 | 4 | 4 | 0 | 6 | 4 | 4 | 3 | 0 | 2 | 3 |
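A short sketch of database_items(), using the URLResult class shown earlier; only attributes that are not None are yielded.

from wpull.pipeline.item import URLResult

result = URLResult()
result.status_code = 200
print(dict(result.database_items()))  # {'status_code': 200}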
6,842 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/item.py
|
wpull.pipeline.item.URLData
|
class URLData(URLDatabaseMixin):
'''Data associated with fetching the URL.
post_data (str): If given, the URL should be fetched as a
POST request containing `post_data`.
'''
database_attributes = ('post_data',)
def __init__(self):
self.post_data = None
|
class URLData(URLDatabaseMixin):
'''Data associated with fetching the URL.
post_data (str): If given, the URL should be fetched as a
POST request containing `post_data`.
'''
def __init__(self):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 1 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 2 | 10 | 2 | 4 | 4 | 2 | 4 | 4 | 4 | 2 | 1 | 1 | 0 | 1 |
6,843 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/item.py
|
wpull.pipeline.item.Status
|
class Status(enum.Enum):
'''URL status.'''
todo = 'todo'
'''The item has not yet been processed.'''
in_progress = 'in_progress'
'''The item is in progress of being processed.'''
done = 'done'
'''The item has been processed successfully.'''
error = 'error'
'''The item encountered an error during processing.'''
skipped = 'skipped'
'''The item was excluded from processing due to some rejection filters.'''
|
class Status(enum.Enum):
'''URL status.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 12 | 0 | 6 | 6 | 5 | 6 | 6 | 6 | 5 | 0 | 4 | 0 | 0 |
6,844 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/item.py
|
wpull.pipeline.item.LinkType
|
class LinkType(enum.Enum):
'''The type of contents that a link is expected to have.'''
html = 'html'
'''HTML document.'''
css = 'css'
'''Stylesheet file. Recursion on links is usually safe.'''
javascript = 'javascript'
'''JavaScript file. Possible to recurse links on this file.'''
media = 'media'
'''Image or video file. Recursion on this type will not be useful.'''
sitemap = 'sitemap'
'''A Sitemap.xml file.'''
file = 'file'
'''FTP File.'''
directory = 'directory'
'''FTP directory.'''
|
class LinkType(enum.Enum):
'''The type of contents that a link is expected to have.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 16 | 0 | 8 | 8 | 7 | 8 | 8 | 8 | 7 | 0 | 4 | 0 | 0 |
6,845 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/observer_test.py
|
wpull.observer_test.TestObserver
|
class TestObserver(unittest.TestCase):
def test_observer(self):
observer = Observer()
self.assertEqual(0, observer.count())
# Check for no crash
observer.notify()
observer.clear()
self.assertRaises(KeyError, observer.remove, 'no exist')
values = {}
def func(value):
values['value'] = value
observer.add(func)
self.assertEqual(1, observer.count())
observer.notify('a')
self.assertEqual('a', values['value'])
observer.clear()
observer.notify()
self.assertEqual(0, observer.count())
|
class TestObserver(unittest.TestCase):
def test_observer(self):
pass
def func(value):
pass
| 3 | 0 | 15 | 6 | 9 | 1 | 1 | 0.06 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 73 | 29 | 11 | 17 | 5 | 14 | 1 | 17 | 5 | 14 | 1 | 2 | 0 | 2 |
6,846 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/path.py
|
wpull.path.PathNamer
|
class PathNamer(BasePathNamer):
'''Path namer that creates a directory hierarchy based on the URL.
Args:
root (str): The base path.
index (str): The filename to use when the URL path does not indicate
one.
use_dir (bool): Include directories based on the URL path.
cut (int): Number of leading directories to cut from the file path.
protocol (bool): Include the URL scheme in the directory structure.
hostname (bool): Include the hostname in the directory structure.
safe_filename_args (dict): Keyword arguments for `safe_filename`.
See also: :func:`url_to_filename`, :func:`url_to_dir_parts`,
:func:`safe_filename`.
'''
def __init__(self, root, index='index.html', use_dir=False, cut=None,
protocol=False, hostname=False, os_type='unix',
no_control=True, ascii_only=True,
case=None, max_filename_length=None):
self._root = root
self._index = index
self._cut = cut
self._protocol = protocol
self._hostname = hostname
self._use_dir = use_dir
self._os_type = os_type
self._no_control = no_control
self._ascii_only = ascii_only
self._case = case
self._max_filename_length = max_filename_length
if os.path.isfile(root):
raise IOError('Root cannot be a file.')
def get_filename(self, url_info):
url = url_info.url
alt_char = self._os_type == 'windows'
parts = []
if self._use_dir:
dir_parts = url_to_dir_parts(
url, self._protocol, self._hostname, alt_char=alt_char
)
for dummy in range(self._cut or 0):
if dir_parts:
del dir_parts[0]
parts.extend(dir_parts)
parts.append(url_to_filename(
url,
'.listing' if url_info.scheme == 'ftp' else self._index,
alt_char=alt_char
))
if url_info.scheme == 'ftp':
parts = [urllib.parse.unquote(part) for part in parts]
parts = [self.safe_filename(part) for part in parts]
return os.path.join(self._root, *parts)
def safe_filename(self, part):
'''Return a safe filename or file part.'''
return safe_filename(
part,
os_type=self._os_type, no_control=self._no_control,
ascii_only=self._ascii_only, case=self._case,
max_length=self._max_filename_length,
)
|
class PathNamer(BasePathNamer):
'''Path namer that creates a directory hierarchy based on the URL.
Args:
root (str): The base path.
index (str): The filename to use when the URL path does not indicate
one.
use_dir (bool): Include directories based on the URL path.
cut (int): Number of leading directories to cut from the file path.
protocol (bool): Include the URL scheme in the directory structure.
hostname (bool): Include the hostname in the directory structure.
safe_filename_args (dict): Keyword arguments for `safe_filename`.
See also: :func:`url_to_filename`, :func:`url_to_dir_parts`,
:func:`safe_filename`.
'''
def __init__(self, root, index='index.html', use_dir=False, cut=None,
protocol=False, hostname=False, os_type='unix',
no_control=True, ascii_only=True,
case=None, max_filename_length=None):
pass
def get_filename(self, url_info):
pass
def safe_filename(self, part):
'''Return a safe filename or file part.'''
pass
| 4 | 2 | 18 | 3 | 15 | 0 | 3 | 0.3 | 1 | 1 | 0 | 0 | 3 | 11 | 3 | 24 | 72 | 12 | 46 | 23 | 39 | 14 | 32 | 20 | 28 | 6 | 4 | 3 | 9 |
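A rough usage sketch for PathNamer, assuming URLInfo from wpull.url; the exact result depends on the safe_filename options, but with the defaults it is roughly the path shown in the comment.

from wpull.path import PathNamer
from wpull.url import URLInfo

namer = PathNamer('downloads', use_dir=True, hostname=True)
path = namer.get_filename(URLInfo.parse('http://example.com/blog/image.png'))
# path is roughly 'downloads/example.com/blog/image.png'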
6,847 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/path.py
|
wpull.path.PercentEncoder
|
class PercentEncoder(collections.defaultdict):
'''Percent encoder.'''
# The percent-encoder was inspired from urllib.parse
def __init__(self, unix=False, control=False, windows=False, ascii_=False):
super().__init__()
self.unix = unix
self.control = control
self.windows = windows
self.ascii = ascii_
def __missing__(self, char):
assert isinstance(char, bytes), \
'Expect bytes. Got {}.'.format(type(char))
char_num = ord(char)
if ((self.unix and char == b'/')
or (self.control and
(0 <= char_num <= 31 or
self.ascii and 128 <= char_num <= 159))
or (self.windows and char in br'\|/:?"*<>')
or (self.ascii and char_num > 127)):
value = b'%' + base64.b16encode(char)
else:
value = char
self[char] = value
return value
def quote(self, bytes_string):
quoter = self.__getitem__
return b''.join(
[quoter(bytes_string[i:i + 1]) for i in range(len(bytes_string))]
)
|
class PercentEncoder(collections.defaultdict):
'''Percent encoder.'''
def __init__(self, unix=False, control=False, windows=False, ascii_=False):
pass
def __missing__(self, char):
pass
def quote(self, bytes_string):
pass
| 4 | 1 | 10 | 1 | 9 | 0 | 1 | 0.07 | 1 | 4 | 0 | 0 | 3 | 4 | 3 | 3 | 34 | 5 | 27 | 11 | 23 | 2 | 18 | 11 | 14 | 2 | 1 | 1 | 4 |
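A small sketch of PercentEncoder, which escapes only the byte classes it was configured for (here '/' and control bytes).

from wpull.path import PercentEncoder

encoder = PercentEncoder(unix=True, control=True)
print(encoder.quote(b'a/b\x00c'))  # b'a%2Fb%00c'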
6,848 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/path_test.py
|
wpull.path_test.TestPath
|
class TestPath(unittest.TestCase, TempDirMixin):
def setUp(self):
self.set_up_temp_dir()
def tearDown(self):
self.tear_down_temp_dir()
def test_url_to_dir_parts(self):
self.assertEqual(
['blog'],
url_to_dir_parts('http://example.com/blog/')
)
self.assertEqual(
['blog'],
url_to_dir_parts('http://example.com/blog/image.png')
)
self.assertEqual(
['example.com', 'blog'],
url_to_dir_parts(
'http://example.com/blog/image.png', include_hostname=True
)
)
self.assertEqual(
[],
url_to_dir_parts('http://example.com/')
)
self.assertEqual(
['example.com:123'],
url_to_dir_parts(
'http://example.com:123/',
include_hostname=True, alt_char=False,
)
)
self.assertEqual(
['example.com+123'],
url_to_dir_parts(
'http://example.com:123/',
include_hostname=True, alt_char=True,
)
)
def test_url_to_filename(self):
self.assertEqual(
'image.png',
url_to_filename('http://example.com/blog/image.png')
)
self.assertEqual(
'index.html',
url_to_filename('http://example.com/blog/')
)
self.assertEqual(
'index.html',
url_to_filename('http://example.com/')
)
self.assertEqual(
'index.html?blah=',
url_to_filename('http://example.com/?blah=')
)
self.assertEqual(
'index.html@blah=',
url_to_filename('http://example.com/?blah=', alt_char=True)
)
def test_safe_filename(self):
self.assertEqual(
'asdf',
safe_filename(
'asdf',
os_type='unix', no_control=True, ascii_only=True, case=None
)
)
self.assertEqual(
'asdf%00',
safe_filename(
'asdf\x00',
os_type='unix', no_control=True, ascii_only=True, case=None
)
)
self.assertEqual(
'asdf%F0%9F%92%8E',
safe_filename(
'asdf💎',
os_type='unix', no_control=True, ascii_only=True, case=None
)
)
self.assertEqual(
'asdf💎',
safe_filename(
'asdf💎',
os_type='unix', no_control=True, ascii_only=False, case=None
)
)
self.assertEqual(
'asdf%3a',
safe_filename(
'Asdf:',
os_type='windows', no_control=True, ascii_only=True,
case='lower'
)
)
self.assertEqual(
'A%C3%A9',
safe_filename(
'aé',
os_type='windows', no_control=True, ascii_only=True,
case='upper',
)
)
self.assertEqual(
'%C3%A1bcdefgf29053e2',
safe_filename(
'ábcdefghij123456789012345678901234567890',
max_length=20,
)
)
def test_anti_clobber_dir_path(self):
with self.cd_tempdir():
self.assertEqual(
'a',
anti_clobber_dir_path('./a/')
)
with self.cd_tempdir():
self.assertEqual(
'a/b/c/d/e/f/g',
anti_clobber_dir_path('a/b/c/d/e/f/g/')
)
with self.cd_tempdir():
self.assertEqual(
'a/b/c/d/e/f/g',
anti_clobber_dir_path('a/b/c/d/e/f/g')
)
with self.cd_tempdir():
with open('a', 'w'):
pass
self.assertEqual(
'a.d/b/c/d/e/f/g',
anti_clobber_dir_path('a/b/c/d/e/f/g')
)
with self.cd_tempdir():
os.makedirs('a/b')
with open('a/b/c', 'w'):
pass
self.assertEqual(
'a/b/c.d/d/e/f/g',
anti_clobber_dir_path('a/b/c/d/e/f/g')
)
with self.cd_tempdir():
os.makedirs('a/b')
with open('a/b/c', 'w'):
pass
self.assertEqual(
os.path.abspath('a/b/c.d/d/e/f/g'),
anti_clobber_dir_path(os.path.abspath('a/b/c/d/e/f/g'))
)
with self.cd_tempdir():
os.makedirs('a/b/c/d/e/f')
with open('a/b/c/d/e/f/g', 'w'):
pass
self.assertEqual(
'a/b/c/d/e/f/g.d',
anti_clobber_dir_path('a/b/c/d/e/f/g')
)
def test_parse_content_disposition(self):
self.assertEqual(
'hello.txt',
parse_content_disposition('attachment; filename=hello.txt')
)
self.assertEqual(
'hello.txt',
parse_content_disposition(
'attachment; filename=hello.txt; filename*=blahblah')
)
self.assertEqual(
'hello.txt',
parse_content_disposition(
'attachment; filename=hello.txt ;filename*=blahblah')
)
self.assertEqual(
'hello.txt',
parse_content_disposition('attachment; filename="hello.txt"')
)
self.assertEqual(
'hello.txt',
parse_content_disposition('attachment; filename="hello.txt" ;')
)
self.assertEqual(
'hello world',
parse_content_disposition('attachment; filename="hello world"')
)
self.assertEqual(
'hello world',
parse_content_disposition('attachment; filename="hello world"')
)
self.assertEqual(
'hello world',
parse_content_disposition("attachment; filename='hello world'")
)
self.assertEqual(
'hello"world',
parse_content_disposition('attachment; filename="hello\\"world"')
)
self.assertEqual(
'\'hello"world\'',
parse_content_disposition('attachment; filename="\'hello\\"world\'"')
)
self.assertEqual(
'\'hello"world\'',
parse_content_disposition(
'attachment; filename="\'hello\\"world\'";')
)
self.assertFalse(
parse_content_disposition('attachment; filename=')
)
self.assertFalse(
parse_content_disposition('attachment; filename=""')
)
self.assertFalse(
parse_content_disposition('attachment; filename=";')
)
self.assertFalse(
parse_content_disposition('attachment; filename=\'aaa')
)
self.assertFalse(
parse_content_disposition('attachment; filename="aaa')
)
|
class TestPath(unittest.TestCase, TempDirMixin):
def setUp(self):
pass
def tearDown(self):
pass
def test_url_to_dir_parts(self):
pass
def test_url_to_filename(self):
pass
def test_safe_filename(self):
pass
def test_anti_clobber_dir_path(self):
pass
def test_parse_content_disposition(self):
pass
| 8 | 0 | 33 | 1 | 31 | 0 | 1 | 0 | 2 | 0 | 0 | 0 | 7 | 0 | 7 | 82 | 237 | 16 | 221 | 8 | 213 | 0 | 69 | 8 | 61 | 1 | 2 | 2 | 7 |
6,849 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/app.py
|
wpull.pipeline.app.AppSession
|
class AppSession(object):
def __init__(self, factory: Factory, args, stderr):
self.default_user_agent = 'Wpull/{0} (gzip)'.format(
wpull.version.__version__)
self.factory = factory
self.args = args
self.stderr = stderr
self.ca_certs_filename = None
self.console_log_handler = None
self.file_log_handler = None
self.resource_monitor_semaphore = asyncio.BoundedSemaphore(1)
self.ssl_context = None
self.async_servers = []
self.background_async_tasks = []
self.proxy_server_port = None
self.plugin_manager = None
self.root_path = args.directory_prefix
|
class AppSession(object):
def __init__(self, factory: Factory, args, stderr):
pass
| 2 | 0 | 16 | 0 | 16 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 14 | 1 | 1 | 17 | 0 | 17 | 16 | 15 | 0 | 16 | 16 | 14 | 1 | 1 | 0 | 1 |
6,850 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/pipeline.py
|
wpull.pipeline.pipeline.PipelineSeries
|
class PipelineSeries(object):
def __init__(self, pipelines: Iterator[Pipeline]):
self._pipelines = tuple(pipelines)
self._concurrency = 1
self._concurrency_pipelines = set()
@property
def pipelines(self) -> Tuple[Pipeline]:
return self._pipelines
@property
def concurrency(self) -> int:
return self._concurrency
@concurrency.setter
def concurrency(self, new_concurrency: int):
self._concurrency = new_concurrency
for pipeline in self._pipelines:
if pipeline in self._concurrency_pipelines:
pipeline.concurrency = new_concurrency
@property
def concurrency_pipelines(self) -> Set[Pipeline]:
return self._concurrency_pipelines
|
class PipelineSeries(object):
def __init__(self, pipelines: Iterator[Pipeline]):
pass
@property
def pipelines(self) -> Tuple[Pipeline]:
pass
@property
def concurrency(self) -> int:
pass
@concurrency.setter
def concurrency(self) -> int:
pass
@property
def concurrency_pipelines(self) -> Set[Pipeline]:
pass
| 10 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 4 | 1 | 0 | 5 | 3 | 5 | 5 | 25 | 5 | 20 | 14 | 10 | 0 | 16 | 10 | 10 | 3 | 1 | 2 | 7 |
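A short sketch of how PipelineSeries applies a concurrency change only to pipelines registered in concurrency_pipelines; the Pipeline instances are built with a None source purely for illustration.

from wpull.pipeline.pipeline import Pipeline, PipelineSeries

fetch_pipeline = Pipeline(None, [])
cleanup_pipeline = Pipeline(None, [])
series = PipelineSeries([fetch_pipeline, cleanup_pipeline])
series.concurrency_pipelines.add(fetch_pipeline)
series.concurrency = 4
print(fetch_pipeline.concurrency, cleanup_pipeline.concurrency)  # 4 1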
6,851 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/pipeline/app.py
|
wpull.pipeline.app.AppSource
|
class AppSource(ItemSource[AppSession]):
def __init__(self, session: AppSession):
self._source = session
@asyncio.coroutine
def get_item(self) -> Optional[AppSession]:
item = self._source
self._source = None
return item
|
class AppSource(ItemSource[AppSession]):
def __init__(self, session: AppSession):
pass
@asyncio.coroutine
def get_item(self) -> Optional[AppSession]:
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 2 | 25 | 9 | 1 | 8 | 6 | 4 | 0 | 7 | 5 | 4 | 1 | 4 | 0 | 2 |
6,852 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/coprocessor/phantomjs.py
|
wpull.processor.coprocessor.phantomjs.PhantomJSCoprocessorSession
|
class PhantomJSCoprocessorSession(object):
'''PhantomJS coprocessor session.'''
def __init__(self, phantomjs_driver_factory, root_path,
processing_rule, file_writer_session,
request, response,
item_session: ItemSession, params, warc_recorder):
self._phantomjs_driver_factory = phantomjs_driver_factory
self._root_path = root_path
self._processing_rule = processing_rule
self._file_writer_session = file_writer_session
self._request = request
self._response = response
self._item_session = item_session
self._params = params
self._warc_recorder = warc_recorder
self._temp_filenames = []
self._action_warc_record = None
@asyncio.coroutine
def run(self):
scrape_snapshot_path = self._get_temp_path('phantom', suffix='.html')
action_log_path = self._get_temp_path('phantom-action', suffix='.txt')
event_log_path = self._get_temp_path('phantom-event', suffix='.txt')
snapshot_paths = [scrape_snapshot_path]
snapshot_paths.extend(self._get_snapshot_paths())
url = self._item_session.url_record.url
driver_params = PhantomJSDriverParams(
url=url,
snapshot_paths=snapshot_paths,
wait_time=self._params.wait_time,
num_scrolls=self._params.num_scrolls,
smart_scroll=self._params.smart_scroll,
snapshot=self._params.snapshot,
viewport_size=self._params.viewport_size,
paper_size=self._params.paper_size,
event_log_filename=event_log_path,
action_log_filename=action_log_path,
custom_headers=self._params.custom_headers,
page_settings=self._params.page_settings,
)
driver = self._phantomjs_driver_factory(params=driver_params)
_logger.info(__(
_('PhantomJS fetching ‘{url}’.'),
url=url
))
with contextlib.closing(driver):
yield from driver.start()
# FIXME: we don't account that things might be scrolling and
# downloading so it might not be a good idea to timeout like
# this
if self._params.load_time:
yield from asyncio.wait_for(
driver.process.wait(), self._params.load_time
)
else:
yield from driver.process.wait()
if driver.process.returncode != 0:
raise PhantomJSCrashed(
'PhantomJS exited with code {}'
.format(driver.process.returncode)
)
if self._warc_recorder:
self._add_warc_action_log(action_log_path, url)
for path in snapshot_paths:
self._add_warc_snapshot(path, url)
_logger.info(__(
_('PhantomJS fetched ‘{url}’.'),
url=url
))
def _get_temp_path(self, hint, suffix='.tmp'):
temp_fd, temp_path = tempfile.mkstemp(
dir=self._root_path, prefix='tmp-wpull-{}'.format(hint), suffix=suffix
)
os.close(temp_fd)
self._temp_filenames.append(temp_path)
return temp_path
def _get_snapshot_paths(self, infix='snapshot'):
for snapshot_type in self._params.snapshot_types or ():
path = self._file_writer_session.extra_resource_path(
'.{infix}.{file_type}'
.format(infix=infix, file_type=snapshot_type)
)
if not path:
temp_fd, temp_path = tempfile.mkstemp(
dir=self._root_path, prefix='tmp-phnsh',
suffix='.{}'.format(snapshot_type)
)
os.close(temp_fd)
path = temp_path
self._temp_filenames.append(temp_path)
yield path
def _add_warc_action_log(self, path, url):
'''Add the action log to the WARC file.'''
_logger.debug('Adding action log record.')
actions = []
with open(path, 'r', encoding='utf-8', errors='replace') as file:
for line in file:
actions.append(json.loads(line))
log_data = json.dumps(
{'actions': actions},
indent=4,
).encode('utf-8')
self._action_warc_record = record = WARCRecord()
record.set_common_fields('metadata', 'application/json')
record.fields['WARC-Target-URI'] = 'urn:X-wpull:snapshot?url={0}' \
.format(wpull.url.percent_encode_query_value(url))
record.block_file = io.BytesIO(log_data)
self._warc_recorder.set_length_and_maybe_checksums(record)
self._warc_recorder.write_record(record)
def _add_warc_snapshot(self, filename, url):
'''Add the snapshot to the WARC file.'''
_logger.debug('Adding snapshot record.')
extension = os.path.splitext(filename)[1]
content_type = {
'.pdf': 'application/pdf',
'.html': 'text/html',
'.png': 'image/png',
'.gif': 'image/gif'
}[extension]
record = WARCRecord()
record.set_common_fields('resource', content_type)
record.fields['WARC-Target-URI'] = 'urn:X-wpull:snapshot?url={0}' \
.format(wpull.url.percent_encode_query_value(url))
if self._action_warc_record:
record.fields['WARC-Concurrent-To'] = \
self._action_warc_record.fields[WARCRecord.WARC_RECORD_ID]
with open(filename, 'rb') as in_file:
record.block_file = in_file
self._warc_recorder.set_length_and_maybe_checksums(record)
self._warc_recorder.write_record(record)
def _scrape_document(self):
'''Extract links from the DOM.'''
mock_response = self._new_mock_response(
self._response, self._get_temp_path('phantom', '.html')
)
self._item_session.request = self._request
self._item_session.response = mock_response
self._processing_rule.scrape_document(self._item_session)
if mock_response.body:
mock_response.body.close()
def _new_mock_response(self, response, file_path):
'''Return a new mock Response with the content.'''
mock_response = copy.copy(response)
mock_response.body = Body(open(file_path, 'rb'))
mock_response.fields = NameValueRecord()
for name, value in response.fields.get_all():
mock_response.fields.add(name, value)
mock_response.fields['Content-Type'] = 'text/html; charset="utf-8"'
return mock_response
def close(self):
'''Clean up.'''
for path in self._temp_filenames:
if os.path.exists(path):
os.remove(path)
|
class PhantomJSCoprocessorSession(object):
'''PhantomJS coprocessor session.'''
def __init__(self, phantomjs_driver_factory, root_path,
processing_rule, file_writer_session,
request, response,
item_session: ItemSession, params, warc_recorder):
pass
@asyncio.coroutine
def run(self):
pass
def _get_temp_path(self, hint, suffix='.tmp'):
pass
def _get_snapshot_paths(self, infix='snapshot'):
pass
def _add_warc_action_log(self, path, url):
'''Add the action log to the WARC file.'''
pass
def _add_warc_snapshot(self, filename, url):
'''Add the snapshot to the WARC file.'''
pass
def _scrape_document(self):
'''Extract links from the DOM.'''
pass
def _new_mock_response(self, response, file_path):
'''Return a new mock Response with the content.'''
pass
def close(self):
'''Clean up.'''
pass
| 11 | 6 | 20 | 3 | 16 | 1 | 2 | 0.06 | 1 | 6 | 5 | 0 | 9 | 11 | 9 | 9 | 188 | 35 | 144 | 50 | 130 | 9 | 94 | 43 | 84 | 5 | 1 | 2 | 21 |
6,853 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/coprocessor/proxy.py
|
wpull.processor.coprocessor.proxy.ProxyCoprocessor
|
class ProxyCoprocessor(object):
'''Proxy coprocessor.'''
def __init__(self, app_session: AppSession):
self._app_session = app_session
proxy_server = cast(HTTPProxyServer,
self._app_session.factory['HTTPProxyServer'])
proxy_server.event_dispatcher.add_listener(
HTTPProxyServer.Event.begin_session,
self._proxy_server_session_callback)
def _proxy_server_session_callback(self, session: HTTPProxySession):
ProxyCoprocessorSession(self._app_session, session)
|
class ProxyCoprocessor(object):
'''Proxy coprocessor.'''
def __init__(self, app_session: AppSession):
pass
def _proxy_server_session_callback(self, session: HTTPProxySession):
pass
| 3 | 1 | 5 | 1 | 5 | 0 | 1 | 0.1 | 1 | 5 | 5 | 0 | 2 | 1 | 2 | 2 | 13 | 2 | 10 | 5 | 7 | 1 | 7 | 5 | 4 | 1 | 1 | 0 | 2 |
6,854 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/ftp/request.py
|
wpull.protocol.ftp.request.Command
|
class Command(SerializableMixin, DictableMixin):
'''FTP request command.
Encoding is UTF-8.
Attributes:
name (str): The command. Usually 4 characters or less.
argument (str): Optional argument for the command.
'''
def __init__(self, name=None, argument=''):
self._name = None
if name:
self.name = name
self.argument = argument
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value.upper()
def parse(self, data):
assert self.name is None
assert not self.argument
match = re.match(br'(\w+) ?([^\r\n]*)', data)
if not match:
raise ProtocolError('Failed to parse command.')
self.name = match.group(1).decode('utf-8', errors='surrogateescape')
self.argument = match.group(2).decode('utf-8', errors='surrogateescape')
def to_bytes(self):
return '{0} {1}\r\n'.format(self.name, self.argument).encode(
'utf-8', errors='surrogateescape')
def to_dict(self):
return {
'name': self.name,
'argument': self.argument,
}
|
class Command(SerializableMixin, DictableMixin):
'''FTP request command.
Encoding is UTF-8.
Attributes:
name (str): The command. Usually 4 characters or less.
argument (str): Optional argument for the command.
'''
def __init__(self, name=None, argument=''):
pass
@property
def name(self):
pass
@name.setter
def name(self):
pass
def parse(self, data):
pass
def to_bytes(self):
pass
def to_dict(self):
pass
| 9 | 1 | 5 | 1 | 4 | 0 | 1 | 0.21 | 2 | 1 | 1 | 0 | 6 | 2 | 6 | 10 | 46 | 12 | 28 | 12 | 19 | 6 | 22 | 10 | 15 | 2 | 2 | 1 | 8 |
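A quick sketch of the Command class above; the command name is normalised to upper case and serialised with a trailing CRLF.

from wpull.protocol.ftp.request import Command

command = Command('user', 'smith')
assert command.name == 'USER'
assert command.to_bytes() == b'USER smith\r\n'

parsed = Command()
parsed.parse(b'RETR /pub/file.txt\r\n')
assert (parsed.name, parsed.argument) == ('RETR', '/pub/file.txt')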
6,855 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/url_test.py
|
wpull.url_test.TestURL
|
class TestURL(unittest.TestCase):
@unittest.skip('experiment only')
def test_lib_vs_wpull(self):
result_1 = timeit.timeit('''
from urllib.parse import urlsplit
for i in range(1000):
urlsplit('http://donkey{i}.com/waffles{i}'.format(i=i))
''', number=100)
result_2 = timeit.timeit('''
from wpull.url import URLInfo
parse = URLInfo.parse
for i in range(1000):
parse('http://donkey{i}.com/waffles{i}'.format(i=i))
''', number=100)
print(result_1, result_2)
def test_url_info_naked(self):
self.assertEqual(
'http://example.com/',
URLInfo.parse('Example.Com').url
)
self.assertEqual(
'http://example.com/',
URLInfo.parse('//example.com').url
)
self.assertEqual(
'http://example.com/Blah',
URLInfo.parse('//example.com/Blah').url
)
url_info = URLInfo.parse('example.com:8080')
self.assertEqual('http://example.com:8080/', url_info.url)
self.assertEqual('example.com:8080', url_info.hostname_with_port)
self.assertEqual(8080, url_info.port)
url_info = URLInfo.parse('localhost:8080/A/b/C:')
self.assertEqual('http://localhost:8080/A/b/C:', url_info.url)
self.assertEqual('localhost:8080', url_info.hostname_with_port)
self.assertEqual(8080, url_info.port)
self.assertEqual(
'http://example.com/Asdf',
URLInfo.parse('example.com/Asdf#Blah').url
)
self.assertEqual(
'http://example.com/asdf/Ghjk',
URLInfo.parse('example.com/asdf/Ghjk#blah').url
)
self.assertEqual(
'http://example.com/',
URLInfo.parse('example.com/').url
)
self.assertEqual(
'https://example.com/',
URLInfo.parse('https://example.com').url
)
def test_url_info_parts(self):
url_info = URLInfo.parse(
'HTTP://userName:pass%3Aword@[A::1]:81/ásdF\u200C/ghjK?a=b=c&D#/?')
self.assertEqual(
'http://userName:pass:word@[a::1]:81/'
'%C3%A1sdF%E2%80%8C/ghjK?a=b=c&D',
url_info.url
)
self.assertEqual('http', url_info.scheme)
self.assertEqual('userName:pass%3Aword@[A::1]:81',
url_info.authority)
self.assertEqual('/ásdF\u200C/ghjK?a=b=c&D#/?', url_info.resource)
self.assertEqual('userName', url_info.username)
self.assertEqual('pass:word', url_info.password)
self.assertEqual('[A::1]:81', url_info.host)
self.assertEqual('[a::1]:81', url_info.hostname_with_port)
self.assertEqual('a::1', url_info.hostname)
self.assertEqual(81, url_info.port)
self.assertEqual('/%C3%A1sdF%E2%80%8C/ghjK', url_info.path)
self.assertEqual('a=b=c&D', url_info.query)
self.assertEqual('/?', url_info.fragment)
self.assertEqual('utf-8', url_info.encoding)
self.assertEqual(
'HTTP://userName:pass%3Aword@[A::1]:81/ásdF\u200C/ghjK?a=b=c&D#/?',
url_info.raw)
self.assertEqual(('/%C3%A1sdF%E2%80%8C', 'ghjK'),
url_info.split_path())
url_info = URLInfo.parse(
'Ftp://N00B:hunter2@LocalHost.Example/mydocs/'
)
self.assertEqual('ftp', url_info.scheme)
self.assertEqual('N00B:hunter2@LocalHost.Example',
url_info.authority)
self.assertEqual('/mydocs/', url_info.resource)
self.assertEqual('N00B', url_info.username)
self.assertEqual('hunter2', url_info.password)
self.assertEqual('LocalHost.Example', url_info.host)
self.assertEqual('localhost.example', url_info.hostname_with_port)
self.assertEqual('localhost.example', url_info.hostname)
self.assertEqual(21, url_info.port)
self.assertEqual('/mydocs/', url_info.path)
self.assertFalse(url_info.query)
self.assertFalse(url_info.fragment)
self.assertEqual('utf-8', url_info.encoding)
self.assertEqual(
'Ftp://N00B:hunter2@LocalHost.Example/mydocs/',
url_info.raw)
self.assertEqual(('/mydocs', ''), url_info.split_path())
def test_url_info_default_port(self):
self.assertEqual(
80,
URLInfo.parse('http://example.com').port
)
self.assertEqual(
443,
URLInfo.parse('https://example.com').port
)
self.assertEqual(
'example.com',
URLInfo.parse('http://example.com').hostname_with_port
)
self.assertEqual(
'example.com',
URLInfo.parse('https://example.com').hostname_with_port
)
self.assertEqual(
'http://example.com/',
URLInfo.parse('http://example.com:80').url
)
def test_url_info_percent_encode(self):
self.assertEqual(
'http://example.com/%C3%B0',
URLInfo.parse('http://example.com/ð').url
)
self.assertEqual(
'http://example.com/blah%20blah/',
URLInfo.parse('example.com/blah blah/').url
)
self.assertEqual(
'http://example.com/blah%20blah/',
URLInfo.parse('example.com/blah%20blah/').url
)
self.assertEqual(
'http://www.xn--hda.com/asdf',
URLInfo.parse('www.ð.com/asdf').url
)
self.assertEqual(
'www.xn--hda.com',
URLInfo.parse('www.ð.com/asdf').hostname
)
self.assertEqual(
'www.xn--hda.com',
URLInfo.parse('www.xn--hda.com/asdf').hostname
)
self.assertEqual(
'http://example.com/?blah=%C3%B0',
URLInfo.parse('example.com?blah=ð').url
)
self.assertEqual(
'http://example.com/?blah=%C3%B0',
URLInfo.parse('example.com?blah=%c3%b0').url
)
url_info = URLInfo.parse('example.com/文字化け/?blah=文字化け',
encoding='shift_jis')
self.assertEqual(
'http://example.com/%95%B6%8E%9A%89%BB%82%AF/'
'?blah=%95%B6%8E%9A%89%BB%82%AF',
url_info.url
)
self.assertEqual(
'/%95%B6%8E%9A%89%BB%82%AF/',
url_info.path
)
self.assertEqual(
'blah=%95%B6%8E%9A%89%BB%82%AF',
url_info.query
)
self.assertEqual(
'shift_jis',
url_info.encoding
)
self.assertEqual(
'http://example.com/%95%B6%8E%9A%89%BB%82%AF/'
'?blah=%95%B6%8E%9A%89%BB%82%AF',
URLInfo.parse('example.com/%95%B6%8E%9A%89%BB%82%AF/'
'?blah=%95%B6%8E%9A%89%BB%82%AF',
encoding='shift_jis').url
)
self.assertEqual(
'http://example.com/%95%B6%8E%9A%89%BB%82%AF/'
'?blah=%95%B6%8E%9A%89%BB%82%AF',
URLInfo.parse('example.com/%95%B6%8E%9A%89%BB%82%AF/'
'?blah=%95%B6%8E%9A%89%BB%82%AF').url
)
self.assertEqual(
'http://example.com/'
'?blah=http%3A%2F%2Fexample.com%2F%3Ffail%3Dtrue',
URLInfo.parse(
'http://example.com/'
'?blah=http%3A%2F%2Fexample.com%2F%3Ffail%3Dtrue').url
)
self.assertEqual(
'http://example.com/'
'?blah=http://example.com/?fail%3Dtrue',
URLInfo.parse(
'http://example.com/'
'?blah=http://example.com/?fail%3Dtrue').url
)
self.assertEqual(
'http://example.com/??blah=blah[0:]=bl%61h?blah%22&d%26_',
URLInfo.parse(
'http://example.com/??blah=blah[0:]=bl%61h?blah"&d%26_').url
)
def test_url_info_not_http(self):
url_info = URLInfo.parse('mailto:user@example.com')
self.assertEqual('mailto:user@example.com', url_info.url)
self.assertEqual('mailto', url_info.scheme)
def test_url_info_invalids(self):
self.assertRaises(ValueError, URLInfo.parse, '')
self.assertRaises(ValueError, URLInfo.parse, '#')
self.assertRaises(ValueError, URLInfo.parse, 'http://')
self.assertRaises(ValueError, URLInfo.parse, 'example....com')
self.assertRaises(ValueError, URLInfo.parse, 'http://example....com')
self.assertRaises(ValueError, URLInfo.parse, 'http://example…com')
self.assertRaises(ValueError, URLInfo.parse, 'http://[34.4kf]::4')
self.assertRaises(ValueError, URLInfo.parse, 'http://[34.4kf::4')
self.assertRaises(ValueError, URLInfo.parse, 'http://dmn3]:3a:45')
self.assertRaises(ValueError, URLInfo.parse, ':38/3')
self.assertRaises(ValueError, URLInfo.parse, 'http://][a:@1]')
self.assertRaises(ValueError, URLInfo.parse, 'http://[[aa]]:4:]6')
self.assertRaises(ValueError, URLInfo.parse, 'http://[a]')
self.assertRaises(ValueError, URLInfo.parse, 'http://[a]')
self.assertRaises(ValueError, URLInfo.parse, 'http://[[a]')
self.assertRaises(ValueError, URLInfo.parse, 'http://[[a]]a]')
self.assertRaises(ValueError, URLInfo.parse, 'http://[[a:a]]')
self.assertRaises(ValueError, URLInfo.parse, 'http:///')
self.assertRaises(ValueError, URLInfo.parse, 'http:///horse')
self.assertRaises(ValueError, URLInfo.parse, 'http://?what?')
self.assertRaises(ValueError, URLInfo.parse, 'http://#egg=wpull')
self.assertRaises(ValueError, URLInfo.parse,
'http://:@example.com:?@/')
self.assertRaises(ValueError, URLInfo.parse, 'http://\x00/')
self.assertRaises(ValueError, URLInfo.parse, 'http:/a')
self.assertRaises(ValueError, URLInfo.parse, 'http://@@example.com/@')
self.assertRaises(
ValueError, URLInfo.parse,
'http://fat32defragmenter.internets::80')
self.assertRaises(
ValueError, URLInfo.parse,
'http://fat32defragmenter.internets:80/')
self.assertRaises(ValueError, URLInfo.parse, 'http:// /spaaaace')
self.assertRaises(
ValueError, URLInfo.parse,
'http://a-long-long-time-ago-the-earth-was-ruled-by-dinosaurs-'
'they-were-big-so-not-a-lot-of-people-went-around-hassling-them-'
'actually-no-people-went-around-hassling-them-'
'because-there-weren-t-any-people-yet-'
'just-the-first-tiny-mammals-'
'basically-life-was-good-'
'lou-it-just-dont-get-no-better-than-this-'
'yeah-'
'then-something-happened-'
'a-giant-meteorite-struck-the-earth-'
'goodbye-dinosaurs-'
'but-what-if-the-dinosaurs-werent-all-destroyed-'
'what-if-the-impact-of-that-meteorite-created-a-parallel-dimension-'
'where-the-dinosaurs-continue-to-thrive-'
'and-evolved-into-intelligent-vicious-aggressive-beings-'
'just-like-us-'
'and-hey-what-if-they-found-their-way-back.movie'
)
self.assertRaises(
ValueError, URLInfo.parse, 'http://[...]/python.xml%22')
self.assertRaises(
ValueError, URLInfo.parse, 'http://[…]/python.xml%22')
self.assertRaises(
ValueError, URLInfo.parse, 'http://[.]/python.xml%22')
self.assertRaises(
ValueError, URLInfo.parse,
'http://wow:99999999999999999999999999999999999999999999999999999'
'9999999999999999999999999999999999999999999999999999999999999999')
self.assertRaises(
ValueError, URLInfo.parse,
'http://wow:-9999999999999999999999999999999999999999999999999999'
'9999999999999999999999999999999999999999999999999999999999999999')
def test_url_info_path_folding(self):
self.assertEqual(
'http://example.com/',
URLInfo.parse('http://example.com/.').url
)
self.assertEqual(
'http://example.com/',
URLInfo.parse('http://example.com/../').url
)
self.assertEqual(
'http://example.com/index.html',
URLInfo.parse('http://example.com/../index.html').url
)
self.assertEqual(
'http://example.com/b/style.css',
URLInfo.parse('http://example.com/a/../../b/style.css').url
)
self.assertEqual(
'http://example.com/a/style.css',
URLInfo.parse('http://example.com/a/b/../style.css').url
)
def test_url_info_reserved_char_is_ok(self):
self.assertEqual(
'http://example.com/@49IMG.DLL/$SESSION$/image.png;large',
URLInfo.parse(
'http://example.com/@49IMG.DLL/$SESSION$/image.png;large').url
)
self.assertEqual(
'http://example.com/@49IMG.DLL/$SESSION$/imag%C3%A9.png;large',
URLInfo.parse(
'http://example.com/@49IMG.DLL/$SESSION$/imagé.png;large').url
)
self.assertEqual(
'http://example.com/$c/%system.exe/',
URLInfo.parse('http://example.com/$c/%system.exe/').url
)
def test_url_info_misleading_parts(self):
self.assertEqual(
'http://example.com/?a',
URLInfo.parse('http://example.com?a').url
)
self.assertEqual(
'http://example.com/?a?',
URLInfo.parse('http://example.com?a?').url
)
self.assertEqual(
'http://example.com/',
URLInfo.parse('http://example.com#a').url
)
self.assertEqual(
'http://example.com/',
URLInfo.parse('http://example.com#a?').url
)
self.assertEqual(
'http://example.com/?a',
URLInfo.parse('http://example.com?a#').url
)
self.assertEqual(
'http://example.com/:10',
URLInfo.parse('http://example.com/:10').url
)
self.assertEqual(
'http://example.com/?@/',
URLInfo.parse('http://:@example.com?@/').url
)
self.assertEqual(
'http://example.com/http:/example.com',
URLInfo.parse('http://:@example.com/http://example.com').url
)
def test_url_info_query(self):
self.assertEqual(
'http://example.com/?a=',
URLInfo.parse('http://example.com?a=').url
)
self.assertEqual(
'http://example.com/?a=1',
URLInfo.parse('http://example.com?a=1').url
)
self.assertEqual(
'http://example.com/?a=1&b',
URLInfo.parse('http://example.com?a=1&b').url
)
self.assertEqual(
'http://example.com/?a=1&b=',
URLInfo.parse('http://example.com?a=1&b=').url
)
def test_url_info_ipv6(self):
self.assertEqual(
'https://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8080/ipv6',
URLInfo.parse(
'https://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8080/ipv6'
).url
)
self.assertEqual(
'[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8080',
URLInfo.parse(
'http://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8080/ipv6'
).hostname_with_port
)
self.assertEqual(
'http://[2001:db8:85a3:8d3:1319:8a2e:370:7348]/ipv6',
URLInfo.parse(
'http://[2001:db8:85a3:8d3:1319:8a2e:370:7348]/ipv6'
).url
)
self.assertEqual(
'[2001:db8:85a3:8d3:1319:8a2e:370:7348]',
URLInfo.parse(
'http://[2001:db8:85a3:8d3:1319:8a2e:370:7348]/ipv6'
).hostname_with_port
)
def test_url_info_trailing_dot(self):
self.assertEqual(
'http://example.com./',
URLInfo.parse('http://example.com./').url
)
self.assertEqual(
'http://example.com.:81/',
URLInfo.parse('http://example.com.:81/').url
)
def test_url_info_usrename_password(self):
self.assertEqual(
'http://UserName@example.com/',
URLInfo.parse('http://UserName@example.com/').url
)
self.assertEqual(
'http://UserName:PassWord@example.com/',
URLInfo.parse('http://UserName:PassWord@example.com/').url
)
self.assertEqual(
'http://:PassWord@example.com/',
URLInfo.parse('http://:PassWord@example.com/').url
)
self.assertEqual(
'http://UserName:Pass:Word@example.com/',
URLInfo.parse('http://UserName:Pass:Word@example.com/').url
)
self.assertEqual(
'http://User%40Name:Pass:Word@example.com/',
URLInfo.parse('http://User%40Name:Pass%3AWord@example.com/').url
)
self.assertEqual(
'http://User%20Name%3A@example.com/',
URLInfo.parse('http://User Name%3A:@example.com/').url
)
def test_url_info_round_trip(self):
urls = [
'http://example.com/blah%20blah/',
'example.com:81?blah=%c3%B0',
'http://example.com/a/../../b/style.css',
'http://example.com/'
'?blah=http%3A%2F%2Fexample.com%2F%3Ffail%3Dtrue',
'http://example.com/??blah=blah[0:]=bl%61h?blah"&d%26_',
'http://[2001:db8:85a3:8d3:1319:8a2e:370:7348]/ipv6',
]
for url in urls:
URLInfo.parse(URLInfo.parse(url).url)
def test_ip_address_normalization(self):
self.assertEqual(
'http://192.0.2.235/',
URLInfo.parse('http://0xC0.0x00.0x02.0xEB').url
)
self.assertEqual(
'http://192.0.2.235/',
URLInfo.parse('http://0300.0000.0002.0353').url
)
self.assertEqual(
'http://192.0.2.235/',
URLInfo.parse('http://0xC00002EB/').url
)
self.assertEqual(
'http://192.0.2.235/',
URLInfo.parse('http://3221226219/').url
)
self.assertEqual(
'http://192.0.2.235/',
URLInfo.parse('http://030000001353/').url
)
self.assertEqual(
'http://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8080/ipv6',
URLInfo.parse(
'http://[2001:Db8:85a3:8d3:1319:8a2e:370:7348]:8080/ipv6'
).url
)
self.assertEqual(
'http://[::1]/',
URLInfo.parse('http://[0:0:0:0:0:0:0:1]').url
)
self.assertEqual(
'http://[::ffff:c000:280]/',
URLInfo.parse('http://[::ffff:192.0.2.128]/').url
)
def test_url_info_to_dict(self):
url_info = URLInfo.parse('https://example.com/file.jpg')
url_info_dict = url_info.to_dict()
self.assertEqual('/file.jpg', url_info_dict['path'])
self.assertEqual('example.com', url_info_dict['hostname'])
self.assertEqual('https', url_info_dict['scheme'])
self.assertEqual(443, url_info_dict['port'])
self.assertEqual('utf-8', url_info_dict['encoding'])
def test_schemes_simialar(self):
self.assertTrue(schemes_similar('http', 'http'))
self.assertTrue(schemes_similar('https', 'http'))
self.assertTrue(schemes_similar('http', 'https'))
self.assertTrue(schemes_similar('https', 'https'))
self.assertFalse(schemes_similar('ftp', 'http'))
self.assertTrue(schemes_similar('email', 'email'))
def test_is_subdir(self):
self.assertTrue(is_subdir('/profile/blog', '/profile/blog/123'))
self.assertTrue(is_subdir('/profile/blog/', '/profile/blog/123'))
self.assertFalse(is_subdir('/profile/blog', '/profile/photo'))
self.assertTrue(is_subdir('/profile/blog', '/profile/blog/123',
trailing_slash=True))
self.assertTrue(is_subdir('/profile/blog/', '/profile/blog/123',
trailing_slash=True))
self.assertFalse(is_subdir('/profile/blog/', '/profile/photo',
trailing_slash=True))
self.assertTrue(is_subdir('/profile/blog', '/profile/photo',
trailing_slash=True))
self.assertTrue(is_subdir('/profile/blog-*-', '/profile/blog-1-/',
wildcards=True))
self.assertFalse(is_subdir('/profile/blog-*-', '/profile/blog/',
wildcards=True))
self.assertFalse(is_subdir('/profile/blog-*-', '/profile/',
wildcards=True))
def test_split_query(self):
self.assertEqual([],
split_query('&'))
self.assertEqual([('a', 'ð')],
split_query('a=ð'))
self.assertEqual([('a', 'ð')],
split_query('a=ð&b'))
self.assertEqual([('a', 'ð')],
split_query('a=ð&b='))
self.assertEqual([('a', 'ð'), ('b', '')],
split_query('a=ð&b=', keep_blank_values=True))
self.assertEqual([('a', 'ð'), ('b', '%2F')],
split_query('a=ð&b=%2F'))
def test_url_percent_encode(self):
self.assertEqual('a ', percent_decode('a%20'))
self.assertEqual('að', percent_decode('a%C3%B0'))
self.assertEqual('a ', percent_decode_plus('a+'))
self.assertEqual('að', percent_decode_plus('a%C3%B0'))
self.assertEqual('a%20', percent_encode('a '))
self.assertEqual('a%C3%B0', percent_encode('að'))
self.assertEqual('a+', percent_encode_plus('a '))
self.assertEqual('a%C3%B0', percent_encode_plus('að'))
def test_uppercase_percent_encoding(self):
self.assertEqual(
'ð',
uppercase_percent_encoding('ð')
)
self.assertEqual(
'qwerty%%asdf',
uppercase_percent_encoding('qwerty%%asdf')
)
self.assertEqual(
'cAt%2F%EE%AB',
uppercase_percent_encoding('cAt%2f%ee%ab')
)
def test_url_join(self):
self.assertEqual(
'http://example.net',
urljoin('http://example.com', '//example.net')
)
self.assertEqual(
'https://example.net',
urljoin('https://example.com', '//example.net')
)
self.assertEqual(
'http://example.net',
urljoin('http://example.com/', '//example.net')
)
self.assertEqual(
'https://example.net',
urljoin('https://example.com/', '//example.net')
)
self.assertEqual(
'http://example.net/',
urljoin('http://example.com/', '//example.net/')
)
self.assertEqual(
'https://example.net/',
urljoin('https://example.com/', '//example.net/')
)
self.assertEqual(
'https://example.com/asdf',
urljoin('https://example.com/cookies', '/asdf')
)
self.assertEqual(
'http://example.com/asdf',
urljoin('http://example.com/cookies', 'asdf')
)
self.assertEqual(
'http://example.com/cookies/asdf',
urljoin('http://example.com/cookies/', 'asdf')
)
self.assertEqual(
'https://example.net/asdf',
urljoin('https://example.net/', '/asdf')
)
self.assertEqual(
'http://example.net/asdf',
urljoin('https://example.com', 'http://example.net/asdf')
)
self.assertEqual(
'http://example.com/',
urljoin('http://example.com', '//example.com/')
)
self.assertEqual(
'http://example.com/',
urljoin('http://example.com/', '//')
)
self.assertEqual(
'http://example.com/',
urljoin('http://example.com/', '///')
)
self.assertEqual(
'http://example.com/a/style.css',
urljoin('http://example.com/a/', './style.css')
)
self.assertEqual(
'http://example.com/style.css',
urljoin('http://example.com/a/', './../style.css')
)
self.assertEqual(
'sausage',
urljoin('mailto:hotdogbun', 'sausage')
)
self.assertEqual(
'mailto://sausage',
urljoin('mailto:hotdogbun', '//sausage')
)
self.assertEqual(
'hotdogbun://sausage',
urljoin('hotdogbun', '//sausage')
)
def test_flatten_path(self):
self.assertEqual('/', flatten_path(''))
self.assertEqual('//', flatten_path('//'))
self.assertEqual('///', flatten_path('///'))
self.assertEqual('/http://', flatten_path('/http://'))
self.assertEqual('/', flatten_path('//', flatten_slashes=True))
self.assertEqual('/', flatten_path('///', flatten_slashes=True))
self.assertEqual('/http:/', flatten_path('/http://',
flatten_slashes=True))
self.assertEqual('/a', flatten_path('a'))
self.assertEqual('/a/', flatten_path('a/'))
self.assertEqual('/', flatten_path('.'))
self.assertEqual('/', flatten_path('./'))
self.assertEqual('/', flatten_path('/.'))
self.assertEqual('/', flatten_path('/..'))
self.assertEqual('/', flatten_path('../'))
self.assertEqual('/', flatten_path('./.'))
self.assertEqual('/', flatten_path('/'))
self.assertEqual('/', flatten_path('/../../../'))
self.assertEqual('/', flatten_path('/.././'))
self.assertEqual('/a', flatten_path('/../a/../a'))
self.assertEqual('/a/', flatten_path('/../a/../a/'))
self.assertEqual('//a/a/', flatten_path('//a//../a/'))
self.assertEqual('/a//a///a', flatten_path('/a//a///a'))
self.assertEqual('/a/',
flatten_path('//a//../a/', flatten_slashes=True))
self.assertEqual('/a/a/a',
flatten_path('/a//a///a', flatten_slashes=True))
self.assertEqual('/index.html', flatten_path('/./index.html'))
self.assertEqual('/index.html', flatten_path('/../index.html'))
self.assertEqual('/a/index.html', flatten_path('/a/./index.html'))
self.assertEqual('/index.html', flatten_path('/a/../index.html'))
self.assertEqual('/doc/index.html', flatten_path('/../doc/index.html'))
self.assertEqual(
'/dog/doc/index.html',
flatten_path('/dog/cat/../doc/index.html')
)
self.assertEqual(
'/dog/doc/index.html',
flatten_path('/dog/../dog/./cat/../doc/././../doc/index.html')
)
self.assertEqual(
'/dog//doc//doc/index.html/',
flatten_path('/dog/../dog//./cat/../doc/.///./../doc/index.html/')
)
self.assertEqual(
'/dog/doc/index.html/',
flatten_path('/dog/../dog//./cat/../doc/.///./../doc/index.html/',
flatten_slashes=True)
)
def test_parse_url_or_log(self):
self.assertTrue(parse_url_or_log('http://example.com'))
self.assertFalse(parse_url_or_log('http://'))
|
class TestURL(unittest.TestCase):
@unittest.skip('experiment only')
def test_lib_vs_wpull(self):
pass
def test_url_info_naked(self):
pass
def test_url_info_parts(self):
pass
def test_url_info_default_port(self):
pass
def test_url_info_percent_encode(self):
pass
def test_url_info_not_http(self):
pass
def test_url_info_invalids(self):
pass
def test_url_info_path_folding(self):
pass
def test_url_info_reserved_char_is_ok(self):
pass
def test_url_info_misleading_parts(self):
pass
def test_url_info_query(self):
pass
def test_url_info_ipv6(self):
pass
def test_url_info_trailing_dot(self):
pass
def test_url_info_usrename_password(self):
pass
def test_url_info_round_trip(self):
pass
def test_ip_address_normalization(self):
pass
def test_url_info_to_dict(self):
pass
def test_schemes_simialar(self):
pass
def test_is_subdir(self):
pass
def test_split_query(self):
pass
def test_url_percent_encode(self):
pass
def test_uppercase_percent_encoding(self):
pass
def test_url_join(self):
pass
def test_flatten_path(self):
pass
def test_parse_url_or_log(self):
pass
| 27 | 0 | 27 | 1 | 27 | 0 | 1 | 0.02 | 1 | 2 | 1 | 0 | 25 | 0 | 25 | 97 | 703 | 37 | 666 | 37 | 639 | 10 | 274 | 36 | 248 | 2 | 2 | 1 | 26 |
6,856 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/ftp/ls/listing.py
|
wpull.protocol.ftp.ls.listing.UnknownListingError
|
class UnknownListingError(ListingError):
'''Failed to determine type of listing.'''
|
class UnknownListingError(ListingError):
'''Failed to determine type of listing.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 5 | 0 | 0 |
6,857 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/ftp/ls/listing.py
|
wpull.protocol.ftp.ls.listing.ListingParser
|
class ListingParser(LineParser):
'''Listing parser.
Args:
text (str): A text listing.
file: A file object in text mode containing the listing.
'''
def __init__(self, text=None, file=None):
super().__init__()
self._text = text
self._file = file
def parse_input(self):
'''Parse the listings.
Returns:
iter: An iterable of :class:`.ftp.ls.listing.FileEntry`
'''
if self._text:
lines = iter(self._text.splitlines())
elif self._file:
lines = self._file
else:
lines = ()
sample_lines = []
for line in lines:
if len(sample_lines) > 100:
break
sample_lines.append(line)
lines = itertools.chain(sample_lines, lines)
self.guess_type(sample_lines)
datetime_format = wpull.protocol.ftp.ls.date.guess_datetime_format(
sample_lines)
self.set_datetime_format(datetime_format)
return self.parse(lines)
|
class ListingParser(LineParser):
'''Listing parser.
Args:
text (str): A text listing.
file: A file object in text mode containing the listing.
'''
def __init__(self, text=None, file=None):
pass
def parse_input(self):
'''Parse the listings.
Returns:
iter: An iterable of :class:`.ftp.ls.listing.FileEntry`
'''
pass
| 3 | 2 | 19 | 6 | 11 | 2 | 3 | 0.39 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 10 | 45 | 13 | 23 | 9 | 20 | 9 | 20 | 9 | 17 | 5 | 2 | 2 | 6 |
6,858 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/ftp/ls/listing.py
|
wpull.protocol.ftp.ls.listing.ListingError
|
class ListingError(ValueError):
'''Error during parsing a listing.'''
|
class ListingError(ValueError):
'''Error during parsing a listing.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 11 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 4 | 0 | 0 |
6,859 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/ftp/ls/listing.py
|
wpull.protocol.ftp.ls.listing.LineParser
|
class LineParser(object):
'''Parse individual lines in a listing.'''
def __init__(self):
self.type = None
self.date_format = None
self.is_day_period = None
def guess_type(self, sample_lines):
'''Guess the type of listing from a sample of lines.'''
self.type = guess_listing_type(sample_lines)
return self.type
def set_datetime_format(self, datetime_format):
'''Set the datetime format.'''
self.date_format, self.is_day_period = datetime_format
def parse(self, lines):
'''Parse the lines.'''
if self.type == 'msdos':
return self.parse_msdos(lines)
elif self.type == 'unix':
return self.parse_unix(lines)
elif self.type == 'nlst':
return self.parse_nlst(lines)
else:
raise UnknownListingError('Unsupported listing type.')
def parse_datetime(self, text):
'''Parse datetime from line of text.'''
return parse_datetime(text, date_format=self.date_format,
is_day_period=self.is_day_period)
def parse_nlst(self, lines):
'''Parse lines from a NLST format.'''
for line in lines:
yield FileEntry(line)
def parse_msdos(self, lines):
'''Parse lines from a MS-DOS format.'''
for line in lines:
fields = line.split(None, 4)
date_str = fields[0]
time_str = fields[1]
datetime_str = '{} {}'.format(date_str, time_str)
file_datetime = self.parse_datetime(datetime_str)[0]
if fields[2] == '<DIR>':
file_size = None
file_type = 'dir'
else:
file_size = parse_int(fields[2])
file_type = 'file'
filename = fields[3]
yield FileEntry(filename, file_type, file_size, file_datetime)
def parse_unix(self, lines):
'''Parse listings from a Unix ls command format.'''
# This method uses some Filezilla parsing algorithms
for line in lines:
original_line = line
fields = line.split(' ')
after_perm_index = 0
# Search for the permissions field by checking the file type
for field in fields:
after_perm_index += len(field)
if not field:
continue
# If the filesystem goes corrupt, it may show ? instead
# but I don't really care in that situation.
if field[0] in 'bcdlps-':
if field[0] == 'd':
file_type = 'dir'
elif field[0] == '-':
file_type = 'file'
elif field[0] == 'l':
file_type = 'symlink'
else:
file_type = 'other'
perms = parse_unix_perm(field[1:])
break
else:
raise ListingError('Failed to parse file type.')
line = line[after_perm_index:]
# We look for the position of the date and use the integer
# before it as the file size.
# We look for the position of the time and use the text
# after it as the filename
while line:
try:
datetime_obj, start_index, end_index = self.parse_datetime(line)
except ValueError:
line = line[4:]
else:
break
else:
raise ListingError(
'Could not parse a date from {}'.format(repr(original_line)))
file_size = int(line[:start_index].rstrip().rpartition(' ')[-1])
filename = line[end_index:].strip()
if file_type == 'symlink':
filename, sep, symlink_dest = filename.partition(' -> ')
else:
symlink_dest = None
yield FileEntry(filename, file_type, file_size, datetime_obj,
symlink_dest, perm=perms)
|
class LineParser(object):
'''Parse individual lines in a listing.'''
def __init__(self):
pass
def guess_type(self, sample_lines):
'''Guess the type of listing from a sample of lines.'''
pass
def set_datetime_format(self, datetime_format):
'''Set the datetime format.'''
pass
def parse(self, lines):
'''Parse the lines.'''
pass
def parse_datetime(self, text):
'''Parse datetime from line of text.'''
pass
def parse_nlst(self, lines):
'''Parse lines from a NLST format.'''
pass
def parse_msdos(self, lines):
'''Parse lines from a MS-DOS format.'''
pass
def parse_unix(self, lines):
'''Parse listings from a Unix ls command format.'''
pass
| 9 | 8 | 14 | 2 | 10 | 2 | 3 | 0.2 | 1 | 4 | 2 | 1 | 8 | 3 | 8 | 8 | 122 | 25 | 81 | 33 | 72 | 16 | 70 | 33 | 61 | 11 | 1 | 4 | 24 |
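A minimal, self-contained sketch of the MS-DOS listing logic in LineParser.parse_msdos above. It does not import wpull; the date format string is an assumption pinned to the usual MS-DOS listing layout, whereas the real parser guesses the format from sample lines.

from datetime import datetime

def parse_msdos_line(line):
    # Mirrors LineParser.parse_msdos() above, with the datetime parsing
    # pinned to a fixed format instead of the guessed one.
    fields = line.split(None, 4)
    file_datetime = datetime.strptime('{} {}'.format(fields[0], fields[1]),
                                      '%m-%d-%y %I:%M%p')
    if fields[2] == '<DIR>':
        file_size, file_type = None, 'dir'
    else:
        file_size, file_type = int(fields[2]), 'file'
    return fields[3], file_type, file_size, file_datetime

print(parse_msdos_line('04-27-00  12:09PM       <DIR>          licensed'))
print(parse_msdos_line('11-18-03  10:16AM          1903128 MP3.mpeg'))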
6,860 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/ftp/ls/date_test.py
|
wpull.protocol.ftp.ls.date_test.TestDate
|
class TestDate(unittest.TestCase):
def test_parse_datetime(self):
self.assertEqual(
new_datetime(1990, 2, 9),
parse_datetime('Feb 9 1990')[0]
)
self.assertEqual(
new_datetime(2005, 2, 9, 18, 45),
parse_datetime(
'Feb 9 18:45',
datetime_now=new_datetime(2005, 2, 9, 20, 0)
)[0]
)
self.assertEqual(
new_datetime(2004, 2, 9, 18, 45),
parse_datetime(
'Feb 9 18:45',
datetime_now=new_datetime(2005, 2, 9, 17, 0)
)[0]
)
self.assertEqual(
new_datetime(2005, 2, 10),
parse_datetime(
'Feb 10 2005',
datetime_now=new_datetime(2005, 2, 5)
)[0]
)
self.assertEqual(
new_datetime(2005, 2, 10),
parse_datetime(
'Feb 10 2005',
datetime_now=new_datetime(2005, 2, 12)
)[0]
)
self.assertEqual(
new_datetime(2010, 5, 7),
parse_datetime('2010-05-07')[0]
)
self.assertEqual(
new_datetime(2010, 5, 7),
parse_datetime('2010年5月7日')[0]
)
self.assertEqual(
new_datetime(2010, 5, 7),
parse_datetime('07-05-2010')[0]
)
self.assertEqual(
new_datetime(2010, 5, 7),
parse_datetime('07-05-2010')[0]
)
self.assertEqual(
new_datetime(2014, 4, 1, 22, 39),
parse_datetime('Apr 1 2014 10:39PM', is_day_period=True)[0]
)
self.assertEqual(
new_datetime(2014, 4, 1, 12, 39),
parse_datetime('Apr 1 2014 12:39PM', is_day_period=True)[0]
)
self.assertEqual(
new_datetime(2014, 4, 1, 0, 39),
parse_datetime('Apr 1 2014 12:39AM', is_day_period=True)[0]
)
|
class TestDate(unittest.TestCase):
def test_parse_datetime(self):
pass
| 2 | 0 | 70 | 9 | 61 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 73 | 71 | 9 | 62 | 2 | 60 | 0 | 14 | 2 | 12 | 1 | 2 | 0 | 1 |
6,861 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/ftp/command.py
|
wpull.protocol.ftp.command.Commander
|
class Commander(object):
'''Helper class that performs typical FTP routines.
Args:
control_stream (:class:`.ftp.stream.ControlStream`): The control
stream.
'''
def __init__(self, data_stream):
self._control_stream = data_stream
@classmethod
def raise_if_not_match(cls, action: str,
expected_code: Union[int, Sequence[int]],
reply: Reply):
'''Raise FTPServerError if not expected reply code.
Args:
action: Label to use in the exception message.
expected_code: Expected 3 digit code.
reply: Reply from the server.
'''
if isinstance(expected_code, int):
expected_codes = (expected_code,)
else:
expected_codes = expected_code
if reply.code not in expected_codes:
raise FTPServerError(
'Failed action {action}: {reply_code} {reply_text}'
.format(action=action, reply_code=reply.code,
reply_text=ascii(reply.text)
),
reply.code
)
@asyncio.coroutine
def read_welcome_message(self):
'''Read the welcome message.
Coroutine.
'''
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match(
'Server ready', ReplyCodes.service_ready_for_new_user, reply)
@asyncio.coroutine
def login(self, username: str='anonymous', password: str='-wpull-lib@'):
'''Log in.
Coroutine.
'''
yield from self._control_stream.write_command(Command('USER', username))
reply = yield from self._control_stream.read_reply()
if reply.code == ReplyCodes.user_logged_in_proceed:
return
self.raise_if_not_match(
'Login username', ReplyCodes.user_name_okay_need_password, reply)
yield from self._control_stream.write_command(Command('PASS', password))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match(
'Login password', ReplyCodes.user_logged_in_proceed, reply)
@asyncio.coroutine
def passive_mode(self) -> Tuple[str, int]:
'''Enable passive mode.
Returns:
The address (IP address, port) of the passive port.
Coroutine.
'''
yield from self._control_stream.write_command(Command('PASV'))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match(
'Passive mode', ReplyCodes.entering_passive_mode, reply)
try:
return wpull.protocol.ftp.util.parse_address(reply.text)
except ValueError as error:
raise ProtocolError(str(error)) from error
@asyncio.coroutine
def setup_data_stream(
self,
connection_factory: Callable[[tuple], Connection],
data_stream_factory: Callable[[Connection], DataStream]=DataStream) -> \
DataStream:
'''Create and setup a data stream.
This function will set up passive and binary mode and handle
connecting to the data connection.
Args:
connection_factory: A coroutine callback that returns a connection
data_stream_factory: A callback that returns a data stream
Coroutine.
Returns:
DataStream
'''
yield from self._control_stream.write_command(Command('TYPE', 'I'))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match('Binary mode', ReplyCodes.command_okay, reply)
address = yield from self.passive_mode()
connection = yield from connection_factory(address)
# TODO: unit test for following line for connections that have
# the same port over time but within pool cleaning intervals
connection.reset()
yield from connection.connect()
data_stream = data_stream_factory(connection)
return data_stream
@asyncio.coroutine
def begin_stream(self, command: Command) -> Reply:
'''Start sending content on the data stream.
Args:
command: A command that tells the server to send data over the
data connection.
Coroutine.
Returns:
The begin reply.
'''
yield from self._control_stream.write_command(command)
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match(
'Begin stream',
(
ReplyCodes.file_status_okay_about_to_open_data_connection,
ReplyCodes.data_connection_already_open_transfer_starting,
),
reply
)
return reply
@asyncio.coroutine
def read_stream(self, file: IO, data_stream: DataStream) -> Reply:
'''Read from the data stream.
Args:
file: A destination file object or a stream writer.
data_stream: The stream to read from.
Coroutine.
Returns:
Reply: The final reply.
'''
yield from data_stream.read_file(file=file)
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match(
'End stream',
ReplyCodes.closing_data_connection,
reply
)
data_stream.close()
return reply
@asyncio.coroutine
def size(self, filename: str) -> int:
'''Get size of file.
Coroutine.
'''
yield from self._control_stream.write_command(Command('SIZE', filename))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match('File size', ReplyCodes.file_status, reply)
try:
return int(reply.text.strip())
except ValueError:
return
@asyncio.coroutine
def restart(self, offset: int):
'''Send restart command.
Coroutine.
'''
yield from self._control_stream.write_command(Command('REST', str(offset)))
reply = yield from self._control_stream.read_reply()
self.raise_if_not_match('Restart', ReplyCodes.requested_file_action_pending_further_information, reply)
|
class Commander(object):
'''Helper class that performs typical FTP routines.
Args:
control_stream (:class:`.ftp.stream.ControlStream`): The control
stream.
'''
def __init__(self, data_stream):
pass
@classmethod
def raise_if_not_match(cls, action: str,
expected_code: Union[int, Sequence[int]],
reply: Reply):
'''Raise FTPServerError if not expected reply code.
Args:
action: Label to use in the exception message.
expected_code: Expected 3 digit code.
reply: Reply from the server.
'''
pass
@asyncio.coroutine
def read_welcome_message(self):
'''Read the welcome message.
Coroutine.
'''
pass
@asyncio.coroutine
def login(self, username: str='anonymous', password: str='-wpull-lib@'):
'''Log in.
Coroutine.
'''
pass
@asyncio.coroutine
def passive_mode(self) -> Tuple[str, int]:
'''Enable passive mode.
Returns:
The address (IP address, port) of the passive port.
Coroutine.
'''
pass
@asyncio.coroutine
def setup_data_stream(
self,
connection_factory: Callable[[tuple], Connection],
data_stream_factory: Callable[[Connection], DataStream]=DataStream) -> \
DataStream:
'''Create and setup a data stream.
This function will set up passive and binary mode and handle
connecting to the data connection.
Args:
connection_factory: A coroutine callback that returns a connection
data_stream_factory: A callback that returns a data stream
Coroutine.
Returns:
DataStream
'''
pass
@asyncio.coroutine
def begin_stream(self, command: Command) -> Reply:
'''Start sending content on the data stream.
Args:
command: A command that tells the server to send data over the
data connection.
Coroutine.
Returns:
The begin reply.
'''
pass
@asyncio.coroutine
def read_stream(self, file: IO, data_stream: DataStream) -> Reply:
'''Read from the data stream.
Args:
file: A destination file object or a stream writer.
data_stream: The stream to read from.
Coroutine.
Returns:
Reply: The final reply.
'''
pass
@asyncio.coroutine
def size(self, filename: str) -> int:
'''Get size of file.
Coroutine.
'''
pass
@asyncio.coroutine
def restart(self, offset: int):
'''Send restart command.
Coroutine.
'''
pass
| 20 | 10 | 19 | 5 | 9 | 5 | 2 | 0.57 | 1 | 12 | 7 | 0 | 9 | 1 | 10 | 10 | 212 | 57 | 99 | 40 | 73 | 56 | 62 | 24 | 51 | 3 | 1 | 1 | 15 |
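A minimal, self-contained sketch of the reply-checking pattern in Commander.raise_if_not_match above. FakeReply is a hypothetical stand-in for wpull's Reply (only .code and .text are used), and ValueError substitutes for FTPServerError so the sketch runs without wpull installed.

from typing import Sequence, Union

class FakeReply:
    '''Hypothetical stand-in for wpull's Reply; only .code and .text are used.'''
    def __init__(self, code: int, text: str):
        self.code = code
        self.text = text

def raise_if_not_match(action: str, expected_code: Union[int, Sequence[int]],
                       reply: FakeReply):
    # Accept a single code or any sequence of acceptable codes.
    if isinstance(expected_code, int):
        expected_codes = (expected_code,)
    else:
        expected_codes = expected_code
    if reply.code not in expected_codes:
        raise ValueError('Failed action {action}: {reply_code} {reply_text}'.format(
            action=action, reply_code=reply.code, reply_text=ascii(reply.text)))

raise_if_not_match('Begin stream', (125, 150), FakeReply(150, 'Opening data connection.'))
try:
    raise_if_not_match('Login password', 230, FakeReply(530, 'Login incorrect.'))
except ValueError as error:
    print(error)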
6,862 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/ftp/client.py
|
wpull.protocol.ftp.client.SessionState
|
class SessionState(enum.Enum):
ready = 'ready'
file_request_sent = 'file_request_sent'
directory_request_sent = 'directory_request_sent'
response_received = 'response_received'
aborted = 'aborted'
|
class SessionState(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 6 | 0 | 6 | 6 | 5 | 0 | 6 | 6 | 5 | 0 | 4 | 0 | 0 |
6,863 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/ftp/client.py
|
wpull.protocol.ftp.client.Session
|
class Session(BaseSession):
class Event(enum.Enum):
begin_control = 'begin_control'
control_send_data = 'control_send_data'
control_receive_data = 'control_receive_data'
end_control = 'end_control'
begin_transfer = 'begin_transfer'
transfer_send_data = 'transfer_send_data'
transfer_receive_data = 'transfer_receive_data'
end_transfer = 'end_transfer'
def __init__(self, login_table: weakref.WeakKeyDictionary, **kwargs):
self._login_table = login_table
super().__init__(**kwargs)
self._control_connection = None
self._control_stream = None
self._commander = None
self._request = None
self._response = None
self._data_stream = None
self._data_connection = None
self._listing_type = None
self._session_state = SessionState.ready
self.event_dispatcher.register(self.Event.begin_control)
self.event_dispatcher.register(self.Event.control_send_data)
self.event_dispatcher.register(self.Event.control_receive_data)
self.event_dispatcher.register(self.Event.end_control)
self.event_dispatcher.register(self.Event.begin_transfer)
self.event_dispatcher.register(self.Event.transfer_send_data)
self.event_dispatcher.register(self.Event.transfer_receive_data)
self.event_dispatcher.register(self.Event.end_transfer)
@asyncio.coroutine
def _init_stream(self):
'''Create streams and commander.
Coroutine.
'''
assert not self._control_connection
self._control_connection = yield from self._acquire_request_connection(self._request)
self._control_stream = ControlStream(self._control_connection)
self._commander = Commander(self._control_stream)
read_callback = functools.partial(self.event_dispatcher.notify, self.Event.control_receive_data)
self._control_stream.data_event_dispatcher.add_read_listener(read_callback)
write_callback = functools.partial(self.event_dispatcher.notify, self.Event.control_send_data)
self._control_stream.data_event_dispatcher.add_write_listener(write_callback)
@asyncio.coroutine
def _log_in(self):
'''Connect and login.
Coroutine.
'''
username = self._request.url_info.username or self._request.username or 'anonymous'
password = self._request.url_info.password or self._request.password or '-wpull@'
cached_login = self._login_table.get(self._control_connection)
if cached_login and cached_login == (username, password):
_logger.debug('Reusing existing login.')
return
try:
yield from self._commander.login(username, password)
except FTPServerError as error:
raise AuthenticationError('Login error: {}'.format(error)) \
from error
self._login_table[self._control_connection] = (username, password)
@asyncio.coroutine
def start(self, request: Request) -> Response:
'''Start a file or directory listing download.
Args:
request: Request.
Returns:
A Response populated with the initial data connection reply.
Once the response is received, call :meth:`download`.
Coroutine.
'''
if self._session_state != SessionState.ready:
raise RuntimeError('Session not ready')
response = Response()
yield from self._prepare_fetch(request, response)
response.file_transfer_size = yield from self._fetch_size(request)
if request.restart_value:
try:
yield from self._commander.restart(request.restart_value)
response.restart_value = request.restart_value
except FTPServerError:
_logger.debug('Could not restart file.', exc_info=1)
yield from self._open_data_stream()
command = Command('RETR', request.file_path)
yield from self._begin_stream(command)
self._session_state = SessionState.file_request_sent
return response
@asyncio.coroutine
def start_listing(self, request: Request) -> ListingResponse:
'''Fetch a file listing.
Args:
request: Request.
Returns:
A listing response populated with the initial data connection
reply.
Once the response is received, call :meth:`download_listing`.
Coroutine.
'''
if self._session_state != SessionState.ready:
raise RuntimeError('Session not ready')
response = ListingResponse()
yield from self._prepare_fetch(request, response)
yield from self._open_data_stream()
mlsd_command = Command('MLSD', self._request.file_path)
list_command = Command('LIST', self._request.file_path)
try:
yield from self._begin_stream(mlsd_command)
self._listing_type = 'mlsd'
except FTPServerError as error:
if error.reply_code in (ReplyCodes.syntax_error_command_unrecognized,
ReplyCodes.command_not_implemented):
self._listing_type = None
else:
raise
if not self._listing_type:
# This code is not in the exception handler to avoid incorrect
# exception chaining
yield from self._begin_stream(list_command)
self._listing_type = 'list'
_logger.debug('Listing type is %s', self._listing_type)
self._session_state = SessionState.directory_request_sent
return response
@asyncio.coroutine
def _prepare_fetch(self, request: Request, response: Response):
'''Prepare for a fetch.
Coroutine.
'''
self._request = request
self._response = response
yield from self._init_stream()
connection_closed = self._control_connection.closed()
if connection_closed:
self._login_table.pop(self._control_connection, None)
yield from self._control_stream.reconnect()
request.address = self._control_connection.address
connection_reused = not connection_closed
self.event_dispatcher.notify(self.Event.begin_control, request, connection_reused=connection_reused)
if connection_closed:
yield from self._commander.read_welcome_message()
yield from self._log_in()
self._response.request = request
@asyncio.coroutine
def _begin_stream(self, command: Command):
'''Start data stream transfer.'''
begin_reply = yield from self._commander.begin_stream(command)
self._response.reply = begin_reply
self.event_dispatcher.notify(self.Event.begin_transfer, self._response)
@asyncio.coroutine
def download(self, file: Optional[IO]=None, rewind: bool=True,
duration_timeout: Optional[float]=None) -> Response:
'''Read the response content into file.
Args:
file: A file object or asyncio stream.
rewind: Seek the given file back to its original offset after
reading is finished.
duration_timeout: Maximum time in seconds within which the
entire file must be read.
Returns:
A Response populated with the final data connection reply.
Be sure to call :meth:`start` first.
Coroutine.
'''
if self._session_state != SessionState.file_request_sent:
raise RuntimeError('File request not sent')
if rewind and file and hasattr(file, 'seek'):
original_offset = file.tell()
else:
original_offset = None
if not hasattr(file, 'drain'):
self._response.body = file
if not isinstance(file, Body):
self._response.body = Body(file)
read_future = self._commander.read_stream(file, self._data_stream)
try:
reply = yield from \
asyncio.wait_for(read_future, timeout=duration_timeout)
except asyncio.TimeoutError as error:
raise DurationTimeout(
'Did not finish reading after {} seconds.'
.format(duration_timeout)
) from error
self._response.reply = reply
if original_offset is not None:
file.seek(original_offset)
self.event_dispatcher.notify(self.Event.end_transfer, self._response)
self._session_state = SessionState.response_received
return self._response
@asyncio.coroutine
def download_listing(self, file: Optional[IO],
duration_timeout: Optional[float]=None) -> \
ListingResponse:
'''Read file listings.
Args:
file: A file object or asyncio stream.
duration_timeout: Maximum time in seconds within which the
entire file must be read.
Returns:
A Response populated with the file listings
Be sure to call :meth:`start_listing` first.
Coroutine.
'''
if self._session_state != SessionState.directory_request_sent:
raise RuntimeError('File request not sent')
self._session_state = SessionState.file_request_sent
yield from self.download(file=file, rewind=False,
duration_timeout=duration_timeout)
try:
if self._response.body.tell() == 0:
listings = ()
elif self._listing_type == 'mlsd':
self._response.body.seek(0)
machine_listings = wpull.protocol.ftp.util.parse_machine_listing(
self._response.body.read().decode('utf-8',
errors='surrogateescape'),
convert=True, strict=False
)
listings = list(
wpull.protocol.ftp.util.machine_listings_to_file_entries(
machine_listings
))
else:
self._response.body.seek(0)
file = io.TextIOWrapper(self._response.body, encoding='utf-8',
errors='surrogateescape')
listing_parser = ListingParser(file=file)
listings = list(listing_parser.parse_input())
_logger.debug('Listing detected as %s', listing_parser.type)
# We don't want the file to be closed when exiting this function
file.detach()
except (ListingError, ValueError) as error:
raise ProtocolError(*error.args) from error
self._response.files = listings
self._response.body.seek(0)
self._session_state = SessionState.response_received
return self._response
@asyncio.coroutine
def _open_data_stream(self):
'''Open the data stream connection.
Coroutine.
'''
@asyncio.coroutine
def connection_factory(address: Tuple[int, int]):
self._data_connection = yield from self._acquire_connection(address[0], address[1])
return self._data_connection
self._data_stream = yield from self._commander.setup_data_stream(
connection_factory
)
self._response.data_address = self._data_connection.address
read_callback = functools.partial(self.event_dispatcher.notify, self.Event.transfer_receive_data)
self._data_stream.data_event_dispatcher.add_read_listener(read_callback)
write_callback = functools.partial(self.event_dispatcher.notify, self.Event.transfer_send_data)
self._data_stream.data_event_dispatcher.add_write_listener(write_callback)
@asyncio.coroutine
def _fetch_size(self, request: Request) -> int:
'''Return size of file.
Coroutine.
'''
try:
size = yield from self._commander.size(request.file_path)
return size
except FTPServerError:
return
def abort(self):
super().abort()
self._close_data_connection()
if self._control_connection:
self._login_table.pop(self._control_connection, None)
def recycle(self):
super().recycle()
self._close_data_connection()
if self._control_connection:
self.event_dispatcher.notify(
self.Event.end_control, self._response,
connection_closed=self._control_connection.closed()
)
def _close_data_connection(self):
if self._data_connection:
# self._data_connection.close()
# self._connection_pool.no_wait_release(self._data_connection)
self._data_connection = None
if self._data_stream:
self._data_stream = None
|
class Session(BaseSession):
class Event(enum.Enum):
def __init__(self, login_table: weakref.WeakKeyDictionary, **kwargs):
pass
@asyncio.coroutine
def _init_stream(self):
'''Create streams and commander.
Coroutine.
'''
pass
@asyncio.coroutine
def _log_in(self):
'''Connect and login.
Coroutine.
'''
pass
@asyncio.coroutine
def start(self, request: Request) -> Response:
'''Start a file or directory listing download.
Args:
request: Request.
Returns:
A Response populated with the initial data connection reply.
Once the response is received, call :meth:`download`.
Coroutine.
'''
pass
@asyncio.coroutine
def start_listing(self, request: Request) -> ListingResponse:
'''Fetch a file listing.
Args:
request: Request.
Returns:
A listing response populated with the initial data connection
reply.
Once the response is received, call :meth:`download_listing`.
Coroutine.
'''
pass
@asyncio.coroutine
def _prepare_fetch(self, request: Request, response: Response):
'''Prepare for a fetch.
Coroutine.
'''
pass
@asyncio.coroutine
def _begin_stream(self, command: Command):
'''Start data stream transfer.'''
pass
@asyncio.coroutine
def download(self, file: Optional[IO]=None, rewind: bool=True,
duration_timeout: Optional[float]=None) -> Response:
'''Read the response content into file.
Args:
file: A file object or asyncio stream.
rewind: Seek the given file back to its original offset after
reading is finished.
duration_timeout: Maximum time in seconds within which the
entire file must be read.
Returns:
A Response populated with the final data connection reply.
Be sure to call :meth:`start` first.
Coroutine.
'''
pass
@asyncio.coroutine
def download_listing(self, file: Optional[IO],
duration_timeout: Optional[float]=None) -> \
ListingResponse:
'''Read file listings.
Args:
file: A file object or asyncio stream.
duration_timeout: Maximum time in seconds within which the
entire file must be read.
Returns:
A Response populated with the file listings
Be sure to call :meth:`start_listing` first.
Coroutine.
'''
pass
@asyncio.coroutine
def _open_data_stream(self):
'''Open the data stream connection.
Coroutine.
'''
pass
@asyncio.coroutine
def connection_factory(address: Tuple[int, int]):
pass
@asyncio.coroutine
def _fetch_size(self, request: Request) -> int:
'''Return size of file.
Coroutine.
'''
pass
def abort(self):
pass
def recycle(self):
pass
def _close_data_connection(self):
pass
| 28 | 10 | 23 | 6 | 14 | 4 | 3 | 0.27 | 1 | 27 | 16 | 0 | 14 | 10 | 14 | 43 | 383 | 102 | 221 | 75 | 190 | 60 | 183 | 57 | 166 | 7 | 4 | 2 | 41 |
6,864 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/ftp/client.py
|
wpull.protocol.ftp.client.Client
|
class Client(BaseClient):
'''FTP Client.
The session object is :class:`Session`.
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._login_table = weakref.WeakKeyDictionary()
def _session_class(self) -> Session:
return functools.partial(Session, login_table=self._login_table)
def session(self) -> Session:
return super().session()
|
class Client(BaseClient):
'''FTP Client.
The session object is :class:`Session`.
'''
def __init__(self, *args, **kwargs):
pass
def _session_class(self) -> Session:
pass
def session(self) -> Session:
pass
| 4 | 1 | 2 | 0 | 2 | 0 | 1 | 0.38 | 1 | 4 | 1 | 0 | 3 | 1 | 3 | 31 | 15 | 4 | 8 | 5 | 4 | 3 | 8 | 5 | 4 | 1 | 4 | 0 | 3 |
6,865 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/abstract/stream.py
|
wpull.protocol.abstract.stream.DataEventDispatcher
|
class DataEventDispatcher(object):
def __init__(self):
self._read_listeners = set()
self._write_listeners = set()
def add_read_listener(self, callback: DataEventCallback):
self._read_listeners.add(callback)
def remove_read_listener(self, callback: DataEventCallback):
self._read_listeners.remove(callback)
def add_write_listener(self, callback: DataEventCallback):
self._write_listeners.add(callback)
def remove_write_listener(self, callback: DataEventCallback):
self._write_listeners.remove(callback)
def notify_read(self, data: bytes):
for callback in self._read_listeners:
callback(data)
def notify_write(self, data: bytes):
for callback in self._write_listeners:
callback(data)
|
class DataEventDispatcher(object):
def __init__(self):
pass
def add_read_listener(self, callback: DataEventCallback):
pass
def remove_read_listener(self, callback: DataEventCallback):
pass
def add_write_listener(self, callback: DataEventCallback):
pass
def remove_write_listener(self, callback: DataEventCallback):
pass
def notify_read(self, data: bytes):
pass
def notify_write(self, data: bytes):
pass
| 8 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 7 | 2 | 7 | 7 | 24 | 6 | 18 | 12 | 10 | 0 | 18 | 12 | 10 | 2 | 1 | 1 | 9 |
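A short usage sketch for DataEventDispatcher above, assuming wpull is installed so the module path in this record resolves; only the methods shown in the record are used.

from wpull.protocol.abstract.stream import DataEventDispatcher

dispatcher = DataEventDispatcher()
received = []

dispatcher.add_read_listener(received.append)                     # observe incoming bytes
dispatcher.add_write_listener(lambda data: print('sent', len(data), 'bytes'))

dispatcher.notify_read(b'220 Service ready\r\n')                  # fans out to read listeners
dispatcher.notify_write(b'USER anonymous\r\n')                    # fans out to write listeners

assert received == [b'220 Service ready\r\n']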
6,866 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/abstract/request.py
|
wpull.protocol.abstract.request.URLPropertyMixin
|
class URLPropertyMixin(object):
'''Provide URL as a property.
Attributes:
url (str): The complete URL string.
url_info (:class:`.url.URLInfo`): The URLInfo of the `url` attribute.
Setting :attr:`url` or :attr:`url_info` will update the other
respectively.
'''
def __init__(self):
self._url = None
self._url_info = None
@property
def url(self):
return self._url
@url.setter
def url(self, url_str):
self._url = url_str
self._url_info = URLInfo.parse(url_str)
@property
def url_info(self):
return self._url_info
@url_info.setter
def url_info(self, url_info):
self._url_info = url_info
self._url = url_info.url
|
class URLPropertyMixin(object):
'''Provide URL as a property.
Attributes:
url (str): The complete URL string.
url_info (:class:`.url.URLInfo`): The URLInfo of the `url` attribute.
Setting :attr:`url` or :attr:`url_info` will update the other
respectively.
'''
def __init__(self):
pass
@property
def url(self):
pass
@url.setter
def url(self):
pass
@property
def url_info(self):
pass
@url_info.setter
def url_info(self):
pass
| 10 | 1 | 3 | 0 | 3 | 0 | 1 | 0.39 | 1 | 1 | 1 | 2 | 5 | 2 | 5 | 5 | 31 | 6 | 18 | 12 | 8 | 7 | 14 | 8 | 8 | 1 | 1 | 0 | 5 |
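A hedged sketch of how URLPropertyMixin above keeps the string and parsed views in sync. DemoRequest is a hypothetical subclass used only for illustration, and the imports assume wpull is installed (URLInfo is taken from wpull.url, as in the url_test record earlier).

from wpull.protocol.abstract.request import URLPropertyMixin
from wpull.url import URLInfo

class DemoRequest(URLPropertyMixin):
    '''Hypothetical subclass for illustration only.'''

request = DemoRequest()
request.url = 'HTTP://Example.Com/path'        # the setter parses the string...
print(request.url_info.hostname)               # ...so the URLInfo view is populated: example.com

request.url_info = URLInfo.parse('https://example.net/other')
print(request.url)                             # and the string view follows: https://example.net/other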
6,867 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/abstract/request.py
|
wpull.protocol.abstract.request.BaseResponse
|
class BaseResponse(ProtocolResponseMixin):
def __init__(self):
super().__init__()
self.body = None
self.request = None
|
class BaseResponse(ProtocolResponseMixin):
def __init__(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 1 | 0 | 2 | 1 | 2 | 1 | 4 | 5 | 0 | 5 | 4 | 3 | 0 | 5 | 4 | 3 | 1 | 2 | 0 | 1 |
6,868 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/coprocessor/phantomjs.py
|
wpull.processor.coprocessor.phantomjs.PhantomJSCrashed
|
class PhantomJSCrashed(Exception):
'''PhantomJS exited with non-zero code.'''
|
class PhantomJSCrashed(Exception):
'''PhantomJS exited with non-zero code.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 3 | 0 | 0 |
6,869 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/abstract/client.py
|
wpull.protocol.abstract.client.DurationTimeout
|
class DurationTimeout(NetworkTimedOut):
'''Download did not complete within specified time.'''
|
class DurationTimeout(NetworkTimedOut):
'''Download did not complete within specified time.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 6 | 0 | 0 |
6,870 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/web_test.py
|
wpull.processor.web_test.TestWeb
|
class TestWeb(unittest.TestCase):
def test_add_referer(self):
request = Request()
url_record = URLRecord()
url_record.parent_url = 'http://example.com/'
url_record.url = 'http://example.com/image.png'
WebProcessorSession._add_referrer(request, url_record)
self.assertEqual('http://example.com/', request.fields['Referer'])
def test_add_referer_https_to_http(self):
request = Request()
url_record = URLRecord()
url_record.parent_url = 'https://example.com/'
url_record.url = 'http://example.com/image.png'
WebProcessorSession._add_referrer(request, url_record)
self.assertNotIn('referer', request.fields)
|
class TestWeb(unittest.TestCase):
def test_add_referer(self):
pass
def test_add_referer_https_to_http(self):
pass
| 3 | 0 | 9 | 2 | 7 | 0 | 1 | 0 | 1 | 3 | 3 | 0 | 2 | 0 | 2 | 74 | 20 | 5 | 15 | 7 | 12 | 0 | 15 | 7 | 12 | 1 | 2 | 0 | 2 |
6,871 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/coprocessor/proxy.py
|
wpull.processor.coprocessor.proxy.ProxyCoprocessorSession
|
class ProxyCoprocessorSession(object):
def __init__(self, app_session: AppSession,
http_proxy_session: HTTPProxySession):
self._app_session = app_session
self._http_proxy_session = http_proxy_session
self._cookie_jar = cast(
CookieJarWrapper, self._app_session.factory.get('CookieJarWrapper')
)
self._fetch_rule = cast(
FetchRule, self._app_session.factory['FetchRule']
)
self._result_rule = cast(
ResultRule, self._app_session.factory['ResultRule']
)
self._processing_rule = cast(
ProcessingRule, self._app_session.factory['ProcessingRule']
)
file_writer = cast(
BaseFileWriter, self._app_session.factory['FileWriter']
)
self._file_writer_session = file_writer.session()
self._item_session = None
http_proxy_session.hook_dispatcher.connect(
HTTPProxySession.Event.client_request,
self._client_request_callback
)
http_proxy_session.hook_dispatcher.connect(
HTTPProxySession.Event.server_begin_response,
self._server_begin_response_callback
)
http_proxy_session.event_dispatcher.add_listener(
HTTPProxySession.Event.server_end_response,
self._server_end_response_callback
)
http_proxy_session.event_dispatcher.add_listener(
HTTPProxySession.Event.server_response_error,
self._server_response_error_callback
)
@classmethod
def _new_url_record(cls, request: Request) -> URLRecord:
'''Return new empty URLRecord.'''
url_record = URLRecord()
url_record.url = request.url_info.url
url_record.status = Status.in_progress
url_record.try_count = 0
url_record.level = 0
return url_record
def _new_item_session(self, request: Request) -> ProxyItemSession:
url_table = cast(BaseURLTable, self._app_session.factory['URLTable'])
url_table.add_one(request.url_info.url)
return ProxyItemSession(self._app_session, self._new_url_record(request))
def _client_request_callback(self, request: Request):
'''Request callback handler.'''
self._item_session = self._new_item_session(request)
self._item_session.request = request
if self._cookie_jar:
self._cookie_jar.add_cookie_header(request)
verdict, reason = self._fetch_rule.check_subsequent_web_request(self._item_session)
self._file_writer_session.process_request(request)
if verdict:
_logger.info(__(
_('Fetching ‘{url}’.'),
url=request.url_info.url
))
return verdict
def _server_begin_response_callback(self, response: Response):
'''Pre-response callback handler.'''
self._item_session.response = response
if self._cookie_jar:
self._cookie_jar.extract_cookies(response, self._item_session.request)
action = self._result_rule.handle_pre_response(self._item_session)
self._file_writer_session.process_response(response)
return action == Actions.NORMAL
def _server_end_response_callback(self, response: Response):
'''Response callback handler.'''
request = self._item_session.request
response = self._item_session.response
_logger.info(__(
_('Fetched ‘{url}’: {status_code} {reason}. '
'Length: {content_length} [{content_type}].'),
url=request.url,
status_code=response.status_code,
reason=wpull.string.printable_str(response.reason),
content_length=wpull.string.printable_str(
response.fields.get('Content-Length', _('none'))),
content_type=wpull.string.printable_str(
response.fields.get('Content-Type', _('none'))),
))
self._result_rule.handle_response(self._item_session)
if response.status_code in WebProcessor.DOCUMENT_STATUS_CODES:
filename = self._file_writer_session.save_document(response)
self._processing_rule.scrape_document(self._item_session)
self._result_rule.handle_document(self._item_session, filename)
elif response.status_code in WebProcessor.NO_DOCUMENT_STATUS_CODES:
self._file_writer_session.discard_document(response)
self._result_rule.handle_no_document(self._item_session)
else:
self._file_writer_session.discard_document(response)
self._result_rule.handle_document_error(self._item_session)
def _server_response_error_callback(self, error: BaseException):
self._result_rule.handle_error(self._item_session, error)
|
class ProxyCoprocessorSession(object):
def __init__(self, app_session: AppSession,
http_proxy_session: HTTPProxySession):
pass
@classmethod
def _new_url_record(cls, request: Request) -> URLRecord:
'''Return new empty URLRecord.'''
pass
def _new_item_session(self, request: Request) -> ProxyItemSession:
pass
def _client_request_callback(self, request: Request):
'''Request callback handler.'''
pass
def _server_begin_response_callback(self, response: Response):
'''Pre-response callback handler.'''
pass
def _server_end_response_callback(self, response: Response):
'''Response callback handler.'''
pass
def _server_response_error_callback(self, error: BaseException):
pass
| 9 | 4 | 17 | 2 | 14 | 1 | 2 | 0.04 | 1 | 17 | 16 | 0 | 6 | 8 | 7 | 7 | 124 | 23 | 97 | 26 | 87 | 4 | 58 | 24 | 50 | 3 | 1 | 1 | 12 |
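The ProxyCoprocessorSession record above wires per-request callbacks into the proxy's hook and event dispatchers. The following self-contained sketch illustrates only that registration pattern; the Dispatcher and Event names here are stand-ins for illustration, not wpull's HTTPProxySession API.

import enum


class Event(enum.Enum):
    client_request = 'client_request'
    server_end_response = 'server_end_response'


class Dispatcher(object):
    '''Toy dispatcher: stores callbacks per event and calls them on notify.'''
    def __init__(self):
        self._listeners = {event: [] for event in Event}

    def connect(self, event, callback):
        self._listeners[event].append(callback)

    def notify(self, event, *args):
        for callback in self._listeners[event]:
            callback(*args)


class ProxySessionSketch(object):
    '''Registers per-request callbacks, mirroring ProxyCoprocessorSession.'''
    def __init__(self, dispatcher):
        dispatcher.connect(Event.client_request, self._client_request)
        dispatcher.connect(Event.server_end_response, self._server_end_response)

    def _client_request(self, request):
        print('would apply fetch rules to', request)

    def _server_end_response(self, response):
        print('would save or discard', response)


dispatcher = Dispatcher()
session = ProxySessionSketch(dispatcher)
dispatcher.notify(Event.client_request, 'http://example.com/')
dispatcher.notify(Event.server_end_response, '200 OK')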
6,872 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/coprocessor/proxy.py
|
wpull.processor.coprocessor.proxy.ProxyItemSession
|
class ProxyItemSession(ItemSession):
@property
def is_virtual(self):
return True
def skip(self):
self._processed = True
self.set_status(Status.skipped)
|
class ProxyItemSession(ItemSession):
@property
def is_virtual(self):
pass
def skip(self):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 2 | 16 | 8 | 1 | 7 | 5 | 3 | 0 | 6 | 4 | 3 | 1 | 2 | 0 | 2 |
6,873 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/coprocessor/youtubedl.py
|
wpull.processor.coprocessor.youtubedl.Session
|
class Session(object):
'''youtube-dl session.'''
def __init__(self, proxy_address, youtube_dl_path, root_path, item_session: ItemSession,
file_writer_session, user_agent, warc_recorder, inet_family,
check_certificate):
self._proxy_address = proxy_address
self._youtube_dl_path = youtube_dl_path
self._root_path = root_path
self._item_session = item_session
self._file_writer_session = file_writer_session
self._user_agent = user_agent
self._warc_recorder = warc_recorder
self._temp_dir = None
self._path_prefix = None
self._inet_family = inet_family
self._check_certificate = check_certificate
@asyncio.coroutine
def run(self):
host, port = self._proxy_address
url = self._item_session.url_record.url
self._path_prefix, output_template = self._get_output_template()
args = [
self._youtube_dl_path,
'--proxy', 'http://{}:{}'.format(host, port),
'--no-continue',
'--write-info-json',
'--write-annotations',
'--write-thumbnail',
'--no-cache-dir',
'--no-progress',
'--all-subs',
'--output', output_template,
url
]
if self._user_agent:
args.extend(['--user-agent', self._user_agent])
if self._inet_family == 'IPv4':
args.extend(['--force-ipv4'])
if self._check_certificate is False:
args.extend(['--no-check-certificate'])
youtube_dl_process = Process(
args,
stderr_callback=self._stderr_callback,
stdout_callback=self._stdout_callback,
)
yield from youtube_dl_process.start()
yield from youtube_dl_process.process.wait()
if self._warc_recorder:
self._write_warc_metadata()
def close(self):
if self._temp_dir:
self._temp_dir.cleanup()
def _get_output_template(self):
'''Return the path prefix and output template.'''
path = self._file_writer_session.extra_resource_path('.youtube-dl')
if not path:
self._temp_dir = tempfile.TemporaryDirectory(
dir=self._root_path, prefix='tmp-wpull-youtubedl'
)
path = '{}/tmp'.format(self._temp_dir.name)
return path, '{}.%(id)s.%(format_id)s.%(ext)s'.format(path)
@asyncio.coroutine
def _stderr_callback(self, line):
_logger.warning(line.decode('utf-8', 'replace').rstrip())
@asyncio.coroutine
def _stdout_callback(self, line):
_logger.info(line.decode('utf-8', 'replace').rstrip())
def _write_warc_metadata(self):
'''Write the JSON metadata to WARC.
Uses pywb spec.
'''
uri = 'metadata://{}{}'.format(self._item_session.url_record.url_info.authority,
self._item_session.url_record.url_info.resource)
glob_pattern = self._path_prefix + '*.info.json'
filenames = list(glob.glob(glob_pattern))
if not filenames:
_logger.warning(__(
_('Could not find external process metadata file: {filename}'),
filename=glob_pattern
))
return
for filename in filenames:
record = WARCRecord()
record.set_common_fields('metadata', 'application/vnd.youtube-dl_formats+json')
record.fields['WARC-Target-URI'] = uri
record.block_file = open(filename, 'rb')
self._warc_recorder.set_length_and_maybe_checksums(record)
self._warc_recorder.write_record(record)
record.block_file.close()
|
class Session(object):
'''youtube-dl session.'''
def __init__(self, proxy_address, youtube_dl_path, root_path, item_session: ItemSession,
file_writer_session, user_agent, warc_recorder, inet_family,
check_certificate):
pass
@asyncio.coroutine
def run(self):
pass
def close(self):
pass
def _get_output_template(self):
'''Return the path prefix and output template.'''
pass
@asyncio.coroutine
def _stderr_callback(self, line):
pass
@asyncio.coroutine
def _stdout_callback(self, line):
pass
def _write_warc_metadata(self):
'''Write the JSON metadata to WARC.
Uses pywb spec.
'''
pass
| 11 | 3 | 14 | 2 | 11 | 1 | 2 | 0.06 | 1 | 5 | 3 | 0 | 7 | 11 | 7 | 7 | 109 | 20 | 84 | 35 | 71 | 5 | 57 | 30 | 49 | 5 | 1 | 1 | 15 |
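Session.run() above shells out to youtube-dl with a fixed set of flags plus a few conditional ones. The helper below rebuilds that argument list as a plain function so the flag handling can be read in isolation; the function name and the example values are illustrative only.

def build_youtube_dl_args(youtube_dl_path, proxy_host, proxy_port, url,
                          output_template, user_agent=None,
                          inet_family=None, check_certificate=True):
    # Base flags match those assembled in Session.run() above.
    args = [
        youtube_dl_path,
        '--proxy', 'http://{}:{}'.format(proxy_host, proxy_port),
        '--no-continue', '--write-info-json', '--write-annotations',
        '--write-thumbnail', '--no-cache-dir', '--no-progress',
        '--all-subs', '--output', output_template, url,
    ]
    if user_agent:
        args.extend(['--user-agent', user_agent])
    if inet_family == 'IPv4':
        args.append('--force-ipv4')
    if not check_certificate:
        args.append('--no-check-certificate')
    return args


print(build_youtube_dl_args('youtube-dl', 'localhost', 8080,
                            'http://example.com/video',
                            'tmp.%(id)s.%(format_id)s.%(ext)s'))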
6,874 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/coprocessor/youtubedl.py
|
wpull.processor.coprocessor.youtubedl.YoutubeDlCoprocessor
|
class YoutubeDlCoprocessor(object):
'''youtube-dl coprocessor.'''
def __init__(self, youtube_dl_path, proxy_address, root_path='.',
user_agent=None, warc_recorder=None, inet_family=False,
check_certificate=True):
self._youtube_dl_path = youtube_dl_path
self._proxy_address = proxy_address
self._root_path = root_path
self._user_agent = user_agent
self._warc_recorder = warc_recorder
self._inet_family = inet_family
self._check_certificate = check_certificate
assert len(proxy_address) == 2, len(proxy_address)
assert isinstance(proxy_address[0], str), proxy_address
assert isinstance(proxy_address[1], int), proxy_address
@asyncio.coroutine
def process(self, item_session: ItemSession, request, response, file_writer_session):
if response.status_code != 200:
return
if not HTMLReader.is_supported(request=request, response=response):
return
session = Session(
self._proxy_address, self._youtube_dl_path, self._root_path,
item_session, file_writer_session, self._user_agent,
self._warc_recorder, self._inet_family, self._check_certificate
)
url = item_session.url_record.url
_logger.info(__(_('youtube-dl fetching ‘{url}’.'), url=url))
with contextlib.closing(session):
yield from session.run()
_logger.info(__(_('youtube-dl fetched ‘{url}’.'), url=url))
|
class YoutubeDlCoprocessor(object):
'''youtube-dl coprocessor.'''
def __init__(self, youtube_dl_path, proxy_address, root_path='.',
user_agent=None, warc_recorder=None, inet_family=False,
check_certificate=True):
pass
@asyncio.coroutine
def process(self, item_session: ItemSession, request, response, file_writer_session):
pass
| 4 | 1 | 17 | 3 | 14 | 0 | 2 | 0.03 | 1 | 6 | 3 | 0 | 2 | 7 | 2 | 2 | 38 | 7 | 30 | 15 | 24 | 1 | 23 | 12 | 20 | 3 | 1 | 1 | 4 |
6,875 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/delegate.py
|
wpull.processor.delegate.DelegateProcessor
|
class DelegateProcessor(BaseProcessor):
'''Delegate to Web or FTP processor.'''
def __init__(self):
self._processors = {}
@asyncio.coroutine
def process(self, item_session: ItemSession):
scheme = item_session.url_record.url_info.scheme
processor = self._processors.get(scheme)
if processor:
return (yield from processor.process(item_session))
else:
_logger.warning(
_('No processor available to handle {scheme} scheme.'),
scheme=repr(scheme)
)
item_session.skip()
def close(self):
for processor in self._processors.values():
processor.close()
def register(self, scheme: str, processor: BaseProcessor):
self._processors[scheme] = processor
|
class DelegateProcessor(BaseProcessor):
'''Delegate to Web or FTP processor.'''
def __init__(self):
pass
@asyncio.coroutine
def process(self, item_session: ItemSession):
pass
def close(self):
pass
def register(self, scheme: str, processor: BaseProcessor):
pass
| 6 | 1 | 5 | 1 | 5 | 0 | 2 | 0.05 | 1 | 2 | 1 | 0 | 4 | 1 | 4 | 26 | 26 | 5 | 20 | 10 | 14 | 1 | 15 | 9 | 10 | 2 | 4 | 1 | 6 |
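DelegateProcessor above is a small scheme-to-processor registry. The sketch below reproduces the same register/lookup flow with trivial stand-in processors so the delegation logic can be run on its own; none of these names are wpull classes.

class EchoProcessor(object):
    '''Stand-in processor that just reports which label handled the URL.'''
    def __init__(self, label):
        self.label = label

    def process(self, url):
        return '{} handled {}'.format(self.label, url)


class DelegateSketch(object):
    '''Dispatch by URL scheme, mirroring DelegateProcessor.register/process.'''
    def __init__(self):
        self._processors = {}

    def register(self, scheme, processor):
        self._processors[scheme] = processor

    def process(self, url):
        scheme = url.split(':', 1)[0]
        processor = self._processors.get(scheme)
        if processor:
            return processor.process(url)
        return 'no processor available for {} scheme'.format(scheme)


delegate = DelegateSketch()
delegate.register('http', EchoProcessor('web'))
delegate.register('ftp', EchoProcessor('ftp'))
print(delegate.process('http://example.com/'))
print(delegate.process('gopher://example.com/'))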
6,876 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/ftp.py
|
wpull.processor.ftp.FTPProcessor
|
class FTPProcessor(BaseProcessor):
'''FTP processor.
Args:
ftp_client: The FTP client.
fetch_params (:class:`FTPProcessorFetchParams`): Parameters for
fetching.
'''
def __init__(self, ftp_client: Client, fetch_params):
super().__init__()
self._ftp_client = ftp_client
self._fetch_params = fetch_params
self._listing_cache = LRUCache(max_items=10, time_to_live=3600)
@property
def ftp_client(self) -> Client:
'''The ftp client.'''
return self._ftp_client
@property
def fetch_params(self) -> FTPProcessorFetchParams:
'''The fetch parameters.'''
return self._fetch_params
@property
def listing_cache(self) -> LRUCache:
'''Listing cache.
Returns:
A cache mapping
from URL to list of :class:`.ftp.ls.listing.FileEntry`.
'''
return self._listing_cache
@asyncio.coroutine
def process(self, item_session: ItemSession):
session = FTPProcessorSession(self, item_session)
try:
return (yield from session.process())
finally:
session.close()
def close(self):
'''Close the FTP client.'''
self._ftp_client.close()
|
class FTPProcessor(BaseProcessor):
'''FTP processor.
Args:
ftp_client: The FTP client.
fetch_params (:class:`FTPProcessorFetchParams`): Parameters for
fetching.
'''
def __init__(self, ftp_client: Client, fetch_params):
pass
@property
def ftp_client(self) -> Client:
'''The ftp client.'''
pass
@property
def fetch_params(self) -> FTPProcessorFetchParams:
'''The fetch parameters.'''
pass
@property
def listing_cache(self) -> LRUCache:
'''Listing cache.
Returns:
A cache mapping
from URL to list of :class:`.ftp.ls.listing.FileEntry`.
'''
pass
@asyncio.coroutine
def process(self, item_session: ItemSession):
pass
def close(self):
'''Close the FTP client.'''
pass
| 11 | 5 | 5 | 0 | 3 | 1 | 1 | 0.58 | 1 | 5 | 4 | 0 | 6 | 3 | 6 | 28 | 46 | 8 | 24 | 15 | 13 | 14 | 19 | 11 | 12 | 1 | 4 | 1 | 6 |
6,877 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/protocol/abstract/client.py
|
wpull.protocol.abstract.client.BaseSession
|
class BaseSession(HookableMixin, metaclass=abc.ABCMeta):
'''Base session.'''
class SessionEvent(enum.Enum):
begin_session = 'begin_session'
end_session = 'end_session'
def __init__(self, connection_pool):
super().__init__()
self._connection_pool = connection_pool
self._connections = set()
self.event_dispatcher.register(self.SessionEvent.begin_session)
self.event_dispatcher.register(self.SessionEvent.end_session)
def __enter__(self):
self.event_dispatcher.notify(self.SessionEvent.begin_session)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_val and not isinstance(exc_val, StopIteration):
_logger.debug('Early close session.')
error = True
self.abort()
else:
error = False
self.recycle()
self.event_dispatcher.notify(self.SessionEvent.end_session, error=error)
def abort(self):
'''Terminate early and close any connections.'''
for connection in self._connections:
connection.close()
def recycle(self):
'''Clean up and return connections back to the pool.
Connections should be kept alive if supported.
'''
for connection in self._connections:
self._connection_pool.no_wait_release(connection)
self._connections.clear()
@asyncio.coroutine
def _acquire_request_connection(self, request):
'''Return a connection.'''
host = request.url_info.hostname
port = request.url_info.port
use_ssl = request.url_info.scheme == 'https'
tunnel = request.url_info.scheme != 'http'
connection = yield from self._acquire_connection(host, port, use_ssl, tunnel)
return connection
@asyncio.coroutine
def _acquire_connection(self, host, port, use_ssl=False, tunnel=True):
'''Return a connection.'''
if hasattr(self._connection_pool, 'acquire_proxy'):
connection = yield from \
self._connection_pool.acquire_proxy(host, port, use_ssl,
tunnel=tunnel)
else:
connection = yield from \
self._connection_pool.acquire(host, port, use_ssl)
self._connections.add(connection)
return connection
|
class BaseSession(HookableMixin, metaclass=abc.ABCMeta):
'''Base session.'''
class SessionEvent(enum.Enum):
def __init__(self, connection_pool):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def abort(self):
'''Terminate early and close any connections.'''
pass
def recycle(self):
'''Clean up and return connections back to the pool.
Connections should be kept alive if supported.
'''
pass
@asyncio.coroutine
def _acquire_request_connection(self, request):
'''Return a connection.'''
pass
@asyncio.coroutine
def _acquire_connection(self, host, port, use_ssl=False, tunnel=True):
'''Return a connection.'''
pass
| 11 | 5 | 8 | 1 | 6 | 1 | 2 | 0.15 | 2 | 4 | 1 | 2 | 7 | 2 | 7 | 29 | 72 | 17 | 48 | 24 | 37 | 7 | 41 | 22 | 32 | 2 | 3 | 1 | 11 |
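BaseSession above uses the context-manager protocol to choose between aborting (close the connections) and recycling (return them to the pool). Below is a minimal stand-alone sketch of that __exit__ branch, with a toy pool in place of wpull's connection pool; all names here are illustrative.

class PoolSketch(object):
    '''Toy pool that only records which connections were returned.'''
    def __init__(self):
        self.released = []

    def no_wait_release(self, connection):
        self.released.append(connection)


class PooledSessionSketch(object):
    def __init__(self, pool):
        self._pool = pool
        self._connections = {'conn-1', 'conn-2'}

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Error path closes connections; clean exit returns them to the pool.
        if exc_val:
            self.abort()
        else:
            self.recycle()

    def abort(self):
        print('closing', sorted(self._connections))

    def recycle(self):
        for connection in self._connections:
            self._pool.no_wait_release(connection)
        self._connections.clear()


pool = PoolSketch()
with PooledSessionSketch(pool):
    pass
print('recycled:', sorted(pool.released))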
6,878 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/ftp.py
|
wpull.processor.ftp.FTPProcessorSession
|
class FTPProcessorSession(BaseProcessorSession):
'''Fetches FTP files or directory listings.'''
def __init__(self, processor: FTPProcessor, item_session: ItemSession):
super().__init__()
self._processor = processor
self._item_session = item_session
self._fetch_rule = cast(FetchRule, item_session.app_session.factory['FetchRule'])
self._result_rule = cast(ResultRule, item_session.app_session.factory['ResultRule'])
file_writer = cast(BaseFileWriter, item_session.app_session.factory['FileWriter'])
self._file_writer_session = file_writer.session()
self._glob_pattern = None
def close(self):
pass
@asyncio.coroutine
def process(self):
'''Process.
Coroutine.
'''
self._item_session.request = request = Request(self._item_session.url_record.url)
verdict = self._fetch_rule.check_ftp_request(self._item_session)[0]
if not verdict:
self._item_session.skip()
return
self._add_request_password(request)
dir_name, filename = self._item_session.url_record.url_info.split_path()
if self._processor.fetch_params.glob and frozenset(filename) & GLOB_CHARS:
request = self._to_directory_request(request)
is_file = False
self._glob_pattern = urllib.parse.unquote(filename)
else:
is_file = yield from self._prepare_request_file_vs_dir(request)
self._file_writer_session.process_request(request)
wait_time = yield from self._fetch(request, is_file)
if wait_time:
_logger.debug('Sleeping {0}.', wait_time)
yield from asyncio.sleep(wait_time)
def _add_request_password(self, request: Request):
if self._fetch_rule.ftp_login:
request.username, request.password = self._fetch_rule.ftp_login
@classmethod
def _to_directory_request(cls, request: Request) -> Request:
directory_url = to_dir_path_url(request.url_info)
directory_request = copy.deepcopy(request)
directory_request.url = directory_url
return directory_request
@asyncio.coroutine
def _prepare_request_file_vs_dir(self, request: Request) -> bool:
'''Check if file, modify request, and return whether is a file.
Coroutine.
'''
if self._item_session.url_record.link_type:
is_file = self._item_session.url_record.link_type == LinkType.file
elif request.url_info.path.endswith('/'):
is_file = False
else:
is_file = 'unknown'
if is_file == 'unknown':
files = yield from self._fetch_parent_path(request)
if not files:
return True
filename = posixpath.basename(request.file_path)
for file_entry in files:
if file_entry.name == filename:
_logger.debug('Found entry in parent. Type {}',
file_entry.type)
is_file = file_entry.type != 'dir'
break
else:
_logger.debug('Did not find entry. Assume file.')
return True
if not is_file:
request.url = append_slash_to_path_url(request.url_info)
_logger.debug('Request URL changed to {}. Path={}.',
request.url, request.file_path)
return is_file
@asyncio.coroutine
def _fetch_parent_path(self, request: Request, use_cache: bool=True):
'''Fetch parent directory and return list FileEntry.
Coroutine.
'''
directory_url = to_dir_path_url(request.url_info)
if use_cache:
if directory_url in self._processor.listing_cache:
return self._processor.listing_cache[directory_url]
directory_request = copy.deepcopy(request)
directory_request.url = directory_url
_logger.debug('Check if URL {} is file with {}.', request.url,
directory_url)
with self._processor.ftp_client.session() as session:
try:
yield from session.start_listing(directory_request)
except FTPServerError:
_logger.debug('Got an error. Assume is file.')
if use_cache:
self._processor.listing_cache[directory_url] = None
return
temp_file = tempfile.NamedTemporaryFile(
dir=self._item_session.app_session.root_path,
prefix='tmp-wpull-list'
)
with temp_file as file:
directory_response = yield from session.download_listing(
file, duration_timeout=self._fetch_rule.duration_timeout)
if use_cache:
self._processor.listing_cache[directory_url] = \
directory_response.files
return directory_response.files
@asyncio.coroutine
def _fetch(self, request: Request, is_file: bool):
'''Fetch the request
Coroutine.
'''
_logger.info(_('Fetching ‘{url}’.'), url=request.url)
self._item_session.request = request
response = None
try:
with self._processor.ftp_client.session() as session:
if is_file:
response = yield from session.start(request)
else:
response = yield from session.start_listing(request)
self._item_session.response = response
action = self._result_rule.handle_pre_response(
self._item_session
)
if action in (Actions.RETRY, Actions.FINISH):
raise HookPreResponseBreak()
self._file_writer_session.process_response(response)
if not response.body:
response.body = Body(
directory=self._item_session.app_session.root_path,
hint='resp_cb')
duration_timeout = self._fetch_rule.duration_timeout
if is_file:
yield from session.download(
response.body, duration_timeout=duration_timeout)
else:
yield from session.download_listing(
response.body, duration_timeout=duration_timeout)
except HookPreResponseBreak:
if response:
response.body.close()
except REMOTE_ERRORS as error:
self._log_error(request, error)
self._result_rule.handle_error(self._item_session, error)
wait_time = self._result_rule.get_wait_time(
self._item_session, error=error
)
if response:
response.body.close()
return wait_time
else:
self._log_response(request, response)
self._handle_response(request, response)
wait_time = self._result_rule.get_wait_time(
self._item_session
)
if is_file and \
self._processor.fetch_params.preserve_permissions and \
hasattr(response.body, 'name'):
yield from self._apply_unix_permissions(request, response)
response.body.close()
return wait_time
def _add_listing_links(self, response: ListingResponse):
'''Add links from file listing response.'''
base_url = response.request.url_info.url
if self._glob_pattern:
level = self._item_session.url_record.level
else:
level = None
for file_entry in response.files:
if self._glob_pattern and \
not fnmatch.fnmatchcase(file_entry.name, self._glob_pattern):
continue
if file_entry.type == 'dir':
linked_url = urljoin_safe(base_url, file_entry.name + '/')
elif file_entry.type in ('file', 'symlink', None):
if not self._processor.fetch_params.retr_symlinks and \
file_entry.type == 'symlink':
self._make_symlink(file_entry.name, file_entry.dest)
linked_url = None
else:
linked_url = urljoin_safe(base_url, file_entry.name)
else:
linked_url = None
if linked_url:
linked_url_info = parse_url_or_log(linked_url)
if linked_url_info:
verdict = self._fetch_rule.check_ftp_request(self._item_session)[0]
if verdict:
if linked_url_info.path.endswith('/'):
self._item_session.add_child_url(linked_url_info.url, link_type=LinkType.directory)
else:
self._item_session.add_child_url(linked_url_info.url, link_type=LinkType.file, level=level)
def _log_response(self, request: Request, response: Response):
'''Log response.'''
_logger.info(
_('Fetched ‘{url}’: {reply_code} {reply_text}. '
'Length: {content_length}.'),
url=request.url,
reply_code=response.reply.code,
reply_text=response.reply.text,
content_length=response.body.size(),
)
def _handle_response(self, request: Request, response: Response):
'''Process a response.'''
self._item_session.update_record_value(status_code=response.reply.code)
is_listing = isinstance(response, ListingResponse)
if is_listing and not self._processor.fetch_params.remove_listing or \
not is_listing:
filename = self._file_writer_session.save_document(response)
action = self._result_rule.handle_document(self._item_session, filename)
else:
self._file_writer_session.discard_document(response)
action = self._result_rule.handle_no_document(self._item_session)
if isinstance(response, ListingResponse):
self._add_listing_links(response)
return action
def _make_symlink(self, link_name: str, link_target: str):
'''Make a symlink on the system.'''
path = self._file_writer_session.extra_resource_path('dummy')
if path:
dir_path = os.path.dirname(path)
symlink_path = os.path.join(dir_path, link_name)
_logger.debug('symlink {} -> {}', symlink_path, link_target)
os.symlink(link_target, symlink_path)
_logger.info(
_('Created symbolic link {symlink_path} to target {symlink_target}.'),
symlink_path=symlink_path,
symlink_target=link_target
)
@asyncio.coroutine
def _apply_unix_permissions(self, request: Request, response: Response):
'''Fetch and apply Unix permissions.
Coroutine.
'''
files = yield from self._fetch_parent_path(request)
if not files:
return
filename = posixpath.basename(request.file_path)
for file_entry in files:
if file_entry.name == filename and file_entry.perm:
_logger.debug(
'Set chmod {} o{:o}.',
response.body.name, file_entry.perm
)
os.chmod(response.body.name, file_entry.perm)
|
class FTPProcessorSession(BaseProcessorSession):
'''Fetches FTP files or directory listings.'''
def __init__(self, processor: FTPProcessor, item_session: ItemSession):
pass
def close(self):
pass
@asyncio.coroutine
def process(self):
'''Process.
Coroutine.
'''
pass
def _add_request_password(self, request: Request):
pass
@classmethod
def _to_directory_request(cls, request: Request) -> Request:
pass
@asyncio.coroutine
def _prepare_request_file_vs_dir(self, request: Request) -> bool:
'''Check if file, modify request, and return whether is a file.
Coroutine.
'''
pass
@asyncio.coroutine
def _fetch_parent_path(self, request: Request, use_cache: bool=True):
'''Fetch parent directory and return list FileEntry.
Coroutine.
'''
pass
@asyncio.coroutine
def _fetch(self, request: Request, is_file: bool):
'''Fetch the request
Coroutine.
'''
pass
def _add_listing_links(self, response: ListingResponse):
'''Add links from file listing response.'''
pass
def _log_response(self, request: Request, response: Response):
'''Log response.'''
pass
def _handle_response(self, request: Request, response: Response):
'''Process a response.'''
pass
def _make_symlink(self, link_name: str, link_target: str):
'''Make a symlink on the system.'''
pass
@asyncio.coroutine
def _apply_unix_permissions(self, request: Request, response: Response):
'''Fetch and apply Unix permissions.
Coroutine.
'''
pass
| 20 | 10 | 23 | 5 | 17 | 1 | 4 | 0.09 | 1 | 17 | 13 | 0 | 12 | 6 | 13 | 34 | 324 | 77 | 227 | 65 | 207 | 20 | 173 | 54 | 159 | 11 | 4 | 5 | 53 |
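When the requested FTP path contains glob characters, FTPProcessorSession fetches the parent listing and keeps only entries matching the pattern (see process() and _add_listing_links above). The sketch below shows that filtering step on plain strings; the value of GLOB_CHARS is an assumption, since the record references the constant without defining it, and the entries here are plain names rather than FileEntry objects.

import fnmatch

GLOB_CHARS = frozenset('*?[]')  # assumed to mirror wpull's GLOB_CHARS


def filter_listing(entries, pattern):
    if not frozenset(pattern) & GLOB_CHARS:
        return entries  # plain filename: no glob filtering applies
    return [name for name in entries if fnmatch.fnmatchcase(name, pattern)]


listing = ['readme.txt', 'data-2015.csv', 'data-2016.csv', 'logs']
print(filter_listing(listing, 'data-*.csv'))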
6,879 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/rule_test.py
|
wpull.processor.rule_test.TestFetchRule
|
class TestFetchRule(unittest.TestCase):
def get_fetch_rule(self):
url_filter = DemuxURLFilter([SchemeFilter()])
return FetchRule(url_filter=url_filter)
def test_consult_helix_fossil(self):
fetch_rule = self.get_fetch_rule()
fetch_rule.consult_helix_fossil()
def test_consult_filters(self):
fetch_rule = self.get_fetch_rule()
url_info = URLInfo.parse('http://example.com')
url_record = new_mock_url_record()
verdict, reason, test_info = fetch_rule.consult_filters(url_info, url_record)
self.assertTrue(verdict)
self.assertEqual('filters', reason)
def test_is_only_span_hosts_failed(self):
info = {
'verdict': True,
'passed': ('SpanHostsFilter', 'SchemeFilter'),
'failed': (),
'map': {
'SpanHostsFilter': True,
'SchemeFilter': True
},
}
self.assertFalse(FetchRule.is_only_span_hosts_failed(info))
info = {
'verdict': False,
'passed': ('SchemeFilter',),
'failed': ('SpanHostsFilter',),
'map': {
'SpanHostsFilter': False,
'SchemeFilter': True
},
}
self.assertTrue(FetchRule.is_only_span_hosts_failed(info))
|
class TestFetchRule(unittest.TestCase):
def get_fetch_rule(self):
pass
def test_consult_helix_fossil(self):
pass
def test_consult_filters(self):
pass
def test_is_only_span_hosts_failed(self):
pass
| 5 | 0 | 10 | 2 | 9 | 0 | 1 | 0 | 1 | 4 | 4 | 0 | 4 | 0 | 4 | 76 | 44 | 9 | 35 | 12 | 30 | 0 | 19 | 12 | 14 | 1 | 2 | 0 | 4 |
6,880 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/rule_test.py
|
wpull.processor.rule_test.TestProcessingRule
|
class TestProcessingRule(unittest.TestCase):
def test_parse_url_no_crash(self):
self.assertTrue(
ProcessingRule.parse_url('http://example.com')
)
self.assertFalse(
ProcessingRule.parse_url('http://')
)
self.assertFalse(
ProcessingRule.parse_url('')
)
self.assertFalse(
ProcessingRule.parse_url('.xn--hda.com/')
)
|
class TestProcessingRule(unittest.TestCase):
def test_parse_url_no_crash(self):
pass
| 2 | 0 | 13 | 0 | 13 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 73 | 14 | 0 | 14 | 2 | 12 | 0 | 6 | 2 | 4 | 1 | 2 | 0 | 1 |
6,881 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/rule_test.py
|
wpull.processor.rule_test.TestResultRule
|
class TestResultRule(unittest.TestCase):
def get_result_rule(self):
return ResultRule()
def test_handle_response(self):
result_rule = self.get_result_rule()
item_session = new_mock_item_session()
action = result_rule.handle_response(item_session)
self.assertEqual(Actions.NORMAL, action)
|
class TestResultRule(unittest.TestCase):
def get_result_rule(self):
pass
def test_handle_response(self):
pass
| 3 | 0 | 5 | 1 | 4 | 0 | 1 | 0 | 1 | 2 | 2 | 0 | 2 | 0 | 2 | 74 | 11 | 3 | 8 | 6 | 5 | 0 | 8 | 6 | 5 | 1 | 2 | 0 | 2 |
6,882 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/web.py
|
wpull.processor.web.HookPreResponseBreak
|
class HookPreResponseBreak(ProtocolError):
'''Hook pre-response break.'''
|
class HookPreResponseBreak(ProtocolError):
'''Hook pre-response break.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 5 | 0 | 0 |
6,883 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/web.py
|
wpull.processor.web.WebProcessor
|
class WebProcessor(BaseProcessor, HookableMixin):
'''HTTP processor.
Args:
web_client: The web client.
fetch_params: Fetch parameters
.. seealso:: :class:`WebProcessorSession`
'''
DOCUMENT_STATUS_CODES = (200, 204, 206, 304,)
'''Default status codes considered successfully fetching a document.'''
NO_DOCUMENT_STATUS_CODES = (401, 403, 404, 405, 410,)
'''Default status codes considered a permanent error.'''
def __init__(self, web_client: WebClient, fetch_params: WebProcessorFetchParams):
super().__init__()
self._web_client = web_client
self._fetch_params = fetch_params
self._session_class = WebProcessorSession
@property
def web_client(self) -> WebClient:
'''The web client.'''
return self._web_client
@property
def fetch_params(self) -> WebProcessorFetchParams:
'''The fetch parameters.'''
return self._fetch_params
@asyncio.coroutine
def process(self, item_session: ItemSession):
session = self._session_class(self, item_session)
try:
return (yield from session.process())
finally:
session.close()
def close(self):
'''Close the web client.'''
self._web_client.close()
|
class WebProcessor(BaseProcessor, HookableMixin):
'''HTTP processor.
Args:
web_client: The web client.
fetch_params: Fetch parameters
.. seealso:: :class:`WebProcessorSession`
'''
def __init__(self, web_client: WebClient, fetch_params: WebProcessorFetchParams):
pass
@property
def web_client(self) -> WebClient:
'''The web client.'''
pass
@property
def fetch_params(self) -> WebProcessorFetchParams:
'''The fetch parameters.'''
pass
@asyncio.coroutine
def process(self, item_session: ItemSession):
pass
def close(self):
'''Close the web client.'''
pass
| 9 | 4 | 4 | 0 | 3 | 1 | 1 | 0.48 | 2 | 4 | 3 | 0 | 5 | 3 | 5 | 29 | 43 | 9 | 23 | 15 | 14 | 11 | 19 | 12 | 13 | 1 | 4 | 1 | 5 |
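The two status-code tuples on WebProcessor above drive how a response is handled downstream: document codes are saved and scraped for links, no-document codes are treated as permanent errors, and everything else is a document error handled as transient. A small classifier makes that partition explicit; the category labels are descriptive, not wpull identifiers.

DOCUMENT_STATUS_CODES = (200, 204, 206, 304)
NO_DOCUMENT_STATUS_CODES = (401, 403, 404, 405, 410)


def classify_status(status_code):
    if status_code in DOCUMENT_STATUS_CODES:
        return 'document'       # saved and scraped for links
    if status_code in NO_DOCUMENT_STATUS_CODES:
        return 'no-document'    # permanent error, response discarded
    return 'document-error'     # e.g. 500: transient, retried later


for code in (200, 404, 500):
    print(code, classify_status(code))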
6,884 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/web.py
|
wpull.processor.web.WebProcessorSession
|
class WebProcessorSession(BaseProcessorSession):
'''Fetches an HTTP document.
This Processor Session will handle document redirects within the same
Session. HTTP errors such as 404 are considered permanent errors.
HTTP errors like 500 are considered transient errors and are handled in
subsequence sessions by marking the item as "error".
If a successful document has been downloaded, it will be scraped for
URLs to be added to the URL table. This Processor Session is very simple;
it cannot handle JavaScript or Flash plugins.
'''
def __init__(self, processor: WebProcessor, item_session: ItemSession):
super().__init__()
self._processor = processor
self._item_session = item_session
file_writer = cast(BaseFileWriter, item_session.app_session.factory['FileWriter'])
self._file_writer_session = file_writer.session()
self._web_client_session = None
self._document_codes = WebProcessor.DOCUMENT_STATUS_CODES
self._no_document_codes = WebProcessor.NO_DOCUMENT_STATUS_CODES
self._temp_files = set()
self._fetch_rule = cast(FetchRule, item_session.app_session.factory['FetchRule'])
self._result_rule = cast(ResultRule, item_session.app_session.factory['ResultRule'])
self._processing_rule = cast(ProcessingRule, item_session.app_session.factory['ProcessingRule'])
self._strong_redirects = self._processor.fetch_params.strong_redirects
def _new_initial_request(self, with_body: bool=True):
'''Return a new Request to be passed to the Web Client.'''
url_record = self._item_session.url_record
url_info = url_record.url_info
request = self._item_session.app_session.factory['WebClient'].request_factory(url_info.url)
self._populate_common_request(request)
if with_body:
if url_record.post_data or self._processor.fetch_params.post_data:
self._add_post_data(request)
if self._file_writer_session:
request = self._file_writer_session.process_request(request)
return request
def _populate_common_request(self, request):
'''Populate the Request with common fields.'''
url_record = self._item_session.url_record
# Note that referrer may have already been set by the --referer option
if url_record.parent_url and not request.fields.get('Referer'):
self._add_referrer(request, url_record)
if self._fetch_rule.http_login:
request.username, request.password = self._fetch_rule.http_login
@classmethod
def _add_referrer(cls, request: Request, url_record: URLRecord):
'''Add referrer URL to request.'''
# Prohibit leak of referrer from HTTPS to HTTP
# rfc7231 section 5.5.2.
if url_record.parent_url.startswith('https://') and \
url_record.url_info.scheme == 'http':
return
request.fields['Referer'] = url_record.parent_url
@asyncio.coroutine
def process(self):
ok = yield from self._process_robots()
if not ok:
return
self._processing_rule.add_extra_urls(self._item_session)
self._web_client_session = self._processor.web_client.session(
self._new_initial_request()
)
with self._web_client_session:
yield from self._process_loop()
if not self._item_session.is_processed:
_logger.debug('Was not processed. Skipping.')
self._item_session.skip()
@asyncio.coroutine
def _process_robots(self):
'''Process robots.txt.
Coroutine.
'''
try:
self._item_session.request = request = self._new_initial_request(with_body=False)
verdict, reason = (yield from self._should_fetch_reason_with_robots(
request))
except REMOTE_ERRORS as error:
_logger.error(
_('Fetching robots.txt for ‘{url}’ '
'encountered an error: {error}'),
url=self._next_url_info.url, error=error
)
self._result_rule.handle_error(self._item_session, error)
wait_time = self._result_rule.get_wait_time(
self._item_session, error=error
)
if wait_time:
_logger.debug('Sleeping {0}.', wait_time)
yield from asyncio.sleep(wait_time)
return False
else:
_logger.debug('Robots filter verdict {} reason {}', verdict, reason)
if not verdict:
self._item_session.skip()
return False
return True
@asyncio.coroutine
def _process_loop(self):
'''Fetch URL including redirects.
Coroutine.
'''
while not self._web_client_session.done():
self._item_session.request = self._web_client_session.next_request()
verdict, reason = self._should_fetch_reason()
_logger.debug('Filter verdict {} reason {}', verdict, reason)
if not verdict:
self._item_session.skip()
break
exit_early, wait_time = yield from self._fetch_one(cast(Request, self._item_session.request))
if wait_time:
_logger.debug('Sleeping {}', wait_time)
yield from asyncio.sleep(wait_time)
if exit_early:
break
@asyncio.coroutine
def _fetch_one(self, request: Request) -> Tuple[bool, float]:
'''Process one iteration of the loop.
Coroutine.
Returns:
If True, stop processing any future requests.
'''
_logger.info(_('Fetching ‘{url}’.'), url=request.url)
response = None
try:
response = yield from self._web_client_session.start()
self._item_session.response = response
action = self._result_rule.handle_pre_response(self._item_session)
if action in (Actions.RETRY, Actions.FINISH):
raise HookPreResponseBreak()
self._file_writer_session.process_response(response)
if not response.body:
response.body = Body(
directory=self._item_session.app_session.root_path,
hint='resp_cb'
)
yield from \
self._web_client_session.download(
file=response.body,
duration_timeout=self._fetch_rule.duration_timeout
)
except HookPreResponseBreak:
_logger.debug('Hook pre-response break.')
return True, None
except REMOTE_ERRORS as error:
self._log_error(request, error)
self._result_rule.handle_error(self._item_session, error)
wait_time = self._result_rule.get_wait_time(
self._item_session, error=error
)
if request.body:
request.body.close()
if response:
response.body.close()
return True, wait_time
else:
self._log_response(request, response)
action = self._handle_response(request, response)
wait_time = self._result_rule.get_wait_time(self._item_session)
yield from self._run_coprocessors(request, response)
response.body.close()
if request.body:
request.body.close()
return action != Actions.NORMAL, wait_time
def close(self):
'''Close any temp files.'''
for file in self._temp_files:
file.close()
@property
def _next_url_info(self) -> URLInfo:
'''Return the next URLInfo to be processed.
This returns either the original URLInfo or the next URLInfo
containing the redirect link.
'''
if not self._web_client_session:
return self._item_session.url_record.url_info
return self._web_client_session.next_request().url_info
def _should_fetch_reason(self) -> Tuple[bool, str]:
'''Return info about whether the URL should be fetched.
Returns:
tuple: A two item tuple:
1. bool: If True, the URL should be fetched.
2. str: A short reason string explaining the verdict.
'''
is_redirect = False
if self._strong_redirects:
try:
is_redirect = self._web_client_session.redirect_tracker\
.is_redirect()
except AttributeError:
pass
return self._fetch_rule.check_subsequent_web_request(
self._item_session, is_redirect=is_redirect)
@asyncio.coroutine
def _should_fetch_reason_with_robots(self, request: Request) -> Tuple[bool, str]:
'''Return info about whether the URL should be fetched, including checking
robots.txt.
Coroutine.
'''
result = yield from \
self._fetch_rule.check_initial_web_request(self._item_session, request)
return result
def _add_post_data(self, request: Request):
'''Add data to the payload.'''
if self._item_session.url_record.post_data:
data = wpull.string.to_bytes(self._item_session.url_record.post_data)
else:
data = wpull.string.to_bytes(
self._processor.fetch_params.post_data
)
request.method = 'POST'
request.fields['Content-Type'] = 'application/x-www-form-urlencoded'
request.fields['Content-Length'] = str(len(data))
_logger.debug('Posting with data {0}.', data)
if not request.body:
request.body = Body(io.BytesIO())
with wpull.util.reset_file_offset(request.body):
request.body.write(data)
def _log_response(self, request: Request, response: Response):
'''Log response.'''
_logger.info(
_('Fetched ‘{url}’: {status_code} {reason}. '
'Length: {content_length} [{content_type}].'),
url=request.url,
status_code=response.status_code,
reason=wpull.string.printable_str(response.reason),
content_length=wpull.string.printable_str(
response.fields.get('Content-Length', _('unspecified'))),
content_type=wpull.string.printable_str(
response.fields.get('Content-Type', _('unspecified'))),
)
def _handle_response(self, request: Request, response: Response) -> Actions:
'''Process the response.
Returns:
A value from :class:`.hook.Actions`.
'''
self._item_session.update_record_value(status_code=response.status_code)
if self._web_client_session.redirect_tracker.is_redirect() or \
self._web_client_session.loop_type() == LoopType.authentication:
self._file_writer_session.discard_document(response)
return self._result_rule.handle_intermediate_response(
self._item_session
)
elif (response.status_code in self._document_codes
or self._processor.fetch_params.content_on_error):
filename = self._file_writer_session.save_document(response)
self._processing_rule.scrape_document(self._item_session)
return self._result_rule.handle_document(
self._item_session, filename
)
elif response.status_code in self._no_document_codes:
self._file_writer_session.discard_document(response)
return self._result_rule.handle_no_document(
self._item_session
)
else:
self._file_writer_session.discard_document(response)
return self._result_rule.handle_document_error(
self._item_session
)
def _close_instance_body(self, instance):
'''Close any files on instance.
This function will attempt to call ``body.close`` on
the instance.
'''
if hasattr(instance, 'body'):
instance.body.close()
def _run_coprocessors(self, request: Request, response: Response):
phantomjs_coprocessor = self._item_session.app_session.factory.get('PhantomJSCoprocessor')
if phantomjs_coprocessor:
phantomjs_coprocessor = cast(PhantomJSCoprocessor, phantomjs_coprocessor)
yield from phantomjs_coprocessor.process(
self._item_session, request, response, self._file_writer_session
)
youtube_dl_coprocessor = self._item_session.app_session.factory.get('YoutubeDlCoprocessor')
if youtube_dl_coprocessor:
youtube_dl_coprocessor = cast(YoutubeDlCoprocessor, youtube_dl_coprocessor)
yield from youtube_dl_coprocessor.process(
self._item_session, request, response, self._file_writer_session
)
|
class WebProcessorSession(BaseProcessorSession):
'''Fetches an HTTP document.
This Processor Session will handle document redirects within the same
Session. HTTP errors such as 404 are considered permanent errors.
HTTP errors like 500 are considered transient errors and are handled in
subsequence sessions by marking the item as "error".
If a successful document has been downloaded, it will be scraped for
URLs to be added to the URL table. This Processor Session is very simple;
it cannot handle JavaScript or Flash plugins.
'''
def __init__(self, processor: WebProcessor, item_session: ItemSession):
pass
def _new_initial_request(self, with_body: bool=True):
'''Return a new Request to be passed to the Web Client.'''
pass
def _populate_common_request(self, request):
'''Populate the Request with common fields.'''
pass
@classmethod
def _add_referrer(cls, request: Request, url_record: URLRecord):
'''Add referrer URL to request.'''
pass
@asyncio.coroutine
def process(self):
pass
@asyncio.coroutine
def _process_robots(self):
'''Process robots.txt.
Coroutine.
'''
pass
@asyncio.coroutine
def _process_loop(self):
'''Fetch URL including redirects.
Coroutine.
'''
pass
@asyncio.coroutine
def _fetch_one(self, request: Request) -> Tuple[bool, float]:
'''Process one iteration of the loop.
Coroutine.
Returns:
If True, stop processing any future requests.
'''
pass
def close(self):
'''Close any temp files.'''
pass
@property
def _next_url_info(self) -> URLInfo:
'''Return the next URLInfo to be processed.
This returns either the original URLInfo or the next URLInfo
containing the redirect link.
'''
pass
def _should_fetch_reason(self) -> Tuple[bool, str]:
'''Return info about whether the URL should be fetched.
Returns:
tuple: A two item tuple:
1. bool: If True, the URL should be fetched.
2. str: A short reason string explaining the verdict.
'''
pass
@asyncio.coroutine
def _should_fetch_reason_with_robots(self, request: Request) -> Tuple[bool, str]:
'''Return info about whether the URL should be fetched, including checking
robots.txt.
Coroutine.
'''
pass
def _add_post_data(self, request: Request):
'''Add data to the payload.'''
pass
def _log_response(self, request: Request, response: Response):
'''Log response.'''
pass
def _handle_response(self, request: Request, response: Response) -> Actions:
'''Process the response.
Returns:
A value from :class:`.hook.Actions`.
'''
pass
def _close_instance_body(self, instance):
'''Close any files on instance.
This function will attempt to call ``body.close`` on
the instance.
'''
pass
def _run_coprocessors(self, request: Request, response: Response):
pass
| 25 | 15 | 20 | 4 | 13 | 2 | 3 | 0.22 | 1 | 22 | 16 | 0 | 16 | 11 | 17 | 38 | 367 | 88 | 228 | 59 | 203 | 51 | 169 | 49 | 151 | 8 | 4 | 2 | 51 |
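_add_referrer above implements the RFC 7231 section 5.5.2 rule that an HTTPS parent URL must not leak into the Referer header of a plain-HTTP request (the behaviour the TestWeb record earlier also exercises). The same rule as a stand-alone function, with an illustrative name:

def referer_for(parent_url, request_scheme):
    # Suppress the referrer when it would leak from HTTPS to plain HTTP.
    if parent_url.startswith('https://') and request_scheme == 'http':
        return None
    return parent_url


print(referer_for('https://example.com/', 'http'))   # None: suppressed
print(referer_for('http://example.com/', 'http'))    # kept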
6,885 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/processor/ftp_test.py
|
wpull.processor.ftp_test.TestFTP
|
class TestFTP(unittest.TestCase):
def test_to_dir_path_url(self):
self.assertEqual(
'ftp://putfile.com/',
to_dir_path_url(URLInfo.parse('ftp://putfile.com/'))
)
self.assertEqual(
'ftp://putfile.com/',
to_dir_path_url(URLInfo.parse('ftp://putfile.com/asdf'))
)
self.assertEqual(
'ftp://putfile.com/asdf/',
to_dir_path_url(URLInfo.parse('ftp://putfile.com/asdf/qwer'))
)
def test_append_slash_to_path_url(self):
self.assertEqual(
'ftp://putfile.com/example/',
append_slash_to_path_url(
URLInfo.parse('ftp://putfile.com/example')
)
)
|
class TestFTP(unittest.TestCase):
def test_to_dir_path_url(self):
pass
def test_append_slash_to_path_url(self):
pass
| 3 | 0 | 10 | 0 | 10 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 0 | 2 | 74 | 22 | 1 | 21 | 3 | 18 | 0 | 7 | 3 | 4 | 1 | 2 | 0 | 2 |
6,886 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament_fs/__init__.py
|
ligament_fs.Copy
|
class Copy(BuildTarget):
def __init__(self, filemap, **kwargs):
BuildTarget.__init__(self, **kwargs)
self.filemap = filemap
def build(self, template_scripts="", template_styles=""):
for key in self.filemap:
targets = glob.glob(key)
if len(targets) == 1 and key == targets[0]:
# if it's not a glob, copy to specified filename
mkdir_recursive(
os.path.dirname(self.filemap[key]))
self._copyfile_(targets[0], self.filemap[key])
else:
# otherwise, copy it to the folder
mkdir_recursive(self.filemap[key])
for f in targets:
self._copyfile_(f, self.filemap[key])
def _copyfile_(self, src, dest):
pdebug("[copy::%s] %s -> %s" %(
self.name,
os.path.relpath(src),
os.path.relpath(dest)), groups=["build_task"])
if os.path.isdir(src):
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(src, dest)
else:
if os.path.isdir(dest):
bn = os.path.basename(src)
dest = os.path.join(dest, bn)
shutil.copyfile(src, dest)
|
class Copy(BuildTarget):
def __init__(self, filemap, **kwargs):
pass
def build(self, template_scripts="", template_styles=""):
pass
def _copyfile_(self, src, dest):
pass
| 4 | 0 | 11 | 1 | 9 | 1 | 3 | 0.07 | 1 | 0 | 0 | 0 | 3 | 2 | 3 | 11 | 38 | 7 | 29 | 10 | 25 | 2 | 23 | 9 | 19 | 4 | 2 | 3 | 9 |
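Copy.build() above distinguishes an exact source path (copied to the mapped filename) from a glob pattern (every match copied into the mapped directory). Below is a simplified stand-alone version of that decision, handling plain files only; copy_mapping is an illustrative name and is not part of ligament.

import glob
import os
import shutil


def copy_mapping(filemap):
    for key, dest in filemap.items():
        targets = glob.glob(key)
        if len(targets) == 1 and key == targets[0]:
            # Not a glob: copy to the exact filename given in the map.
            os.makedirs(os.path.dirname(dest) or '.', exist_ok=True)
            shutil.copyfile(targets[0], dest)
        else:
            # A glob: copy every match into the destination folder.
            os.makedirs(dest, exist_ok=True)
            for path in targets:
                shutil.copyfile(path, os.path.join(dest, os.path.basename(path)))


# e.g. copy_mapping({'assets/logo.png': 'out/logo.png', 'assets/*.css': 'out/css'})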
6,887 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament/exceptions.py
|
ligament.exceptions.TaskExecutionException
|
class TaskExecutionException(Exception):
def __init__(self, header, *args, **vargs):
""" header : str
a short description of the error message
(kwarg) payload : (any)
A value to return in place of a normal return value
"""
self.header = header
"""the header for the error (short error message)"""
if "payload" in vargs:
self.payload = vargs["payload"]
del vargs["payload"]
else:
self.payload = None
"""The value this exception should default to"""
Exception.__init__(self, *args, **vargs)
|
class TaskExecutionException(Exception):
def __init__(self, header, *args, **vargs):
''' header : str
a short description of the error message
(kwarg) payload : (any)
A value to return in place of a normal return value
'''
pass
| 2 | 1 | 19 | 4 | 8 | 7 | 2 | 0.78 | 1 | 0 | 0 | 0 | 1 | 2 | 1 | 11 | 21 | 5 | 9 | 4 | 7 | 7 | 8 | 4 | 6 | 2 | 3 | 1 | 2 |
6,888 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament_precompiler_template/__init__.py
|
ligament_precompiler_template.Precompiler
|
class Precompiler(BuildTarget):
""" A reusable template for a precompiler task
classes that extend Precompiler must do the following at minimum:
declare external_template_string to a template string with a single
%s, where the value of the compiled filename will be placed
declare embed_template_string to a template string with a single
%s, where the compiled file's text will be placed
declare out_path_of
declare compile_file
"""
external_template_string = None
embed_template_string = None
def __init__(self,
minify=True,
embed=True,
concat=True,
source_dir=None,
target_dir=None,
build_targets=[],
relative_directory="./",
external_template_string=None,
embed_template_string=None,
**kwargs):
BuildTarget.__init__(self, **kwargs)
self.relative_directory = relative_directory
self.input_directory = os.path.abspath(source_dir)
self.output_directory = os.path.abspath(target_dir)
self.compiler_name = "???"
pdebug(self.input_directory)
pdebug(self.output_directory)
self.build_targets = [os.path.abspath(
os.path.join(
self.input_directory,
target))
for target in build_targets]
self.file_watch_targets = self.build_targets
if embed_template_string:
self.embed_template_string = embed_template_string
if external_template_string:
self.external_template_string = external_template_string
self.minify = minify
self.embed = embed
self.concat = concat
def out_path_of(self, in_path):
"""given the input path of a file, return the ouput path"""
raise Exception("Precompiler out_path_of not implemented!")
def compile_file(self, path):
"""given the path of a file, compile it and return the result"""
raise Exception("Precompiler compile_file not implemented!")
@capture_exception
@zip_with_output(skip_args=[0])
def compile_and_process(self, in_path):
"""compile a file, save it to the ouput file if the inline flag true"""
out_path = self.path_mapping[in_path]
if not self.embed:
pdebug("[%s::%s] %s -> %s" % (
self.compiler_name,
self.name,
os.path.relpath(in_path),
os.path.relpath(out_path)),
groups=["build_task"],
autobreak=True)
else:
pdebug("[%s::%s] %s -> <cache>" % (
self.compiler_name,
self.name,
os.path.relpath(in_path)),
groups=["build_task"],
autobreak=True)
compiled_string = self.compile_file(in_path)
if not self.embed:
if compiled_string != "":
with open(out_path, "w") as f:
f.write(compiled_string)
return compiled_string
def collect_output(self):
""" helper function to gather the results of `compile_and_process` on
all target files
"""
if self.embed:
if self.concat:
concat_scripts = [self.compiled_scripts[path]
for path in self.build_order]
return [self.embed_template_string % '\n'.join(concat_scripts)]
else:
return [self.embed_template_string %
self.compiled_scripts[path]
for path in self.build_order]
else:
return [self.external_template_string %
os.path.join(
self.relative_directory,
os.path.relpath(
self.out_path_of(path),
self.output_directory))
for path in self.build_order
if self.compiled_scripts[path] != ""]
def build(self):
"""build the scripts and return a string"""
if not self.embed:
mkdir_recursive(self.output_directory)
# get list of script files in build order
self.build_order = remove_dups(
reduce(lambda a, b: a + glob.glob(b),
self.build_targets,
[]))
self.build_order_output = [self.out_path_of(t)
for (t) in self.build_order]
self.path_mapping = dict(zip(
self.build_order,
self.build_order_output))
self.compiled_scripts = {}
exceptions, values = partition(
lambda x: isinstance(x, Exception),
[self.compile_and_process(target)
for target in self.build_order])
self.compiled_scripts.update(dict(values))
saneExceptions, insaneExceptions = partition(
lambda x: isinstance(x, TaskExecutionException),
exceptions)
if len(insaneExceptions) != 0:
raise insaneExceptions[0]
if len(exceptions) != 0:
raise TaskExecutionException(
"Precompiler Errors (%s):" % type(self).__name__,
"\n".join([
x.header + "\n " +
x.message.replace("\n", "\n ")
for x in exceptions]))
return self.collect_output()
def update_build(self, updated_files):
""" updates a build based on updated files
TODO implement this pls
"""
for f in updated_files:
self.compiled_scripts[f] = self.compile_and_process(f)
return self.collect_output()
|
class Precompiler(BuildTarget):
''' A reusable template for a precompiler task
classes that extend Precompiler must do the following at minimum:
declare external_template_string to a template string with a single
%s, where the value of the compiled filename will be placed
declare embed_template_string to a template string with a single
%s, where the compiled file's text will be placed
declare out_path_of
declare compile_file
'''
def __init__(self,
minify=True,
embed=True,
concat=True,
source_dir=None,
target_dir=None,
build_targets=[],
relative_directory="./",
external_template_string=None,
embed_template_string=None,
**kwargs):
pass
def out_path_of(self, in_path):
'''given the input path of a file, return the output path'''
pass
def compile_file(self, path):
'''given the path of a file, compile it and return the result'''
pass
@capture_exception
@zip_with_output(skip_args=[0])
def compile_and_process(self, in_path):
'''compile a file, save it to the output file unless the embed flag is true'''
pass
def collect_output(self):
''' helper function to gather the results of `compile_and_process` on
all target files
'''
pass
def build(self):
'''build the scripts and return a string'''
pass
def update_build(self, updated_files):
''' updates a build based on updated files
TODO implement this pls
'''
pass
| 10 | 7 | 21 | 3 | 16 | 2 | 3 | 0.17 | 1 | 5 | 1 | 0 | 7 | 14 | 7 | 15 | 174 | 37 | 117 | 42 | 97 | 20 | 61 | 29 | 53 | 4 | 2 | 3 | 18 |
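The Precompiler docstring above lists what a subclass must provide at minimum: the two template strings plus out_path_of and compile_file. Below is a minimal subclass sketch, assuming the ligament_precompiler_template package is importable; the upper-casing "compiler", the template strings, and the class name are toy values for illustration, and any constructor arguments beyond those shown in the record are assumptions.

import os

from ligament_precompiler_template import Precompiler


class UpperCaseCompiler(Precompiler):
    external_template_string = '<link href="%s">'
    embed_template_string = '<style>%s</style>'

    def __init__(self, **kwargs):
        Precompiler.__init__(self, **kwargs)
        self.compiler_name = 'upper'

    def out_path_of(self, in_path):
        # Keep the basename, but place it under the configured output dir.
        return os.path.join(self.output_directory, os.path.basename(in_path))

    def compile_file(self, path):
        # Toy "compilation": read the source and upper-case it.
        with open(path) as source:
            return source.read().upper()


# e.g. UpperCaseCompiler(source_dir='src', target_dir='out',
#                        build_targets=['*.css'], embed=False)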
6,889 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament/compositors.py
|
ligament.compositors.BuildTargetList
|
class BuildTargetList(BuildTargetFn):
"""An empty build target meant only to depend on other targets"""
def __init__(self, tasklist):
BuildTargetFn.__init__(self, lambda *args: args, *tasklist)
|
class BuildTargetList(BuildTargetFn):
'''An empty build target meant only to depend on other targets'''
def __init__(self, tasklist):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.33 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 11 | 5 | 1 | 3 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
6,890 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament/buildcontext.py
|
ligament.buildcontext.Context
|
class Context(object):
""" A sandboxed area that manages a set of build tasks and their
dependencies
"""
tasks = {}
""" A dict of ContextEntries by task name """
def register_task(self, name, task):
if name not in self.tasks:
self.tasks[name] = ContextEntry(name, task)
elif not self.tasks[name].task:
self.tasks[name].task = task
else:
perror("tried to register duplicate tasks under name \"%s\"" %
(name))
def _gettask(self, name):
if name not in self.tasks:
self.tasks[name] = ContextEntry(name)
return self.tasks[name]
def register_dependency(self, data_src, data_sink):
""" registers a dependency of data_src -> data_sink
by placing appropriate entries in provides_for and depends_on
"""
pdebug("registering dependency %s -> %s" % (data_src, data_sink))
if (data_src not in self._gettask(data_sink).depends_on):
self._gettask(data_sink).depends_on.append(data_src)
if (data_sink not in self._gettask(data_src).provides_for):
self._gettask(data_src).provides_for.append(data_sink)
def build_task(self, name):
""" Builds a task by name, resolving any dependencies on the way """
try:
self._gettask(name).value = (
self._gettask(name).task.resolve_and_build())
except TaskExecutionException as e:
perror(e.header, indent="+0")
perror(e.message, indent="+4")
self._gettask(name).value = e.payload
except Exception as e:
perror("error evaluating target '%s' %s" %
(name, type(self._gettask(name).task)))
perror(traceback.format_exc(e), indent='+4')
self._gettask(name).value = None
self._gettask(name).last_build_time = time.time()
def is_build_needed(self, data_sink, data_src):
""" returns true if data_src needs to be rebuilt, given that data_sink
has had a rebuild requested.
"""
return (self._gettask(data_src).last_build_time == 0 or
self._gettask(data_src).last_build_time <
self._gettask(data_sink).last_build_time)
def verify_valid_dependencies(self):
""" Checks if the assigned dependencies are valid
valid dependency graphs are:
- noncyclic (i.e. no `A -> B -> ... -> A`)
- Contain no undefined dependencies
(dependencies referencing undefined tasks)
"""
unobserved_dependencies = set(self.tasks.keys())
target_queue = []
while len(unobserved_dependencies) > 0:
target_queue = [unobserved_dependencies.pop()]
while target_queue is not []:
target_queue += unobserved_dependencies
# verify_provides_depends_match()
def deep_dependendants(self, target):
""" Recursively finds the dependents of a given build target.
Assumes the dependency graph is noncyclic
"""
direct_dependents = self._gettask(target).provides_for
return (direct_dependents +
reduce(
lambda a, b: a + b,
[self.deep_dependendants(x) for x in direct_dependents],
[]))
def resolve_dependency_graph(self, target):
""" resolves the build order for interdependent build targets
Assumes no cyclic dependencies
"""
targets = self.deep_dependendants(target)
# print "deep dependants:", targets
return sorted(targets,
cmp=lambda a, b:
1 if b in self.deep_dependendants(a) else
-1 if a in self.deep_dependendants(b) else
0)
def update_task(self, taskname, ignore_dependents=[]):
pout("updating task %s" % taskname)
last_value = self._gettask(taskname).value
self.build_task(taskname)
if last_value != self._gettask(taskname).value:
dependent_order = self.resolve_dependency_graph(taskname)
for index, dependent in enumerate(dependent_order):
if (dependent not in ignore_dependents and
self.tasks[dependent].last_build_time > 0):
self.update_task(
dependent,
ignore_dependents=dependent_order[index:])
else:
pdebug("no change in %s" % taskname)
def lock_task(self, name):
pass
def unlock_task(self, name):
pass
def expose_task(self, name):
self.tasks[name].exposed = True
|
class Context(object):
''' A sandboxed area that manages a set of build tasks and their
dependencies
'''
def register_task(self, name, task):
pass
def _gettask(self, name):
pass
def register_dependency(self, data_src, data_sink):
''' registers a dependency of data_src -> data_sink
by placing appropriate entries in provides_for and depends_on
'''
pass
def build_task(self, name):
''' Builds a task by name, resolving any dependencies on the way '''
pass
def is_build_needed(self, data_sink, data_src):
''' returns true if data_src needs to be rebuilt, given that data_sink
has had a rebuild requested.
'''
pass
def verify_valid_dependencies(self):
''' Checks if the assigned dependencies are valid
valid dependency graphs are:
- noncyclic (i.e. no `A -> B -> ... -> A`)
- Contain no undefined dependencies
(dependencies referencing undefined tasks)
'''
pass
def deep_dependendants(self, target):
''' Recursively finds the dependents of a given build target.
Assumes the dependency graph is noncyclic
'''
pass
def resolve_dependency_graph(self, target):
''' resolves the build order for interdependent build targets
Assumes no cyclic dependencies
'''
pass
def update_task(self, taskname, ignore_dependents=[]):
pass
def lock_task(self, name):
pass
def unlock_task(self, name):
pass
def expose_task(self, name):
pass
| 13 | 7 | 9 | 1 | 6 | 2 | 2 | 0.32 | 1 | 6 | 2 | 0 | 12 | 0 | 12 | 12 | 130 | 26 | 79 | 22 | 66 | 25 | 60 | 21 | 47 | 4 | 1 | 3 | 24 |
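The ordering machinery in the Context class above (deep_dependendants plus the Python-2-only sorted(cmp=...) call in resolve_dependency_graph) is easier to follow in isolation. The sketch below is a minimal standalone rendering of the same idea, assuming only a plain dict mapping task names to their provides_for lists; the task names and the toy graph are hypothetical, and functools.cmp_to_key stands in for the cmp= argument on Python 3.

from functools import cmp_to_key, reduce

# hypothetical provides_for graph: 'compile' feeds 'bundle' and 'lint',
# and 'bundle' feeds 'deploy'
provides_for = {
    'compile': ['bundle', 'lint'],
    'bundle': ['deploy'],
    'lint': [],
    'deploy': [],
}

def deep_dependants(target):
    # mirrors Context.deep_dependendants: every task that transitively
    # consumes `target`'s output
    direct = provides_for[target]
    return direct + reduce(lambda a, b: a + b,
                           [deep_dependants(x) for x in direct], [])

def resolve_dependency_graph(target):
    # same pairwise comparator as the method above, ported via cmp_to_key
    targets = deep_dependants(target)
    return sorted(targets, key=cmp_to_key(
        lambda a, b: 1 if b in deep_dependants(a) else
                     -1 if a in deep_dependants(b) else 0))

print(deep_dependants('compile'))   # ['bundle', 'lint', 'deploy']
print(resolve_dependency_graph('compile'))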
6,891 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament/buildcontext.py
|
ligament.buildcontext.ContextEntry
|
class ContextEntry(object):
""" A 'dumb' object holding metadata for a given build task """
def __init__(self, name, task=None):
self.name = name
self.task = task
self.last_build_time = 0
self.depends_on = []
self.provides_for = []
self.value = None
self.exposed = False
def __str__(self):
return json.dumps(dict([(key, str(self.__dict__[key]))
for key in filter(
lambda s: not s.startswith("_"),
self.__dict__)]),
indent=2)
|
class ContextEntry(object):
''' A 'dumb' object holding metadata for a given build task '''
def __init__(self, name, task=None):
pass
def __str__(self):
pass
| 3 | 1 | 7 | 0 | 7 | 0 | 1 | 0.07 | 1 | 3 | 0 | 0 | 2 | 8 | 2 | 2 | 18 | 2 | 15 | 11 | 12 | 1 | 11 | 10 | 8 | 1 | 1 | 0 | 2 |
6,892 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament/buildcontext.py
|
ligament.buildcontext.DeferredDependency
|
class DeferredDependency(object):
def __init__(self, *target_names, **kw):
        # Python 2.7 workaround for keyword arguments after *target_names
self.function = kw.get(
'function',
lambda **k: k.values()[0] if len(k) == 1 else k)
"""A kwarg function to be called on the results of all the dependencies
"""
self.keyword_chain = kw.get('keyword_chain', [])
"""The chain of attribute accesses on each target"""
self.parent = None
"""The name of the object this dependency provides for.
(Assigned in BuildTarget.register_with_context)"""
self.context = None
"""The context this dependency operates in"""
self.target_names = target_names
"""The name of buildtargets this DeferredDependency operates on"""
def resolve(self):
"""Builds all targets of this dependency and returns the result
of self.function on the resulting values
"""
values = {}
for target_name in self.target_names:
if self.context.is_build_needed(self.parent, target_name):
self.context.build_task(target_name)
if len(self.keyword_chain) == 0:
values[target_name] = self.context.tasks[target_name].value
else:
values[target_name] = reduce(
lambda task, name: getattr(task, name),
self.keyword_chain,
self.context.tasks[target_name].task)
return self.function(**values)
def get_context(self):
return self.context
    def get_parent(self):
        return self.parent
def __getattr__(self, name):
return DeferredDependency(
*self.target_names,
function=self.function,
keyword_chain=(self.keyword_chain + [name]))
|
class DeferredDependency(object):
def __init__(self, *target_names, **kw):
pass
def resolve(self):
'''Builds all targets of this dependency and returns the result
of self.function on the resulting values
'''
pass
def get_context(self):
pass
def get_parent(self):
pass
def __getattr__(self, name):
pass
| 6 | 1 | 9 | 1 | 6 | 2 | 2 | 0.35 | 1 | 0 | 0 | 0 | 5 | 5 | 5 | 5 | 52 | 10 | 31 | 13 | 25 | 11 | 22 | 13 | 16 | 4 | 1 | 2 | 8 |
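The __getattr__ override in DeferredDependency above is what allows callers to write something like DeferredDependency('foo').some_attr.other_attr: each attribute access returns a fresh dependency carrying a longer keyword_chain, and resolve() later folds getattr over that chain. Below is a self-contained sketch of just that mechanism; the DeferredAttr and Box names are hypothetical stand-ins, not part of ligament.

from functools import reduce

class DeferredAttr(object):
    """Records a chain of attribute names and applies it to a target later."""
    def __init__(self, chain=None):
        self.chain = chain or []

    def __getattr__(self, name):
        # every attribute access yields a new deferred with a longer chain
        return DeferredAttr(self.chain + [name])

    def resolve(self, target):
        return reduce(getattr, self.chain, target)

class Box(object):               # hypothetical stand-in for a built task
    class inner(object):
        value = 42

deferred = DeferredAttr().inner.value
print(deferred.resolve(Box))     # -> 42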
6,893 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament/compositors.py
|
ligament.compositors.BuildTargetMap
|
class BuildTargetMap(BuildTarget):
"""Folds a function over the results of a set of build targets"""
def __init__(self, task, binding={}):
        self.task = task
BuildTarget.__init__(self, binding)
def register_with_context(self, myname, context):
BuildTarget.register_with_context(self, myname, context)
self.task.name = myname + ".mapped_task"
def build(self, **binding):
# print [(key, type(val), len(val) if isinstance(val, list) else "")
# for key, val in binding.iteritems()]
minlen = -1
for name, values in binding.iteritems():
if isinstance(values, list):
if minlen == -1:
minlen = len(values)
else:
minlen = min(minlen, len(values))
outputs = []
for index in range(0, minlen):
calling_args = {}
for name, values in binding.iteritems():
calling_args[name] = (values[index]
if isinstance(values, list)
else values)
outputs.append(self.task.build(**calling_args))
return outputs
|
class BuildTargetMap(BuildTarget):
'''Folds a function over the results of a set of build targets'''
def __init__(self, task, binding={}):
pass
def register_with_context(self, myname, context):
pass
def build(self, **binding):
pass
| 4 | 1 | 10 | 2 | 8 | 1 | 3 | 0.13 | 1 | 2 | 0 | 0 | 3 | 1 | 3 | 11 | 36 | 9 | 24 | 10 | 20 | 3 | 21 | 10 | 17 | 7 | 2 | 3 | 9 |
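BuildTargetMap.build above effectively zips every list-valued binding down to the shortest list while broadcasting scalar bindings to each call. A standalone sketch of that behaviour, with made-up binding values:

def map_bindings(fn, **binding):
    # shortest length among the list-valued keyword arguments
    lengths = [len(v) for v in binding.values() if isinstance(v, list)]
    minlen = min(lengths) if lengths else 0
    outputs = []
    for index in range(minlen):
        call_args = {name: (values[index] if isinstance(values, list) else values)
                     for name, values in binding.items()}
        outputs.append(fn(**call_args))
    return outputs

print(map_bindings(lambda src, suffix: src + suffix,
                   src=['a', 'b', 'c'], suffix='.js'))
# -> ['a.js', 'b.js', 'c.js']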
6,894 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament/compositors.py
|
ligament.compositors.BuildTargetFold
|
class BuildTargetFold(BuildTarget):
"""Folds a function over the results of a set of build targets"""
def __init__(self, foldi, foldfn, *tasklist):
self.foldi = foldi
"""the initial value for the fold"""
self.foldfn = foldfn
"""the function to fold with"""
tl = ((("%" + str(len(tasklist)) + "d") % k, v)
for k, v in enumerate(tasklist))
d = dict(tl)
BuildTarget.__init__(self, d)
def build(self, **dependencies):
return reduce(
self.foldfn,
(dependencies[key] for key in sorted(dependencies.keys())),
self.foldi)
|
class BuildTargetFold(BuildTarget):
'''Folds a function over the results of a set of build targets'''
def __init__(self, foldi, foldfn, *tasklist):
pass
def build(self, **dependencies):
pass
| 3 | 1 | 9 | 2 | 6 | 1 | 1 | 0.23 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 10 | 21 | 5 | 13 | 7 | 10 | 3 | 9 | 7 | 6 | 1 | 2 | 0 | 2 |
6,895 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament/buildcontextfseventhandler.py
|
ligament.buildcontextfseventhandler.BuildContextFsEventHandler
|
class BuildContextFsEventHandler(FileSystemEventHandler):
""" A file system event handler for WatchDog that updates build tasks
(specified by glob in task.file_watch_targets).
"""
def __init__(self, context):
self.context = context
self.file_depends = {}
for name, entry in context.tasks.iteritems():
glob_targets = reduce(
lambda a, b: a + b,
[glob.glob(x) for x in entry.task.file_watch_targets],
[])
for file_target in glob_targets:
if file_target in self.file_depends:
self.file_depends[file_target].append(name)
else:
self.file_depends[file_target] = [name]
def on_modified(self, event):
if event.src_path in self.file_depends:
for name in self.file_depends[event.src_path]:
self.context.lock_task(name)
self.context.update_task(name)
self.context.unlock_task(name)
|
class BuildContextFsEventHandler(FileSystemEventHandler):
''' A file system event handler for WatchDog that updates build tasks
(specified by glob in task.file_watch_targets).
'''
def __init__(self, context):
pass
def on_modified(self, event):
pass
| 3 | 1 | 11 | 1 | 10 | 0 | 4 | 0.15 | 1 | 0 | 0 | 0 | 2 | 2 | 2 | 2 | 28 | 5 | 20 | 9 | 17 | 3 | 16 | 9 | 13 | 4 | 1 | 3 | 7 |
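The handler above is meant to be driven by watchdog's observer loop: build tasks register the files they care about through file_watch_targets, and on_modified pushes updates back into the build context. A minimal wiring sketch using watchdog's standard Observer API; the context object and the watch path are assumptions, and ligament itself is Python-2-era code (note the iteritems call above).

import time
from watchdog.observers import Observer

# `context` is assumed to be an already-populated ligament build context
handler = BuildContextFsEventHandler(context)

observer = Observer()
observer.schedule(handler, path='.', recursive=True)   # watch the project tree
observer.start()
try:
    while True:
        time.sleep(1)    # rebuilds happen inside the on_modified callbacks
except KeyboardInterrupt:
    observer.stop()
observer.join()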
6,896 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament/buildtarget.py
|
ligament.buildtarget.BuildTarget
|
class BuildTarget(object):
""" An action in ligament
BuildTargets exist within Build Contexts
(see ligament.buildcontext.Context)
Tasks extending buildtarget are expected to pass the keyword argument
data_dependencies up from their declaration.
"""
@property
def name(self):
return (self._name
if self._name is not None
else "<unnamed>")
@name.setter
def name(self, name):
self._name = name
def __init__(self,
data_dependencies={}):
self.data_dependencies = data_dependencies
""" A dict of names -> (DeferredDependencies or values).
when a build is requested, the DeferredDependencies are evaluated,
and the resulting dict is passed as kwargs to self.build()
for example
SomeBuildTarget(
data_dependencies={
"foo": DeferredDependency("bar"),
"baz": DeferredDependency("quod"),
"bul": 4
})
will mean that `SomeBuildTarget.build` is called with kwargs
SomeBuildTarget.build(
foo=<value of bar>,
baz=<value of quod>,
bul=4)
"""
self._name = None
""" The name of this task in its registered build context """
self.context = None
""" The build context this target is registered with """
self.file_watch_targets = []
""" The list of files this build target wants to be notified of """
def register_with_context(self, myname, context):
""" registers this build target (exclusively) with a given context """
if self.context is not None:
raise Exception("attempted to register BuildTarget with multiple "
"BuildContexts")
context.register_task(myname, self)
self._name = myname
self.context = context
for key in self.data_dependencies:
if type(self.data_dependencies[key]) is DeferredDependency:
self.data_dependencies[key].parent = myname
self.data_dependencies[key].context = context
                for tname in self.data_dependencies[key].target_names:
                    context.register_dependency(tname, myname)
def resolve_dependencies(self):
""" evaluate each of the data dependencies of this build target,
returns the resulting dict"""
return dict(
[((key, self.data_dependencies[key])
if type(self.data_dependencies[key]) != DeferredDependency
else (key, self.data_dependencies[key].resolve()))
for key in self.data_dependencies])
def resolve_and_build(self):
""" resolves the dependencies of this build target and builds it """
pdebug("resolving and building task '%s'" % self.name,
groups=["build_task"])
indent_text(indent="++2")
toret = self.build(**self.resolve_dependencies())
indent_text(indent="--2")
return toret
    def build(self):
        """ (abstract) perform some task and return the result.
            Also assigns the value of self.file_watch_targets """
        raise Exception("build not implemented for %s" % type(self))
    def update_build(self, changedfiles):
        """ (abstract) updates the task given a list of changed files """
        raise Exception("update_build not implemented for %s" % type(self))
|
class BuildTarget(object):
''' An action in ligament
BuildTargets exist within Build Contexts
(see ligament.buildcontext.Context)
Tasks extending buildtarget are expected to pass the keyword argument
data_dependencies up from their declaration.
'''
@property
def name(self):
pass
@name.setter
    def name(self, name):
pass
def __init__(self,
data_dependencies={}):
pass
def register_with_context(self, myname, context):
''' registers this build target (exclusively) with a given context '''
pass
def resolve_dependencies(self):
''' evaluate each of the data dependencies of this build target,
returns the resulting dict'''
pass
def resolve_and_build(self):
''' resolves the dependencies of this build target and builds it '''
pass
def build(self):
''' (abstract) perform some task and return the result.
            Also assigns the value of self.file_watch_targets '''
pass
def update_build(self, changedfiles):
''' (abstract) updates the task given a list of changed files '''
pass
| 11 | 6 | 10 | 1 | 6 | 3 | 2 | 0.68 | 1 | 4 | 1 | 5 | 8 | 4 | 8 | 8 | 99 | 20 | 47 | 19 | 35 | 32 | 36 | 16 | 27 | 5 | 1 | 3 | 14 |
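resolve_dependencies above turns a mixed dict of literal values and DeferredDependency objects into the plain keyword arguments that build() receives, matching the foo/baz/bul example in the data_dependencies docstring. A condensed standalone sketch of that resolution step; FakeDeferred is a hypothetical stand-in for DeferredDependency.

class FakeDeferred(object):
    """Stand-in for DeferredDependency: resolve() would build the dependency."""
    def __init__(self, value):
        self._value = value
    def resolve(self):
        return self._value

data_dependencies = {'foo': FakeDeferred('value of bar'),
                     'baz': FakeDeferred('value of quod'),
                     'bul': 4}

resolved = dict(
    (key, value.resolve() if isinstance(value, FakeDeferred) else value)
    for key, value in data_dependencies.items())

print(resolved)   # {'foo': 'value of bar', 'baz': 'value of quod', 'bul': 4}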
6,897 |
Archived-Object/ligament
|
Archived-Object_ligament/ligament/compositors.py
|
ligament.compositors.BuildTargetFn
|
class BuildTargetFn(BuildTarget):
""" Calls a function on the results of a list of build tasks (positionally)
"""
def __init__(self, fn, *tasklist):
self.fn = fn
tl = ((("%0" + str(math.ceil(math.log(len(tasklist), 10))) + "d") % k, v)
for k, v in enumerate(tasklist))
d = dict(tl)
BuildTarget.__init__(self, d)
def build(self, **dependencies):
return self.fn(*(dependencies[key]
for key in sorted(dependencies.keys())))
|
class BuildTargetFn(BuildTarget):
''' Calls a function on the results of a list of build tasks (positionally)
'''
def __init__(self, fn, *tasklist):
pass
def build(self, **dependencies):
pass
| 3 | 1 | 6 | 1 | 5 | 0 | 1 | 0.2 | 1 | 3 | 0 | 1 | 2 | 1 | 2 | 10 | 16 | 4 | 10 | 6 | 7 | 2 | 8 | 6 | 5 | 1 | 2 | 0 | 2 |
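Both BuildTargetFold and BuildTargetFn above recover positional order from keyword-only dependencies by giving them fixed-width numeric names and sorting the keys as strings. A tiny illustration, with toy values, of why the fixed width matters:

unpadded = {'0': 'a', '2': 'c', '10': 'k'}
padded = {'00': 'a', '02': 'c', '10': 'k'}

print([unpadded[k] for k in sorted(unpadded)])   # ['a', 'k', 'c']  (lexicographic order breaks)
print([padded[k] for k in sorted(padded)])       # ['a', 'c', 'k']  (declaration order preserved)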
6,898 |
ArduPilot/MAVProxy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArduPilot_MAVProxy/MAVProxy/modules/mavproxy_param.py
|
MAVProxy.modules.mavproxy_param.ParamState.ParamSet
|
class ParamSet():
'''class to hold information about a parameter set being attempted'''
def __init__(self, master, name, value, param_type=None, attempts=None):
self.master = master
self.name = name
self.value = value
self.param_type = param_type
self.attempts_remaining = attempts
self.retry_interval = 1 # seconds
self.last_value_received = None
if self.attempts_remaining is None:
self.attempts_remaining = 3
self.request_sent = 0 # this is a timestamp
def normalize_parameter_for_param_set_send(self, name, value, param_type):
'''uses param_type to convert value into a value suitable for passing
into the mavlink param_set_send binding. Note that this
is a copy of a method in pymavlink, in case the user has
an older version of that library.
'''
if param_type is not None and param_type != mavutil.mavlink.MAV_PARAM_TYPE_REAL32:
# need to encode as a float for sending
if param_type == mavutil.mavlink.MAV_PARAM_TYPE_UINT8:
vstr = struct.pack(">xxxB", int(value))
elif param_type == mavutil.mavlink.MAV_PARAM_TYPE_INT8:
vstr = struct.pack(">xxxb", int(value))
elif param_type == mavutil.mavlink.MAV_PARAM_TYPE_UINT16:
vstr = struct.pack(">xxH", int(value))
elif param_type == mavutil.mavlink.MAV_PARAM_TYPE_INT16:
vstr = struct.pack(">xxh", int(value))
elif param_type == mavutil.mavlink.MAV_PARAM_TYPE_UINT32:
vstr = struct.pack(">I", int(value))
elif param_type == mavutil.mavlink.MAV_PARAM_TYPE_INT32:
vstr = struct.pack(">i", int(value))
else:
print("can't send %s of type %u" % (name, param_type))
return None
numeric_value, = struct.unpack(">f", vstr)
else:
if isinstance(value, str) and value.lower().startswith('0x'):
numeric_value = int(value[2:], 16)
else:
try:
numeric_value = float(value)
except ValueError:
print(
f"can't convert {name} ({value}, {type(value)}) to float")
return None
return numeric_value
def send_set(self):
numeric_value = self.normalize_parameter_for_param_set_send(
self.name, self.value, self.param_type)
if numeric_value is None:
print(f"can't send {self.name} of type {self.param_type}")
self.attempts_remaining = 0
return
# print(f"Sending set attempts-remaining={self.attempts_remaining}")
self.master.param_set_send(
self.name.upper(),
numeric_value,
parm_type=self.param_type,
)
self.request_sent = time.time()
self.attempts_remaining -= 1
def expired(self):
if self.attempts_remaining > 0:
return False
return time.time() - self.request_sent > self.retry_interval
def due_for_retry(self):
if self.attempts_remaining <= 0:
return False
return time.time() - self.request_sent > self.retry_interval
def handle_PARAM_VALUE(self, m, value):
'''handle PARAM_VALUE packet m which has already been checked for a
match against self.name. Returns true if this Set is now
satisfied. value is the value extracted and potentially
manipulated from the packet
'''
self.last_value_received = value
if abs(value - float(self.value)) > 0.00001:
return False
return True
def print_expired_message(self):
reason = ""
if self.last_value_received is None:
reason = " (no PARAM_VALUE received)"
else:
reason = f" (invalid returned value {self.last_value_received})"
print(f"Failed to set {self.name} to {self.value}{reason}")
|
class ParamSet():
'''class to hold information about a parameter set being attempted'''
def __init__(self, master, name, value, param_type=None, attempts=None):
pass
def normalize_parameter_for_param_set_send(self, name, value, param_type):
'''uses param_type to convert value into a value suitable for passing
into the mavlink param_set_send binding. Note that this
is a copy of a method in pymavlink, in case the user has
an older version of that library.
'''
pass
def send_set(self):
pass
def expired(self):
pass
def due_for_retry(self):
pass
def handle_PARAM_VALUE(self, m, value):
'''handle PARAM_VALUE packet m which has already been checked for a
match against self.name. Returns true if this Set is now
satisfied. value is the value extracted and potentially
manipulated from the packet
'''
pass
def print_expired_message(self):
pass
| 8 | 3 | 13 | 1 | 10 | 2 | 3 | 0.21 | 0 | 5 | 0 | 0 | 7 | 8 | 7 | 7 | 96 | 10 | 73 | 20 | 65 | 15 | 60 | 20 | 52 | 10 | 0 | 3 | 22 |
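normalize_parameter_for_param_set_send above smuggles non-float parameter types through the float-typed PARAM_SET field by packing the integer into the raw bytes of a big-endian 4-byte float. A standalone round-trip sketch using only struct; the value 17 and the UINT16 layout are just an example.

import struct

value = 17                                # integer parameter value to send
packed = struct.pack(">xxH", value)       # 2 pad bytes + uint16, 4 bytes total
as_float, = struct.unpack(">f", packed)   # reinterpret those bytes as a float

# the receiving side can reverse the trick: pack the float, read the integer back
raw = struct.pack(">f", as_float)
recovered, = struct.unpack(">xxH", raw)
assert recovered == value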
6,899 |
ArduPilot/MAVProxy
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArduPilot_MAVProxy/MAVProxy/modules/mavproxy_magical/magical_ui.py
|
MAVProxy.modules.mavproxy_magical.magical_ui.ReportDialog.StatusIcon
|
class StatusIcon(wx.PyWindow):
success_color = '#00ed00'
failure_color = '#d81313'
def __init__(self, *k, **kw):
super(ReportDialog.StatusIcon, self).__init__(*k, **kw)
self.success = True
self.Bind(wx.EVT_PAINT, self.OnPaint)
def Success(self, success):
self.success = success
self.Refresh()
def OnPaint(self, evt):
dc = wx.BufferedPaintDC(self)
self.Draw(dc)
def Draw(self, dc):
width, height = self.GetClientSize()
if not width or not height:
return
gcdc = wx.GCDC(dc)
gcdc.SetPen(wx.NullPen)
bg = self.GetParent().GetBackgroundColour()
gcdc.SetBackground(wx.Brush(bg, wx.SOLID))
gcdc.Clear()
color = self.success_color if self.success else self.failure_color
gcdc.SetBrush(wx.Brush(color))
x = width / 2
y = height / 2
gcdc.DrawCircle(x, y, min(x, y))
|
class StatusIcon(wx.PyWindow):
def __init__(self, *k, **kw):
pass
def Success(self, success):
pass
def OnPaint(self, evt):
pass
def Draw(self, dc):
pass
| 5 | 0 | 7 | 1 | 6 | 0 | 2 | 0.07 | 1 | 2 | 1 | 0 | 4 | 1 | 4 | 4 | 36 | 9 | 27 | 15 | 22 | 2 | 27 | 15 | 22 | 3 | 1 | 1 | 6 |