Dataset schema (35 columns; Min/Max are value ranges for numeric columns and string lengths for stringlengths columns):

Column                       Type            Min      Max
id                           int64           0        843k
repository_name              stringlengths   7        55
file_path                    stringlengths   9        332
class_name                   stringlengths   3        290
human_written_code           stringlengths   12       4.36M
class_skeleton               stringlengths   19       2.2M
total_program_units          int64           1        9.57k
total_doc_str                int64           0        4.2k
AvgCountLine                 float64         0        7.89k
AvgCountLineBlank            float64         0        300
AvgCountLineCode             float64         0        7.89k
AvgCountLineComment          float64         0        7.89k
AvgCyclomatic                float64         0        130
CommentToCodeRatio           float64         0        176
CountClassBase               float64         0        48
CountClassCoupled            float64         0        589
CountClassCoupledModified    float64         0        581
CountClassDerived            float64         0        5.37k
CountDeclInstanceMethod      float64         0        4.2k
CountDeclInstanceVariable    float64         0        299
CountDeclMethod              float64         0        4.2k
CountDeclMethodAll           float64         0        4.2k
CountLine                    float64         1        115k
CountLineBlank               float64         0        9.01k
CountLineCode                float64         0        94.4k
CountLineCodeDecl            float64         0        46.1k
CountLineCodeExe             float64         0        91.3k
CountLineComment             float64         0        27k
CountStmt                    float64         1        93.2k
CountStmtDecl                float64         0        46.1k
CountStmtExe                 float64         0        90.2k
MaxCyclomatic                float64         0        759
MaxInheritanceTree           float64         0        16
MaxNesting                   float64         0        34
SumCyclomatic                float64         0        6k
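The records that follow pair each class's full source (human_written_code) with a pass-bodied class_skeleton and the 29 static metrics above. As a minimal sketch of how records with this schema could be loaded and queried, assuming a local copy exported as class_metrics.parquet and the pandas toolchain (the file name and toolchain are assumptions; neither is named in this listing):

# Hypothetical loading sketch for the schema above; "class_metrics.parquet"
# is an assumed local export, not a published file name.
import pandas as pd

df = pd.read_parquet("class_metrics.parquet")

# Look up one record by its fully qualified class name.
row = df.loc[df["class_name"] == "wpull.document.css.CSSReader"].iloc[0]
print(row["repository_name"], row["file_path"])
print("declared methods:", int(row["CountDeclMethod"]),
      "summed cyclomatic complexity:", int(row["SumCyclomatic"]))

# Most metric columns are stored as float64 even though they hold integral
# counts, so cast before treating them as integers.
coverage = df["total_doc_str"] / df["total_program_units"].clip(lower=1)
print("mean docstrings per program unit:", coverage.mean())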
id: 6,600 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/document/css.py | class_name: wpull.document.css.CSSReader
class CSSReader(BaseDocumentDetector, BaseTextStreamReader): '''Cascading Stylesheet Document Reader.''' URL_PATTERN = r'''url\(\s*(['"]?)(.{1,500}?)(?:\1)\s*\)''' IMPORT_URL_PATTERN = r'''@import\s*(?:url\()?['"]?([^\s'")]{1,500}).*?;''' URL_REGEX = re.compile(r'{}|{}'.format(URL_PATTERN, IMPORT_URL_PATTERN)) BUFFER_SIZE = 1048576 STREAM_REWIND = 4096 @classmethod def is_url(cls, url_info): '''Return whether the document is likely to be CSS.''' if '.css' in url_info.path.lower(): return True @classmethod def is_request(cls, request): '''Return whether the document is likely to be CSS.''' return cls.is_url(request.url_info) @classmethod def is_response(cls, response): '''Return whether the document is likely to be CSS.''' if 'css' in response.fields.get('content-type', '').lower(): return True if response.body: # Stylesheet mistakenly served as HTML if 'html' in response.fields.get('content-type', '').lower(): return cls.is_file(response.body) @classmethod def is_file(cls, file): '''Return whether the file is likely CSS.''' peeked_data = wpull.string.printable_bytes( wpull.util.peek_file(file)).lower() if b'<html' in peeked_data: return VeryFalse if re.search(br'@import |color:|background[a-z-]*:|font[a-z-]*:', peeked_data): return True def iter_text(self, file, encoding=None): if isinstance(file, io.TextIOBase): stream = file else: stream = codecs.getreader(encoding or 'latin1')(file) regex_stream = RegexStream(stream, self.URL_REGEX) for match, text in regex_stream.stream(): if match: yield (text, 'import' if match.group(3) else 'url') else: yield (text, False)
class CSSReader(BaseDocumentDetector, BaseTextStreamReader): '''Cascading Stylesheet Document Reader.''' @classmethod def is_url(cls, url_info): '''Return whether the document is likely to be CSS.''' pass @classmethod def is_request(cls, request): '''Return whether the document is likely to be CSS.''' pass @classmethod def is_response(cls, response): '''Return whether the document is likely to be CSS.''' pass @classmethod def is_file(cls, file): '''Return whether the file is likely CSS.''' pass def iter_text(self, file, encoding=None): pass
Metrics:
  total_program_units=10, total_doc_str=5
  AvgCountLine=8, AvgCountLineBlank=1, AvgCountLineCode=6, AvgCountLineComment=1, AvgCyclomatic=3, CommentToCodeRatio=0.15
  CountClassBase=2, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=1
  CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=5, CountDeclMethodAll=32
  CountLine=56, CountLineBlank=10, CountLineCode=40, CountLineCodeDecl=19, CountLineCodeExe=30, CountLineComment=6
  CountStmt=32, CountStmtDecl=15, CountStmtExe=26
  MaxCyclomatic=5, MaxInheritanceTree=4, MaxNesting=2, SumCyclomatic=15
id: 6,601 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/document/css_test.py | class_name: wpull.document.css_test.TestCSS
class TestCSS(unittest.TestCase): def test_css_detect(self): self.assertTrue(CSSReader.is_file( io.BytesIO('body { color: white }'.encode('utf-16le')) )) self.assertFalse(CSSReader.is_file( io.BytesIO('hello world!'.encode('utf-16le')) )) self.assertFalse(CSSReader.is_file( io.BytesIO(b'<html><body>hello') )) self.assertTrue(CSSReader.is_file( io.BytesIO(b'<html><body>hello') ) is VeryFalse) self.assertTrue(CSSReader.is_file( io.BytesIO(b'h1 { background-color: red }') )) self.assertTrue(CSSReader.is_file( io.BytesIO(b'@import url.css;') )) self.assertTrue( CSSReader.is_url(URLInfo.parse('example.com/index.css')) ) self.assertFalse( CSSReader.is_url(URLInfo.parse('example.com/image.jpg')) ) self.assertTrue( CSSReader.is_request(Request('example.com/index.css')) ) self.assertFalse( CSSReader.is_request(Request('example.com/image.jpg')) ) response = Response(200, 'OK') response.fields['Content-Type'] = 'text/css' self.assertTrue(CSSReader.is_response(response)) response = Response(200, 'OK') response.fields['Content-Type'] = 'image/png' self.assertFalse(CSSReader.is_response(response)) def test_css_links_simple(self): css_data = b'''@import url('wow.css'); body { background: url('cool.png') } ''' reader = CSSReader() links = set() for link in reader.iter_links( io.BytesIO(css_data), encoding='ascii', context=True): links.add(link) self.assertEqual( { ('wow.css', 'import'), ('cool.png', 'url') }, links ) def test_css_read_links_big(self): css_data = b'\n'.join( [ 'url(blah{0});'.format(num).encode('ascii') for num in range(100000) ] ) reader = CSSReader() self.assertGreater(len(css_data), reader.BUFFER_SIZE) links = set() for link in reader.iter_links( io.BytesIO(css_data), encoding='ascii'): links.add(link) self.assertEqual(len(links), 100000) def test_css_read_links_huge(self): css_data = b'\n'.join( [ 'url(blah{0});'.format(num).encode('ascii') for num in range(200000) ] ) reader = CSSReader() self.assertGreater(len(css_data), reader.BUFFER_SIZE) links = set() for link in reader.iter_links( io.BytesIO(css_data), encoding='ascii'): links.add(link) self.assertEqual(len(links), 200000)
class TestCSS(unittest.TestCase): def test_css_detect(self): pass def test_css_links_simple(self): pass def test_css_read_links_big(self): pass def test_css_read_links_huge(self): pass
Metrics:
  total_program_units=5, total_doc_str=0
  AvgCountLine=23, AvgCountLineBlank=3, AvgCountLineCode=20, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0
  CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=4, CountClassDerived=0
  CountDeclInstanceMethod=4, CountDeclInstanceVariable=0, CountDeclMethod=4, CountDeclMethodAll=76
  CountLine=97, CountLineBlank=15, CountLineCode=82, CountLineCodeDecl=18, CountLineCodeExe=75, CountLineComment=0
  CountStmt=41, CountStmtDecl=18, CountStmtExe=36
  MaxCyclomatic=2, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=7
id: 6,602 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/document/html.py | class_name: wpull.document.html.HTMLLightParserTarget
class HTMLLightParserTarget(object): '''An HTML parser target for partial elements. Args: callback: A callback function. The function should accept the arguments: 1. `tag` (str): The tag name of the element. 2. `attrib` (dict): The attributes of the element. 3. `text` (str, None): The text of the element. text_elements: A frozenset of element tag names that we should keep track of text. ''' def __init__(self, callback, text_elements=frozenset( ['style', 'script', 'link', 'url', 'icon'])): self.callback = callback self.text_elements = text_elements self.tag = None self.attrib = None self.buffer = None def start(self, tag, attrib): if tag not in self.text_elements: self.callback(tag, attrib, None) return if self.buffer: self.callback(self.tag, self.attrib, self.buffer.getvalue()) self.tag = tag self.attrib = attrib self.buffer = io.StringIO() def data(self, data): if self.buffer: self.buffer.write(data) def end(self, tag): if self.buffer: self.callback(self.tag, self.attrib, self.buffer.getvalue()) self.buffer = None def close(self): if self.buffer: self.callback(self.tag, self.attrib, self.buffer.getvalue()) return True
class HTMLLightParserTarget(object): '''An HTML parser target for partial elements. Args: callback: A callback function. The function should accept the arguments: 1. `tag` (str): The tag name of the element. 2. `attrib` (dict): The attributes of the element. 3. `text` (str, None): The text of the element. text_elements: A frozenset of element tag names that we should keep track of text. ''' def __init__(self, callback, text_elements=frozenset( ['style', 'script', 'link', 'url', 'icon'])): pass def start(self, tag, attrib): pass def data(self, data): pass def end(self, tag): pass def close(self): pass
Metrics:
  total_program_units=6, total_doc_str=1
  AvgCountLine=6, AvgCountLineBlank=1, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.34
  CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=5, CountDeclInstanceVariable=5, CountDeclMethod=5, CountDeclMethodAll=5
  CountLine=49, CountLineBlank=10, CountLineCode=29, CountLineCodeDecl=13, CountLineCodeExe=21, CountLineComment=10
  CountStmt=27, CountStmtDecl=11, CountStmtExe=21
  MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=10
id: 6,603 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/document/html.py | class_name: wpull.document.html.HTMLParserTarget
class HTMLParserTarget(object): '''An HTML parser target. Args: callback: A callback function. The function should accept the arguments: 1. `tag` (str): The tag name of the element. 2. `attrib` (dict): The attributes of the element. 3. `text` (str, None): The text of the element. 4. `tail` (str, None): The text after the element. 5. `end` (bool): Whether the tag is and end tag. ''' def __init__(self, callback): self.callback = callback self.tag = None self.attrib = None self.buffer = None self.tail_buffer = None def start(self, tag, attrib): if self.buffer: self.callback( self.tag, self.attrib, self.buffer.getvalue(), None ) self.buffer = None if self.tail_buffer: self.callback( self.tag, None, None, self.tail_buffer.getvalue(), True ) self.tail_buffer = None self.tag = tag self.attrib = attrib self.buffer = io.StringIO() def data(self, data): if self.buffer: self.buffer.write(data) if self.tail_buffer: self.tail_buffer.write(data) def end(self, tag): if self.buffer: self.callback( tag, self.attrib, self.buffer.getvalue(), None ) self.buffer = None if self.tail_buffer: self.callback( self.tag, None, None, self.tail_buffer.getvalue(), True ) self.tail_buffer = None self.tail_buffer = io.StringIO() self.tag = tag def comment(self, text): self.callback(COMMENT, None, text, None) def close(self): if self.buffer: self.callback( self.tag, self.attrib, self.buffer.getvalue(), None ) self.buffer = None if self.tail_buffer: self.callback( self.tag, None, None, self.tail_buffer.getvalue(), True ) self.tail_buffer = None return True
class HTMLParserTarget(object): '''An HTML parser target. Args: callback: A callback function. The function should accept the arguments: 1. `tag` (str): The tag name of the element. 2. `attrib` (dict): The attributes of the element. 3. `text` (str, None): The text of the element. 4. `tail` (str, None): The text after the element. 5. `end` (bool): Whether the tag is and end tag. ''' def __init__(self, callback): pass def start(self, tag, attrib): pass def data(self, data): pass def end(self, tag): pass def comment(self, text): pass def close(self): pass
Metrics:
  total_program_units=7, total_doc_str=1
  AvgCountLine=12, AvgCountLineBlank=1, AvgCountLineCode=11, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.15
  CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=6, CountDeclInstanceVariable=5, CountDeclMethod=6, CountDeclMethodAll=6
  CountLine=92, CountLineBlank=14, CountLineCode=68, CountLineCodeDecl=12, CountLineCodeExe=61, CountLineComment=10
  CountStmt=41, CountStmtDecl=12, CountStmtExe=34
  MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=14
id: 6,604 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/document/html.py | class_name: wpull.document.html.HTMLReadElement
class HTMLReadElement(object): '''Results from :meth:`HTMLReader.read_links`. Attributes: tag (str): The element tag name. attrib (dict): The element attributes. text (str, None): The element text. tail (str, None): The text after the element. end (bool): Whether the tag is an end tag. ''' __slots__ = ('tag', 'attrib', 'text', 'tail', 'end') def __init__(self, tag, attrib, text, tail, end): self.tag = tag self.attrib = attrib self.text = text self.tail = tail self.end = end def __repr__(self): return 'HTMLReadElement({0}, {1}, {2}, {3}, {4})'.format( repr(self.tag), repr(self.attrib), repr(self.text), repr(self.tail), repr(self.end) )
class HTMLReadElement(object): '''Results from :meth:`HTMLReader.read_links`. Attributes: tag (str): The element tag name. attrib (dict): The element attributes. text (str, None): The element text. tail (str, None): The text after the element. end (bool): Whether the tag is an end tag. ''' def __init__(self, tag, attrib, text, tail, end): pass def __repr__(self): pass
Metrics:
  total_program_units=3, total_doc_str=1
  AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.62
  CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=2
  CountLine=24, CountLineBlank=3, CountLineCode=13, CountLineCodeDecl=9, CountLineCodeExe=10, CountLineComment=8
  CountStmt=10, CountStmtDecl=9, CountStmtExe=7
  MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 6,605 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/document/base.py | class_name: wpull.document.base.VeryFalseType
class VeryFalseType(object): def __bool__(self): return False
class VeryFalseType(object): def __bool__(self): pass
Metrics:
  total_program_units=2, total_doc_str=0
  AvgCountLine=2, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0
  CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1
  CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=2, CountLineCodeExe=1, CountLineComment=0
  CountStmt=3, CountStmtDecl=2, CountStmtExe=1
  MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=1
id: 6,606 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/database/sqltable.py | class_name: wpull.database.sqltable.GenericSQLURLTable
class GenericSQLURLTable(BaseSQLURLTable): '''URL table using SQLAlchemy without any customizations. Args: url: A SQLAlchemy database URL. ''' def __init__(self, url): super().__init__() self._engine = create_engine(url) DBBase.metadata.create_all(self._engine) self._session_maker_instance = sessionmaker(bind=self._engine) @property def _session_maker(self): return self._session_maker_instance def close(self): self._engine.dispose()
class GenericSQLURLTable(BaseSQLURLTable): '''URL table using SQLAlchemy without any customizations. Args: url: A SQLAlchemy database URL. ''' def __init__(self, url): pass @property def _session_maker(self): pass def close(self): pass
Metrics:
  total_program_units=5, total_doc_str=1
  AvgCountLine=3, AvgCountLineBlank=0, AvgCountLineCode=3, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.36
  CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=3, CountDeclInstanceVariable=2, CountDeclMethod=3, CountDeclMethodAll=59
  CountLine=18, CountLineBlank=3, CountLineCode=11, CountLineCodeDecl=7, CountLineCodeExe=6, CountLineComment=4
  CountStmt=10, CountStmtDecl=6, CountStmtExe=6
  MaxCyclomatic=1, MaxInheritanceTree=5, MaxNesting=0, SumCyclomatic=3
id: 6,607 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/database/sqlmodel.py | class_name: wpull.database.sqlmodel.WARCVisit
class WARCVisit(DBBase): '''Standalone table for ``--cdx-dedup`` feature.''' __tablename__ = 'warc_visits' url = Column(String, primary_key=True, nullable=False) warc_id = Column(String, nullable=False) payload_digest = Column(String, nullable=False) @classmethod def add_visits(cls, session, visits): for url, warc_id, payload_digest in visits: session.execute( insert(WARCVisit).prefix_with('OR IGNORE'), dict( url=url, warc_id=warc_id, payload_digest=payload_digest ) ) @classmethod def get_revisit_id(cls, session, url, payload_digest): query = select([WARCVisit.warc_id]).where( and_( WARCVisit.url == url, WARCVisit.payload_digest == payload_digest ) ) row = session.execute(query).first() if row: return row.warc_id
class WARCVisit(DBBase): '''Standalone table for ``--cdx-dedup`` feature.''' @classmethod def add_visits(cls, session, visits): pass @classmethod def get_revisit_id(cls, session, url, payload_digest): pass
Metrics:
  total_program_units=5, total_doc_str=1
  AvgCountLine=11, AvgCountLineBlank=1, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.04
  CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=2
  CountLine=33, CountLineBlank=5, CountLineCode=27, CountLineCodeDecl=12, CountLineCodeExe=22, CountLineComment=1
  CountStmt=13, CountStmtDecl=10, CountStmtExe=10
  MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4
id: 6,608 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/database/sqlmodel.py | class_name: wpull.database.sqlmodel.URLString
class URLString(DBBase): '''Table containing the URL strings. The :class:`URL` references this table. ''' __tablename__ = 'url_strings' id = Column(Integer, primary_key=True, autoincrement=True) url = Column(String, nullable=False, unique=True, index=True) @classmethod def add_urls(cls, session, urls: Iterable[str]): query = insert(URLString).prefix_with('OR IGNORE') session.execute(query, [{'url': url} for url in urls])
class URLString(DBBase): '''Table containing the URL strings. The :class:`URL` references this table. ''' @classmethod def add_urls(cls, session, urls: Iterable[str]): pass
Metrics:
  total_program_units=3, total_doc_str=1
  AvgCountLine=3, AvgCountLineBlank=0, AvgCountLineCode=3, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.38
  CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1
  CountLine=14, CountLineBlank=3, CountLineCode=8, CountLineCodeDecl=6, CountLineCodeExe=5, CountLineComment=3
  CountStmt=7, CountStmtDecl=5, CountStmtExe=5
  MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=1
id: 6,609 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/converter.py | class_name: wpull.converter.HTMLConverter
class HTMLConverter(HTMLScraper, BaseDocumentConverter): '''HTML converter.''' def __init__(self, html_parser, element_walker, url_table): super().__init__(html_parser, element_walker) self._url_table = url_table self._css_converter = CSSConverter(url_table) self._out_file = None self._css_already_done = None self._base_url = None self._encoding = None def convert(self, input_filename, output_filename, base_url=None): self._css_already_done = set() self._base_url = base_url with open(input_filename, 'rb') as in_file: encoding = wpull.string.detect_encoding( in_file.peek(1048576), is_html=True ) with open(input_filename, 'rb') as in_file: try: doctype = self._html_parser.parse_doctype(in_file, encoding=encoding) is_xhtml = doctype and 'XHTML' in doctype except AttributeError: # using html5lib is_xhtml = False doctype = None with open(input_filename, 'rb') as in_file: with open(output_filename, 'wb') as bin_out_file: elements = self.iter_elements(in_file, encoding=encoding) out_file = io.TextIOWrapper(bin_out_file, encoding=encoding) if doctype: out_file.write(doctype) out_file.write('\r\n') self._out_file = out_file self._encoding = encoding for element in elements: if isinstance(element, Comment): out_file.write( '<!--{0}-->'.format(element.text) ) elif isinstance(element, Element): if element.end: if element.tag not in empty_tags: self._out_file.write('</{0}>' .format(element.tag)) if element.tail: self._out_file.write(element.tail) else: self._convert_element(element, is_xhtml=is_xhtml) elif isinstance(element, Doctype): doctype = element.text is_xhtml = doctype and 'XHTML' in doctype self._out_file.close() self._out_file = None def _convert_element(self, element, is_xhtml=False): self._out_file.write('<') self._out_file.write(element.tag) new_text = element.text unfilled_value = object() new_attribs = dict(((name, unfilled_value) for name in element.attrib)) for link_info in self._element_walker.iter_links_element(element): new_value = None if link_info.value_type == 'plain': new_value = self._convert_plain(link_info) elif link_info.value_type == 'css': if link_info.attrib: new_value = self._convert_css_attrib(link_info) else: text = self._convert_css_text(link_info) if text: new_text = text if new_value and link_info.attrib: if new_attribs[link_info.attrib] == unfilled_value: new_attribs[link_info.attrib] = [new_value] else: new_attribs[link_info.attrib].append(new_value) for name in new_attribs: if new_attribs[name] == unfilled_value: value = element.attrib[name] else: value = ' '.join(new_attribs[name]) self._out_file.write(' {0}="{1}"'.format(name, value)) if is_xhtml and element.tag in empty_tags: self._out_file.write('/') self._out_file.write('>') if element.tag not in empty_tags: if new_text: self._out_file.write(new_text) def _convert_plain(self, link_info): base_url = self._base_url if link_info.base_link: if self._base_url: base_url = wpull.url.urljoin( self._base_url, link_info.base_link ) else: base_url = link_info.base_link if base_url: url = wpull.url.urljoin(base_url, link_info.link) else: url = link_info.link url_info = URLInfo.parse(url, encoding=self._encoding) new_url = self._get_new_url(url_info) return new_url def _convert_css_attrib(self, link_info): done_key = (link_info.element, link_info.attrib) if done_key in self._css_already_done: return text = wpull.string.to_str( link_info.element.attrib.get(link_info.attrib) ) new_value = self._css_converter.convert_text( text, base_url=self._base_url ) self._css_already_done.add(done_key) return new_value def 
_convert_css_text(self, link_info): if link_info.element in self._css_already_done: return text = wpull.string.to_str(link_info.element.text) new_text = self._css_converter.convert_text( text, base_url=self._base_url ) self._css_already_done.add(id(link_info.element)) return new_text def _get_new_url(self, url_info): try: url_record = self._url_table.get_one(url_info.url) except NotFound: url_record = None if url_record \ and url_record.status == Status.done and url_record.filename: new_url = url_record.filename else: new_url = url_info.url return new_url
class HTMLConverter(HTMLScraper, BaseDocumentConverter): '''HTML converter.''' def __init__(self, html_parser, element_walker, url_table): pass def convert(self, input_filename, output_filename, base_url=None): pass def _convert_element(self, element, is_xhtml=False): pass def _convert_plain(self, link_info): pass def _convert_css_attrib(self, link_info): pass def _convert_css_text(self, link_info): pass def _get_new_url(self, url_info): pass
Metrics:
  total_program_units=8, total_doc_str=1
  AvgCountLine=24, AvgCountLineBlank=4, AvgCountLineCode=19, AvgCountLineComment=0, AvgCyclomatic=5, CommentToCodeRatio=0.01
  CountClassBase=2, CountClassCoupled=8, CountClassCoupledModified=4, CountClassDerived=0
  CountDeclInstanceMethod=7, CountDeclInstanceVariable=6, CountDeclMethod=7, CountDeclMethodAll=46
  CountLine=173, CountLineBlank=37, CountLineCode=134, CountLineCodeDecl=40, CountLineCodeExe=126, CountLineComment=2
  CountStmt=109, CountStmtDecl=38, CountStmtExe=101
  MaxCyclomatic=13, MaxInheritanceTree=6, MaxNesting=6, SumCyclomatic=35
id: 6,610 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/converter_test.py | class_name: wpull.converter_test.TestHTML5LibConverter
class TestHTML5LibConverter(unittest.TestCase, Mixin, TempDirMixin): def setUp(self): self.set_up_temp_dir() def tearDown(self): self.tear_down_temp_dir() def get_html_parser(self): return HTML5LibHTMLParser()
class TestHTML5LibConverter(unittest.TestCase, Mixin, TempDirMixin): def setUp(self): pass def tearDown(self): pass def get_html_parser(self): pass
Metrics:
  total_program_units=4, total_doc_str=0
  AvgCountLine=2, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0
  CountClassBase=3, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=3, CountDeclInstanceVariable=0, CountDeclMethod=3, CountDeclMethodAll=82
  CountLine=9, CountLineBlank=2, CountLineCode=7, CountLineCodeDecl=4, CountLineCodeExe=3, CountLineComment=0
  CountStmt=7, CountStmtDecl=4, CountStmtExe=3
  MaxCyclomatic=1, MaxInheritanceTree=2, MaxNesting=0, SumCyclomatic=3
id: 6,611 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/converter_test.py | class_name: wpull.converter_test.TestLxmlConverter
class TestLxmlConverter(unittest.TestCase, Mixin, TempDirMixin): def setUp(self): self.set_up_temp_dir() def tearDown(self): self.tear_down_temp_dir() def get_html_parser(self): return LxmlHTMLParser()
class TestLxmlConverter(unittest.TestCase, Mixin, TempDirMixin): def setUp(self): pass def tearDown(self): pass def get_html_parser(self): pass
Metrics:
  total_program_units=4, total_doc_str=0
  AvgCountLine=2, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0
  CountClassBase=3, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=3, CountDeclInstanceVariable=0, CountDeclMethod=3, CountDeclMethodAll=82
  CountLine=9, CountLineBlank=2, CountLineCode=7, CountLineCodeDecl=4, CountLineCodeExe=3, CountLineComment=0
  CountStmt=7, CountStmtDecl=4, CountStmtExe=3
  MaxCyclomatic=1, MaxInheritanceTree=2, MaxNesting=0, SumCyclomatic=3
id: 6,612 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/cookie.py | class_name: wpull.cookie.BetterMozillaCookieJar
class BetterMozillaCookieJar(http.cookiejar.FileCookieJar): '''MozillaCookieJar that is compatible with Wget/Curl. It ignores file header checks and supports session cookies. ''' # This class from cpython/Lib/http/cookiejar.py changeset 95436:ea94f6c87f5d # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights # Reserved magic_re = re.compile(r'.') header = """\ # Netscape HTTP Cookie File # http://curl.haxx.se/rfc/cookie_spec.html # This is a generated file! Do not edit. """ def _really_load(self, f, filename, ignore_discard, ignore_expires): now = time.time() magic = f.readline() if not self.magic_re.search(magic): raise http.cookiejar.LoadError( "%r does not look like a Netscape format cookies file" % filename) line = "" try: while 1: line = f.readline() if line == "": break # last field may be absent, so keep any trailing tab if line.endswith("\n"): line = line[:-1] # skip comments and blank lines XXX what is $ for? if (line.strip().startswith(("#", "$")) or line.strip() == ""): continue domain, domain_specified, path, secure, expires, name, value = \ line.split("\t") secure = (secure == "TRUE") domain_specified = (domain_specified == "TRUE") if name == "": # cookies.txt regards 'Set-Cookie: foo' as a cookie # with no name, whereas http.cookiejar regards it as a # cookie with no value. name = value value = None initial_dot = domain.startswith(".") assert domain_specified == initial_dot discard = False if expires in ("0", ""): expires = None discard = True # assume path_specified is false c = http.cookiejar.Cookie( 0, name, value, None, False, domain, domain_specified, initial_dot, path, False, secure, expires, discard, None, None, {}) if not ignore_discard and c.discard: continue if not ignore_expires and c.is_expired(now): continue self.set_cookie(c) except OSError: raise except Exception: f = io.StringIO() traceback.print_exc(None, f) msg = f.getvalue() warnings.warn("http.cookiejar bug!\n%s" % msg, stacklevel=2) raise http.cookiejar.LoadError( "invalid Netscape format cookies file %r: %r" % (filename, line)) def save(self, filename=None, ignore_discard=False, ignore_expires=False): if filename is None: if self.filename is not None: filename = self.filename else: raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT) with open(filename, "w") as f: f.write(self.header) now = time.time() for cookie in self: if not ignore_discard and cookie.discard: continue if not ignore_expires and cookie.is_expired(now): continue if cookie.secure: secure = "TRUE" else: secure = "FALSE" if cookie.domain.startswith("."): initial_dot = "TRUE" else: initial_dot = "FALSE" if cookie.expires is not None: expires = str(cookie.expires) else: expires = "0" if cookie.value is None: # cookies.txt regards 'Set-Cookie: foo' as a cookie # with no name, whereas http.cookiejar regards it as a # cookie with no value. name = "" value = cookie.name else: name = cookie.name value = cookie.value f.write( "\t".join([cookie.domain, initial_dot, cookie.path, secure, expires, name, value]) + "\n")
class BetterMozillaCookieJar(http.cookiejar.FileCookieJar): '''MozillaCookieJar that is compatible with Wget/Curl. It ignores file header checks and supports session cookies. ''' def _really_load(self, f, filename, ignore_discard, ignore_expires): pass def save(self, filename=None, ignore_discard=False, ignore_expires=False): pass
Metrics:
  total_program_units=3, total_doc_str=1
  AvgCountLine=55, AvgCountLineBlank=5, AvgCountLineCode=46, AvgCountLineComment=5, AvgCyclomatic=11, CommentToCodeRatio=0.21
  CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=2, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=27
  CountLine=128, CountLineBlank=14, CountLineCode=95, CountLineCodeDecl=21, CountLineCodeExe=92, CountLineComment=20
  CountStmt=72, CountStmtDecl=20, CountStmtExe=69
  MaxCyclomatic=12, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=22
id: 6,613 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/cookie.py | class_name: wpull.cookie.DeFactoCookiePolicy
class DeFactoCookiePolicy(DefaultCookiePolicy): '''Cookie policy that limits the content and length of the cookie. Args: cookie_jar: The CookieJar instance. This policy class is *not* designed to be shared between CookieJar instances. ''' def __init__(self, *args, **kwargs): self.cookie_jar = kwargs.pop('cookie_jar') DefaultCookiePolicy.__init__(self, *args, **kwargs) def set_ok(self, cookie, request): if not DefaultCookiePolicy.set_ok(self, cookie, request): return False try: new_cookie_length = (self.cookie_length(cookie.domain) + len(cookie.path) + len(cookie.name) + len(cookie.value or '')) except TypeError: # cookiejar is not infallible #220 _logger.debug('Cookie handling error', exc_info=1) return False if new_cookie_length >= 4100: return False if self.count_cookies(cookie.domain) >= 50: cookies = self.cookie_jar._cookies try: cookies[cookie.domain][cookie.path][cookie.name] except KeyError: return False if not wpull.util.is_ascii(str(cookie)): return False return True def count_cookies(self, domain): '''Return the number of cookies for the given domain.''' cookies = self.cookie_jar._cookies if domain in cookies: return sum( [len(cookie) for cookie in cookies[domain].values()] ) else: return 0 def cookie_length(self, domain): '''Return approximate length of all cookie key-values for a domain.''' cookies = self.cookie_jar._cookies if domain not in cookies: return 0 length = 0 for path in cookies[domain]: for name in cookies[domain][path]: cookie = cookies[domain][path][name] length += len(path) + len(name) + len(cookie.value or '') return length
class DeFactoCookiePolicy(DefaultCookiePolicy): '''Cookie policy that limits the content and length of the cookie. Args: cookie_jar: The CookieJar instance. This policy class is *not* designed to be shared between CookieJar instances. ''' def __init__(self, *args, **kwargs): pass def set_ok(self, cookie, request): pass def count_cookies(self, domain): '''Return the number of cookies for the given domain.''' pass def cookie_length(self, domain): '''Return approximate length of all cookie key-values for a domain.''' pass
Metrics:
  total_program_units=5, total_doc_str=3
  AvgCountLine=14, AvgCountLineBlank=3, AvgCountLineCode=11, AvgCountLineComment=1, AvgCyclomatic=4, CommentToCodeRatio=0.21
  CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=4, CountDeclInstanceVariable=1, CountDeclMethod=4, CountDeclMethodAll=31
  CountLine=67, CountLineBlank=15, CountLineCode=43, CountLineCodeDecl=14, CountLineCodeExe=38, CountLineComment=9
  CountStmt=38, CountStmtDecl=14, CountStmtExe=33
  MaxCyclomatic=7, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=14
id: 6,614 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/cookie_test.py | class_name: wpull.cookie_test.FakeResponse
class FakeResponse(object): def __init__(self, headers=None, url=None): """ headers: list of RFC822-style 'Key: value' strings """ self._headers = email.message_from_string("\n".join(headers)) self._url = url or [] def info(self): return self._headers
class FakeResponse(object): def __init__(self, headers=None, url=None): ''' headers: list of RFC822-style 'Key: value' strings ''' pass def info(self): pass
Metrics:
  total_program_units=3, total_doc_str=1
  AvgCountLine=5, AvgCountLineBlank=1, AvgCountLineCode=3, AvgCountLineComment=2, AvgCyclomatic=1, CommentToCodeRatio=0.5
  CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=2
  CountLine=11, CountLineBlank=2, CountLineCode=6, CountLineCodeDecl=5, CountLineCodeExe=3, CountLineComment=3
  CountStmt=6, CountStmtDecl=5, CountStmtExe=3
  MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 6,615 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/cookie_test.py | class_name: wpull.cookie_test.TestCookie
class TestCookie(unittest.TestCase): def setUp(self): http.cookiejar.debug = True def test_length(self): cookie_jar = CookieJar() policy = DeFactoCookiePolicy(cookie_jar=cookie_jar) cookie_jar.set_policy(policy) request = urllib.request.Request('http://example.com/') response = FakeResponse( [ 'Set-Cookie: k={0}'.format('a' * 400) ], 'http://example.com/' ) cookie_jar.extract_cookies(response, request) print(cookie_jar._cookies) self.assertTrue(cookie_jar._cookies['example.com']['/'].get('k')) request = urllib.request.Request('http://example.com/') response = FakeResponse( [ 'Set-Cookie: k={0}'.format('a' * 5000) ], 'http://example.com/' ) cookie_jar.extract_cookies(response, request) self.assertFalse(cookie_jar._cookies['example.com']['/'].get('k2')) def test_domain_limit(self): cookie_jar = CookieJar() policy = DeFactoCookiePolicy(cookie_jar=cookie_jar) cookie_jar.set_policy(policy) request = urllib.request.Request('http://example.com/') for key in range(55): response = FakeResponse( [ 'Set-Cookie: k{0}=a'.format(key) ], 'http://example.com/' ) cookie_jar.extract_cookies(response, request) if key < 50: self.assertTrue( cookie_jar._cookies['example.com']['/'] .get('k{0}'.format(key)) ) else: self.assertFalse( cookie_jar._cookies['example.com']['/'] .get('k{0}'.format(key)) ) response = FakeResponse( [ 'Set-Cookie: k3=b' ], 'http://example.com/' ) cookie_jar.extract_cookies(response, request) self.assertEqual( 'b', cookie_jar._cookies['example.com']['/']['k3'].value ) def test_ascii(self): cookie_jar = CookieJar() policy = DeFactoCookiePolicy(cookie_jar=cookie_jar) cookie_jar.set_policy(policy) request = urllib.request.Request('http://example.com/') response = FakeResponse( [ 'Set-Cookie: k=🐭' ], 'http://example.com/' ) cookie_jar.extract_cookies(response, request) print(cookie_jar._cookies) self.assertFalse(cookie_jar._cookies.get('example.com')) def test_empty_value(self): cookie_jar = CookieJar() policy = DeFactoCookiePolicy(cookie_jar=cookie_jar) cookie_jar.set_policy(policy) request = urllib.request.Request('http://example.com/') response = FakeResponse( [ 'Set-Cookie: k' ], 'http://example.com/' ) cookie_jar.extract_cookies(response, request) print(cookie_jar._cookies) self.assertTrue(cookie_jar._cookies.get('example.com')) def test_load_bad_cookie(self): cookie_jar = BetterMozillaCookieJar() with self.assertRaises(http.cookiejar.LoadError): with tempfile.TemporaryDirectory() as temp_dir: filename = os.path.join(temp_dir, 'cookies.txt') with open(filename, 'w') as file: file.write('You know what they say:\n') file.write('All toasters toast toast!') cookie_jar.load(filename)
class TestCookie(unittest.TestCase): def setUp(self): pass def test_length(self): pass def test_domain_limit(self): pass def test_ascii(self): pass def test_empty_value(self): pass def test_load_bad_cookie(self): pass
Metrics:
  total_program_units=7, total_doc_str=0
  AvgCountLine=20, AvgCountLineBlank=4, AvgCountLineCode=16, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0
  CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=3, CountClassDerived=0
  CountDeclInstanceMethod=6, CountDeclInstanceVariable=0, CountDeclMethod=6, CountDeclMethodAll=78
  CountLine=125, CountLineBlank=28, CountLineCode=97, CountLineCodeDecl=28, CountLineCodeExe=90, CountLineComment=0
  CountStmt=57, CountStmtDecl=26, CountStmtExe=50
  MaxCyclomatic=3, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=8
id: 6,616 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/cookiewrapper.py | class_name: wpull.cookiewrapper.CookieJarWrapper
class CookieJarWrapper(object): '''Wraps a CookieJar. Args: cookie_jar: An instance of :class:`http.cookiejar.CookieJar`. save_filename (str, optional): A filename to save the cookies. keep_session_cookies (bool): If True, session cookies are kept when saving to file. ''' def __init__(self, cookie_jar, save_filename=None, keep_session_cookies=False): self._cookie_jar = cookie_jar self._save_filename = save_filename self._keep_session_cookies = keep_session_cookies def add_cookie_header(self, request, referrer_host=None): '''Wrapped ``add_cookie_header``. Args: request: An instance of :class:`.http.request.Request`. referrer_host (str): An hostname or IP address of the referrer URL. ''' new_request = convert_http_request(request, referrer_host) self._cookie_jar.add_cookie_header(new_request) request.fields.clear() for name, value in new_request.header_items(): request.fields.add(name, value) def extract_cookies(self, response, request, referrer_host=None): '''Wrapped ``extract_cookies``. Args: response: An instance of :class:`.http.request.Response`. request: An instance of :class:`.http.request.Request`. referrer_host (str): An hostname or IP address of the referrer URL. ''' new_response = HTTPResponseInfoWrapper(response) new_request = convert_http_request(request, referrer_host) self._cookie_jar.extract_cookies(new_response, new_request) @property def cookie_jar(self): '''Return the wrapped Cookie Jar.''' return self._cookie_jar def close(self): '''Save the cookie jar if needed.''' if self._save_filename: self._cookie_jar.save( self._save_filename, ignore_discard=self._keep_session_cookies )
class CookieJarWrapper(object): '''Wraps a CookieJar. Args: cookie_jar: An instance of :class:`http.cookiejar.CookieJar`. save_filename (str, optional): A filename to save the cookies. keep_session_cookies (bool): If True, session cookies are kept when saving to file. ''' def __init__(self, cookie_jar, save_filename=None, keep_session_cookies=False): pass def add_cookie_header(self, request, referrer_host=None): '''Wrapped ``add_cookie_header``. Args: request: An instance of :class:`.http.request.Request`. referrer_host (str): An hostname or IP address of the referrer URL. ''' pass def extract_cookies(self, response, request, referrer_host=None): '''Wrapped ``extract_cookies``. Args: response: An instance of :class:`.http.request.Response`. request: An instance of :class:`.http.request.Request`. referrer_host (str): An hostname or IP address of the referrer URL. ''' pass @property def cookie_jar(self): '''Return the wrapped Cookie Jar.''' pass def close(self): '''Save the cookie jar if needed.''' pass
Metrics:
  total_program_units=7, total_doc_str=5
  AvgCountLine=9, AvgCountLineBlank=1, AvgCountLineCode=5, AvgCountLineComment=3, AvgCyclomatic=1, CommentToCodeRatio=0.88
  CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=1, CountClassDerived=0
  CountDeclInstanceMethod=5, CountDeclInstanceVariable=3, CountDeclMethod=5, CountDeclMethodAll=5
  CountLine=57, CountLineBlank=10, CountLineCode=25, CountLineCodeDecl=15, CountLineCodeExe=17, CountLineComment=22
  CountStmt=20, CountStmtDecl=13, CountStmtExe=14
  MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=7
id: 6,617 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/cookiewrapper.py | class_name: wpull.cookiewrapper.HTTPResponseInfoWrapper
class HTTPResponseInfoWrapper(object): '''Wraps a HTTP Response. Args: response: An instance of :class:`.http.request.Response` ''' def __init__(self, response): self._response = response def info(self): '''Return the header fields as a Message: Returns: Message: An instance of :class:`email.message.Message`. If Python 2, returns an instance of :class:`mimetools.Message`. ''' if sys.version_info[0] == 2: return mimetools.Message(io.StringIO(str(self._response.fields))) else: return email.message_from_string(str(self._response.fields))
class HTTPResponseInfoWrapper(object): '''Wraps a HTTP Response. Args: response: An instance of :class:`.http.request.Response` ''' def __init__(self, response): pass def info(self): '''Return the header fields as a Message: Returns: Message: An instance of :class:`email.message.Message`. If Python 2, returns an instance of :class:`mimetools.Message`. ''' pass
Metrics:
  total_program_units=3, total_doc_str=2
  AvgCountLine=7, AvgCountLineBlank=1, AvgCountLineCode=4, AvgCountLineComment=3, AvgCyclomatic=2, CommentToCodeRatio=1.13
  CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=2
  CountLine=20, CountLineBlank=3, CountLineCode=8, CountLineCodeDecl=4, CountLineCodeExe=5, CountLineComment=9
  CountStmt=7, CountStmtDecl=4, CountStmtExe=4
  MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
id: 6,618 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/cookiewrapper_test.py | class_name: wpull.cookiewrapper_test.TestWrapper
class TestWrapper(unittest.TestCase): def test_http_request(self): request = Request('http://example.com') request.fields['hello'] = 'world' new_request = convert_http_request(request) self.assertEqual('example.com', new_request.host) self.assertEqual('world', new_request.get_header('Hello')) def test_http_response(self): response = Response(200, 'OK', version='HTTP/1.0') response.fields['hello'] = 'world' new_response = HTTPResponseInfoWrapper(response) info = new_response.info() self.assertEqual('world', info.get('hello'))
class TestWrapper(unittest.TestCase): def test_http_request(self): pass def test_http_response(self): pass
Metrics:
  total_program_units=3, total_doc_str=0
  AvgCountLine=8, AvgCountLineBlank=2, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0
  CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=3, CountClassDerived=0
  CountDeclInstanceMethod=2, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=74
  CountLine=17, CountLineBlank=4, CountLineCode=13, CountLineCodeDecl=8, CountLineCodeExe=10, CountLineComment=0
  CountStmt=13, CountStmtDecl=8, CountStmtExe=10
  MaxCyclomatic=1, MaxInheritanceTree=2, MaxNesting=0, SumCyclomatic=2
id: 6,619 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/database/base.py | class_name: wpull.database.base.DatabaseError
class DatabaseError(Exception): '''Any database error.'''
class DatabaseError(Exception): '''Any database error.''' pass
Metrics:
  total_program_units=1, total_doc_str=1
  AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=1
  CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=1
  CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=10
  CountLine=2, CountLineBlank=0, CountLineCode=1, CountLineCodeDecl=1, CountLineCodeExe=0, CountLineComment=1
  CountStmt=1, CountStmtDecl=1, CountStmtExe=0
  MaxCyclomatic=0, MaxInheritanceTree=3, MaxNesting=0, SumCyclomatic=0
id: 6,620 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/database/base.py | class_name: wpull.database.base.NotFound
class NotFound(DatabaseError): '''Item not found in the table.'''
class NotFound(DatabaseError): '''Item not found in the table.''' pass
Metrics:
  total_program_units=1, total_doc_str=1
  AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=1
  CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=10
  CountLine=2, CountLineBlank=0, CountLineCode=1, CountLineCodeDecl=1, CountLineCodeExe=0, CountLineComment=1
  CountStmt=1, CountStmtDecl=1, CountStmtExe=0
  MaxCyclomatic=0, MaxInheritanceTree=4, MaxNesting=0, SumCyclomatic=0
id: 6,621 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/database/sqlmodel.py | class_name: wpull.database.sqlmodel.Hostname
class Hostname(DBBase): __tablename__ = 'hostnames' id = Column(Integer, primary_key=True, autoincrement=True) hostname = Column(String, nullable=False, unique=True)
class Hostname(DBBase): pass
Metrics:
  total_program_units=1, total_doc_str=0
  AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0
  CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0
  CountLine=5, CountLineBlank=1, CountLineCode=4, CountLineCodeDecl=3, CountLineCodeExe=3, CountLineComment=0
  CountStmt=4, CountStmtDecl=3, CountStmtExe=3
  MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
id: 6,622 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/database/sqlmodel.py | class_name: wpull.database.sqlmodel.QueuedFile
class QueuedFile(DBBase): __tablename__ = 'queued_files' id = Column(Integer, primary_key=True, autoincrement=True) queued_url_id = Column(Integer, ForeignKey(QueuedURL.id), nullable=False, unique=True) queued_url = relationship( QueuedURL, uselist=False, foreign_keys=[queued_url_id] ) status = Column( Enum(*list(member.value for member in Status)), index=True, default=Status.todo.value, nullable=False, )
class QueuedFile(DBBase): pass
Metrics:
  total_program_units=1, total_doc_str=0
  AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0
  CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0
  CountLine=15, CountLineBlank=1, CountLineCode=14, CountLineCodeDecl=5, CountLineCodeExe=13, CountLineComment=0
  CountStmt=6, CountStmtDecl=5, CountStmtExe=5
  MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
id: 6,623 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/database/sqlmodel.py | class_name: wpull.database.sqlmodel.QueuedURL
class QueuedURL(DBBase): __tablename__ = 'queued_urls' id = Column(Integer, primary_key=True, autoincrement=True) # -- URLs -- url_string_id = Column( Integer, ForeignKey(URLString.id), nullable=False, unique=True, index=True, doc='Target URL to fetch' ) url_string = relationship( URLString, uselist=False, foreign_keys=[url_string_id] ) url = association_proxy('url_string', 'url') parent_url_string_id = Column( Integer, ForeignKey(URLString.id), doc='Optional referral URL' ) parent_url_string = relationship( URLString, uselist=False, foreign_keys=[parent_url_string_id]) parent_url = association_proxy('parent_url_string', 'url') root_url_string_id = Column( Integer, ForeignKey(URLString.id), doc='Optional root URL' ) root_url_string = relationship( 'URLString', uselist=False, foreign_keys=[root_url_string_id]) root_url = association_proxy('root_url_string', 'url') # -- Fetch parameters -- status = Column( Enum(*list(member.value for member in Status)), index=True, default=Status.todo.value, nullable=False, doc='Status of the completion of the item.' ) try_count = Column( Integer, nullable=False, default=0, doc='Number of attempts made in order to process the item.' ) level = Column( Integer, nullable=False, default=0, doc='Recursive depth of the item. 0 is root, 1 is child of root, etc.' ) inline_level = Column( Integer, doc='Depth of the page requisite object. ' '0 is the object, 1 is the object\'s dependency, etc.' ) link_type = Column( Enum(*list(member.value for member in LinkType)), doc='Expected content type of extracted link.' ) priority = Column( Integer, nullable=False, default=0, doc='Priority of item.' ) # -- Fetch extra data -- post_data = Column(String, doc='Additional percent-encoded data for POST.') # -- Fetch result info -- status_code = Column(Integer, doc='HTTP status code or FTP rely code.') filename = Column(String, doc='Local filename of the item.') @classmethod @contextlib.contextmanager def watch_urls_inserted(cls, session): last_primary_key = session.query(func.max(QueuedURL.id)).scalar() or 0 def get_urls(): query = select([URLString.url]).where( and_(QueuedURL.id > last_primary_key, QueuedURL.url_string_id == URLString.id) ) return [row[0] for row in session.execute(query)] yield get_urls def to_plain(self) -> URLRecord: record = URLRecord() record.url = self.url record.parent_url = self.parent_url record.root_url = self.root_url record.status = Status(self.status) record.try_count = self.try_count record.level = self.level record.inline_level = self.inline_level record.link_type = LinkType(self.link_type) if self.link_type else None record.priority = self.priority record.post_data = self.post_data record.status_code = self.status_code record.filename = self.filename return record
class QueuedURL(DBBase): @classmethod @contextlib.contextmanager def watch_urls_inserted(cls, session): pass def get_urls(): pass def to_plain(self) -> URLRecord: pass
Metrics:
  total_program_units=6, total_doc_str=0
  AvgCountLine=11, AvgCountLineBlank=1, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.05
  CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=4, CountClassDerived=0
  CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=2
  CountLine=99, CountLineBlank=12, CountLineCode=83, CountLineCodeDecl=27, CountLineCodeExe=77, CountLineComment=4
  CountStmt=42, CountStmtDecl=26, CountStmtExe=38
  MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=4
id: 6,624 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/document/html.py | class_name: wpull.document.html.HTMLReader
class HTMLReader(BaseDocumentDetector, BaseHTMLReader): '''HTML document reader. Arguments: html_parser (:class:`.document.htmlparse.BaseParser`): An HTML parser. ''' def __init__(self, html_parser): self._html_parser = html_parser @classmethod def is_response(cls, response): '''Return whether the Response is likely to be HTML.''' if 'html' in response.fields.get('content-type', '').lower(): return True if response.body: return cls.is_file(response.body) @classmethod def is_request(cls, request): '''Return whether the Request is likely to be a HTML.''' return cls.is_url(request.url_info) @classmethod def is_url(cls, url_info): '''Return whether the URLInfo is likely to be a HTML.''' path = url_info.path.lower() if '.htm' in path or '.dhtm' in path or '.xht' in path: return True @classmethod def is_file(cls, file): '''Return whether the file is likely to be HTML.''' peeked_data = wpull.string.printable_bytes( wpull.util.peek_file(file)).lower() if b'<!doctype html' in peeked_data \ or b'<head' in peeked_data \ or b'<title' in peeked_data \ or b'<html' in peeked_data \ or b'<script' in peeked_data \ or b'<table' in peeked_data \ or b'<a href' in peeked_data: return True def iter_elements(self, file, encoding=None): return self._html_parser.parse(file, encoding)
class HTMLReader(BaseDocumentDetector, BaseHTMLReader): '''HTML document reader. Arguments: html_parser (:class:`.document.htmlparse.BaseParser`): An HTML parser. ''' def __init__(self, html_parser): pass @classmethod def is_response(cls, response): '''Return whether the Response is likely to be HTML.''' pass @classmethod def is_request(cls, request): '''Return whether the Request is likely to be a HTML.''' pass @classmethod def is_url(cls, url_info): '''Return whether the URLInfo is likely to be a HTML.''' pass @classmethod def is_file(cls, file): '''Return whether the file is likely to be HTML.''' pass def iter_elements(self, file, encoding=None): pass
Metrics:
  total_program_units=11, total_doc_str=5
  AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=4, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.26
  CountClassBase=2, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=1
  CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=6, CountDeclMethodAll=32
  CountLine=47, CountLineBlank=8, CountLineCode=31, CountLineCodeDecl=14, CountLineCodeExe=20, CountLineComment=8
  CountStmt=20, CountStmtDecl=10, CountStmtExe=13
  MaxCyclomatic=3, MaxInheritanceTree=4, MaxNesting=1, SumCyclomatic=10
id: 6,625 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/converter.py | class_name: wpull.converter.CSSConverter
class CSSConverter(CSSScraper, BaseDocumentConverter): '''CSS converter.''' def __init__(self, url_table): super().__init__() self._url_table = url_table def convert(self, input_filename, output_filename, base_url=None): with open(input_filename, 'rb') as in_file, \ open(output_filename, 'wb') as out_file: encoding = wpull.string.detect_encoding( wpull.util.peek_file(in_file)) out_stream = codecs.getwriter(encoding)(out_file) for text, is_link in self.iter_processed_text(in_file, encoding): if is_link: out_stream.write(self.get_new_url(text, base_url)) else: out_stream.write(text) def convert_text(self, text, base_url=None): text_list = [] for text, is_link in self.iter_processed_text(io.StringIO(text)): if is_link: text_list.append(self.get_new_url(text, base_url)) else: text_list.append(text) return ''.join(text_list) def get_new_url(self, url, base_url=None): if base_url: url = wpull.url.urljoin(base_url, url) try: url_record = self._url_table.get_one(url) except NotFound: url_record = None if url_record \ and url_record.status == Status.done and url_record.filename: new_url = url_record.filename else: new_url = url return new_url
class CSSConverter(CSSScraper, BaseDocumentConverter): '''CSS converter.''' def __init__(self, url_table): pass def convert(self, input_filename, output_filename, base_url=None): pass def convert_text(self, text, base_url=None): pass def get_new_url(self, url, base_url=None): pass
Metrics:
  total_program_units=5, total_doc_str=1
  AvgCountLine=10, AvgCountLineBlank=1, AvgCountLineCode=9, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0.03
  CountClassBase=2, CountClassCoupled=3, CountClassCoupledModified=2, CountClassDerived=0
  CountDeclInstanceMethod=4, CountDeclInstanceVariable=1, CountDeclMethod=4, CountDeclMethodAll=44
  CountLine=45, CountLineBlank=8, CountLineCode=36, CountLineCodeDecl=15, CountLineCodeExe=31, CountLineComment=1
  CountStmt=30, CountStmtDecl=13, CountStmtExe=25
  MaxCyclomatic=4, MaxInheritanceTree=6, MaxNesting=3, SumCyclomatic=11
id: 6,626 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/document/html_test.py | class_name: wpull.document.html_test.TestHTML5LibHTML
class TestHTML5LibHTML(Mixin, unittest.TestCase): def get_html_parser(self): return HTML5LibHTMLParser()
class TestHTML5LibHTML(Mixin, unittest.TestCase): def get_html_parser(self): pass
Metrics:
  total_program_units=2, total_doc_str=0
  AvgCountLine=2, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0
  CountClassBase=2, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=80
  CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=2, CountLineCodeExe=1, CountLineComment=0
  CountStmt=3, CountStmtDecl=2, CountStmtExe=1
  MaxCyclomatic=1, MaxInheritanceTree=2, MaxNesting=0, SumCyclomatic=1
id: 6,627 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/document/htmlparse/html5lib_.py | class_name: wpull.document.htmlparse.html5lib_.HTMLParser
class HTMLParser(BaseParser): @property def parser_error(self): return ValueError def parse(self, file, encoding=None): tokenizer = html5lib.tokenizer.HTMLTokenizer( file, encoding=encoding, useChardet=False if encoding else True, parseMeta=False if encoding else True, ) tag = None attrib = None buffer = None tail_buffer = None for token in tokenizer: token_type = token['type'] if token_type == START_TAG: if buffer: yield Element(tag, attrib, buffer.getvalue(), None, False) buffer = None if tail_buffer: yield Element(tag, dict(), None, tail_buffer.getvalue(), True) tail_buffer = None tag = token['name'] attrib = dict(token['data']) buffer = io.StringIO() if token['name'] == 'script': tokenizer.state = tokenizer.scriptDataState elif token_type in (CHARACTERS, SPACE_CHARACTERS): if buffer: buffer.write(token['data']) if tail_buffer: tail_buffer.write(token['data']) elif token_type == END_TAG: if buffer: yield Element(tag, attrib, buffer.getvalue(), None, False) buffer = None if tail_buffer: yield Element(tag, dict(), None, tail_buffer.getvalue(), True) tail_buffer = None tail_buffer = io.StringIO() tag = token['name'] elif token_type == COMMENT: yield Comment(token['data']) elif token_type == DOCTYPE: yield Doctype('{} {} {}'.format( token['name'], token['publicId'], token['systemId'])) elif token_type == PARSE_ERROR: pass else: raise ValueError('Unhandled token {}'.format(token)) if buffer: yield Element(tag, attrib, buffer.getvalue(), None, False) buffer = None if tail_buffer: yield Element(tag, dict(), None, tail_buffer.getvalue(), True) tail_buffer = None
class HTMLParser(BaseParser): @property def parser_error(self): pass def parser_error(self): pass
Metrics:
  total_program_units=4, total_doc_str=0
  AvgCountLine=34, AvgCountLineBlank=7, AvgCountLineCode=28, AvgCountLineComment=0, AvgCyclomatic=10, CommentToCodeRatio=0
  CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=2, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=24
  CountLine=71, CountLineBlank=14, CountLineCode=57, CountLineCodeDecl=11, CountLineCodeExe=53, CountLineComment=0
  CountStmt=45, CountStmtDecl=10, CountStmtExe=42
  MaxCyclomatic=19, MaxInheritanceTree=4, MaxNesting=3, SumCyclomatic=20
id: 6,628 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/network/bandwidth.py | class_name: wpull.network.bandwidth.BandwidthLimiter
class BandwidthLimiter(BandwidthMeter): '''Bandwidth rate limit calculator.''' def __init__(self, rate_limit): super().__init__(sample_min_time=0) self._rate_limit = rate_limit def sleep_time(self): if not self._samples or not self._rate_limit: return 0 elapsed_time = 0 byte_sum = 0 for time_diff, data_len in self._samples: elapsed_time += time_diff byte_sum += data_len expected_elapsed_time = byte_sum / self._rate_limit if expected_elapsed_time > elapsed_time: sleep_time = expected_elapsed_time - elapsed_time if sleep_time < 0.001: return 0 else: return sleep_time else: return 0
class BandwidthLimiter(BandwidthMeter): '''Bandwidth rate limit calculator.''' def __init__(self, rate_limit): pass def sleep_time(self): pass
Metrics:
  total_program_units=3, total_doc_str=1
  AvgCountLine=12, AvgCountLineBlank=2, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0.05
  CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0
  CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=8
  CountLine=27, CountLineBlank=5, CountLineCode=21, CountLineCodeDecl=9, CountLineCodeExe=18, CountLineComment=1
  CountStmt=19, CountStmtDecl=9, CountStmtExe=16
  MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=6
id: 6,629 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/network/bandwidth.py | class_name: wpull.network.bandwidth.BandwidthMeter
class BandwidthMeter(object): '''Calculates the speed of data transfer. Args: sample_size (int): The number of samples for measuring the speed. sample_min_time (float): The minimum duration between samples in seconds. stall_time (float): The time in seconds to consider no traffic to be connection stalled. ''' def __init__(self, sample_size=20, sample_min_time=0.15, stall_time=5.0): self._bytes_transferred = 0 self._samples = collections.deque(maxlen=sample_size) self._last_feed_time = time.time() self._sample_min_time = sample_min_time self._stall_time = stall_time self._stalled = False self._collected_bytes_transferred = 0 @property def bytes_transferred(self): '''Return the number of bytes transferred Returns: int ''' return self._bytes_transferred @property def stalled(self): '''Return whether the connection is stalled. Returns: bool ''' return self._stalled @property def num_samples(self): '''Return the number of samples collected.''' return len(self._samples) def feed(self, data_len, feed_time=None): '''Update the bandwidth meter. Args: data_len (int): The number of bytes transfered since the last call to :func:`feed`. feed_time (float): Current time. ''' self._bytes_transferred += data_len self._collected_bytes_transferred += data_len time_now = feed_time or time.time() time_diff = time_now - self._last_feed_time if time_diff < self._sample_min_time: return self._last_feed_time = time.time() if data_len == 0 and time_diff >= self._stall_time: self._stalled = True return self._samples.append((time_diff, self._collected_bytes_transferred)) self._collected_bytes_transferred = 0 def speed(self): '''Return the current transfer speed. Returns: int: The speed in bytes per second. ''' if self._stalled: return 0 time_sum = 0 data_len_sum = 0 for time_diff, data_len in self._samples: time_sum += time_diff data_len_sum += data_len if time_sum: return data_len_sum / time_sum else: return 0
class BandwidthMeter(object): '''Calculates the speed of data transfer. Args: sample_size (int): The number of samples for measuring the speed. sample_min_time (float): The minimum duration between samples in seconds. stall_time (float): The time in seconds to consider no traffic to be connection stalled. ''' def __init__(self, sample_size=20, sample_min_time=0.15, stall_time=5.0): pass @property def bytes_transferred(self): '''Return the number of bytes transferred Returns: int ''' pass @property def stalled(self): '''Return whether the connection is stalled. Returns: bool ''' pass @property def num_samples(self): '''Return the number of samples collected.''' pass def feed(self, data_len, feed_time=None): '''Update the bandwidth meter. Args: data_len (int): The number of bytes transfered since the last call to :func:`feed`. feed_time (float): Current time. ''' pass def speed(self): '''Return the current transfer speed. Returns: int: The speed in bytes per second. ''' pass
Metrics:
  total_program_units=10, total_doc_str=6
  AvgCountLine=12, AvgCountLineBlank=2, AvgCountLineCode=7, AvgCountLineComment=3, AvgCyclomatic=2, CommentToCodeRatio=0.63
  CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=1
  CountDeclInstanceMethod=6, CountDeclInstanceVariable=7, CountDeclMethod=6, CountDeclMethodAll=6
  CountLine=88, CountLineBlank=18, CountLineCode=43, CountLineCodeDecl=22, CountLineCodeExe=33, CountLineComment=27
  CountStmt=39, CountStmtDecl=19, CountStmtExe=32
  MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=11
id: 6,630 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/network/bandwidth_test.py | class_name: wpull.network.bandwidth_test.TestNetwork
class TestNetwork(unittest.TestCase): def test_bandwidth_meter(self): meter = BandwidthMeter() self.assertEqual(0, meter.speed()) meter.feed(1000, feed_time=time.time() + 0.2) self.assertTrue(meter.speed()) def test_bandwidth_limit(self): meter = BandwidthLimiter(rate_limit=100) self.assertEqual(0, meter.sleep_time()) meter.feed(1000, feed_time=time.time() + 1.0) self.assertAlmostEqual(9.0, meter.sleep_time(), delta=0.2)
class TestNetwork(unittest.TestCase): def test_bandwidth_meter(self): pass def test_bandwidth_limit(self): pass
Metrics:
  total_program_units=3, total_doc_str=0
  AvgCountLine=8, AvgCountLineBlank=3, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0
  CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=2, CountClassDerived=0
  CountDeclInstanceMethod=2, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=74
  CountLine=18, CountLineBlank=7, CountLineCode=11, CountLineCodeDecl=5, CountLineCodeExe=8, CountLineComment=0
  CountStmt=11, CountStmtDecl=5, CountStmtExe=8
  MaxCyclomatic=1, MaxInheritanceTree=2, MaxNesting=0, SumCyclomatic=2
id: 6,631 | repository_name: ArchiveTeam/wpull | file_path: ArchiveTeam_wpull/wpull/network/connection.py | class_name: wpull.network.connection.BaseConnection
class BaseConnection(object): '''Base network stream. Args: address: 2-item tuple containing the IP address and port or 4-item for IPv6. hostname: Hostname of the address (for SSL). timeout: Time in seconds before a read/write operation times out. connect_timeout: Time in seconds before a connect operation times out. bind_host: Host name for binding the socket interface. sock: Use given socket. The socket must already by connected. Attributes: reader: Stream Reader instance. writer: Stream Writer instance. address: 2-item tuple containing the IP address. host: Host name. port: Port number. ''' def __init__(self, address: tuple, hostname: Optional[str]=None, timeout: Optional[float]=None, connect_timeout: Optional[float]=None, bind_host: Optional[str]=None, sock: Optional[socket.socket]=None): assert len(address) >= 2, 'Expect str & port. Got {}.'.format(address) assert '.' in address[0] or ':' in address[0], \ 'Expect numerical address. Got {}.'.format(address[0]) self._address = address self._hostname = hostname or address[0] self._timeout = timeout self._connect_timeout = connect_timeout self._bind_host = bind_host self._sock = sock self.reader = None self.writer = None self._close_timer = None self._state = ConnectionState.ready @property def address(self) -> tuple: return self._address @property def hostname(self) -> Optional[str]: return self._hostname @property def host(self) -> str: return self._address[0] @property def port(self) -> int: return self._address[1] def closed(self) -> bool: '''Return whether the connection is closed.''' return not self.writer or not self.reader or self.reader.at_eof() def state(self) -> ConnectionState: '''Return the state of this connection.''' return self._state @asyncio.coroutine def connect(self): '''Establish a connection.''' _logger.debug(__('Connecting to {0}.', self._address)) if self._state != ConnectionState.ready: raise Exception('Closed connection must be reset before reusing.') if self._sock: connection_future = asyncio.open_connection( sock=self._sock, **self._connection_kwargs() ) else: # TODO: maybe we don't want to ignore flow-info and scope-id? host = self._address[0] port = self._address[1] connection_future = asyncio.open_connection( host, port, **self._connection_kwargs() ) self.reader, self.writer = yield from \ self.run_network_operation( connection_future, wait_timeout=self._connect_timeout, name='Connect') if self._timeout is not None: self._close_timer = CloseTimer(self._timeout, self) else: self._close_timer = DummyCloseTimer() self._state = ConnectionState.created _logger.debug('Connected.') def _connection_kwargs(self): '''Return additional connection arguments.''' kwargs = {} if self._bind_host: kwargs['local_addr'] = (self._bind_host, 0) return kwargs def close(self): '''Close the connection.''' if self.writer: _logger.debug('Closing connection.') self.writer.close() self.writer = None self.reader = None if self._close_timer: self._close_timer.close() self._state = ConnectionState.dead def reset(self): '''Prepare connection for reuse.''' self.close() self._state = ConnectionState.ready @asyncio.coroutine def write(self, data: bytes, drain: bool=True): '''Write data.''' assert self._state == ConnectionState.created, \ 'Expect conn created. 
Got {}.'.format(self._state) self.writer.write(data) if drain: fut = self.writer.drain() if fut: yield from self.run_network_operation( fut, close_timeout=self._timeout, name='Write') @asyncio.coroutine def read(self, amount: int=-1) -> bytes: '''Read data.''' assert self._state == ConnectionState.created, \ 'Expect conn created. Got {}.'.format(self._state) data = yield from \ self.run_network_operation( self.reader.read(amount), close_timeout=self._timeout, name='Read') return data @asyncio.coroutine def readline(self) -> bytes: '''Read a line of data.''' assert self._state == ConnectionState.created, \ 'Expect conn created. Got {}.'.format(self._state) with self._close_timer.with_timeout(): data = yield from \ self.run_network_operation( self.reader.readline(), close_timeout=self._timeout, name='Readline') return data @asyncio.coroutine def run_network_operation(self, task, wait_timeout=None, close_timeout=None, name='Network operation'): '''Run the task and raise appropriate exceptions. Coroutine. ''' if wait_timeout is not None and close_timeout is not None: raise Exception( 'Cannot use wait_timeout and close_timeout at the same time') try: if close_timeout is not None: with self._close_timer.with_timeout(): data = yield from task if self._close_timer.is_timeout(): raise NetworkTimedOut( '{name} timed out.'.format(name=name)) else: return data elif wait_timeout is not None: data = yield from asyncio.wait_for(task, wait_timeout) return data else: return (yield from task) except asyncio.TimeoutError as error: self.close() raise NetworkTimedOut( '{name} timed out.'.format(name=name)) from error except (tornado.netutil.SSLCertificateError, SSLVerificationError) \ as error: self.close() raise SSLVerificationError( '{name} certificate error: {error}' .format(name=name, error=error)) from error except AttributeError as error: self.close() raise NetworkError( '{name} network error: connection closed unexpectedly: {error}' .format(name=name, error=error)) from error except (socket.error, ssl.SSLError, OSError, IOError) as error: self.close() if isinstance(error, NetworkError): raise if error.errno == errno.ECONNREFUSED: raise ConnectionRefused( error.errno, os.strerror(error.errno)) from error # XXX: This quality case brought to you by OpenSSL and Python. # Example: _ssl.SSLError: [Errno 1] error:14094418:SSL # routines:SSL3_READ_BYTES:tlsv1 alert unknown ca error_string = str(error).lower() if 'certificate' in error_string or 'unknown ca' in error_string: raise SSLVerificationError( '{name} certificate error: {error}' .format(name=name, error=error)) from error else: if error.errno: raise NetworkError( error.errno, os.strerror(error.errno)) from error else: raise NetworkError( '{name} network error: {error}' .format(name=name, error=error)) from error
class BaseConnection(object): '''Base network stream. Args: address: 2-item tuple containing the IP address and port or 4-item for IPv6. hostname: Hostname of the address (for SSL). timeout: Time in seconds before a read/write operation times out. connect_timeout: Time in seconds before a connect operation times out. bind_host: Host name for binding the socket interface. sock: Use given socket. The socket must already by connected. Attributes: reader: Stream Reader instance. writer: Stream Writer instance. address: 2-item tuple containing the IP address. host: Host name. port: Port number. ''' def __init__(self, address: tuple, hostname: Optional[str]=None, timeout: Optional[float]=None, connect_timeout: Optional[float]=None, bind_host: Optional[str]=None, sock: Optional[socket.socket]=None): pass @property def address(self) -> tuple: pass @property def hostname(self) -> Optional[str]: pass @property def hostname(self) -> Optional[str]: pass @property def port(self) -> int: pass def closed(self) -> bool: '''Return whether the connection is closed.''' pass def state(self) -> ConnectionState: '''Return the state of this connection.''' pass @asyncio.coroutine def connect(self): '''Establish a connection.''' pass def _connection_kwargs(self): '''Return additional connection arguments.''' pass def closed(self) -> bool: '''Close the connection.''' pass def reset(self): '''Prepare connection for reuse.''' pass @asyncio.coroutine def write(self, data: bytes, drain: bool=True): '''Write data.''' pass @asyncio.coroutine def read(self, amount: int=-1) -> bytes: '''Read data.''' pass @asyncio.coroutine def readline(self) -> bytes: '''Read a line of data.''' pass @asyncio.coroutine def run_network_operation(self, task, wait_timeout=None, close_timeout=None, name='Network operation'): '''Run the task and raise appropriate exceptions. Coroutine. ''' pass
25
11
13
2
10
1
2
0.19
1
18
7
1
15
10
15
15
240
43
165
51
134
32
108
35
92
13
1
3
35
6,632
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/network/connection.py
wpull.network.connection.CloseTimer
class CloseTimer(object): '''Periodic timer to close connections if stalled.''' def __init__(self, timeout, connection): self._timeout = timeout self._touch_time = None self._call_later_handle = None self._connection = connection self._event_loop = asyncio.get_event_loop() self._timed_out = False self._running = True assert self._timeout > 0 self._schedule() def _schedule(self): '''Schedule check function.''' if self._running: _logger.debug('Schedule check function.') self._call_later_handle = self._event_loop.call_later( self._timeout, self._check) def _check(self): '''Check and close connection if needed.''' _logger.debug('Check if timeout.') self._call_later_handle = None if self._touch_time is not None: difference = self._event_loop.time() - self._touch_time _logger.debug('Time difference %s', difference) if difference > self._timeout: self._connection.close() self._timed_out = True if not self._connection.closed(): self._schedule() def close(self): '''Stop running timers.''' if self._call_later_handle: self._call_later_handle.cancel() self._running = False @contextlib.contextmanager def with_timeout(self): '''Context manager that applies timeout checks.''' self._touch_time = self._event_loop.time() try: yield finally: self._touch_time = None def is_timeout(self) -> bool: '''Return whether the timer has timed out.''' return self._timed_out
class CloseTimer(object): '''Periodic timer to close connections if stalled.''' def __init__(self, timeout, connection): pass def _schedule(self): '''Schedule check function.''' pass def _check(self): '''Check and close connection if needed.''' pass def close(self): '''Stop running timers.''' pass @contextlib.contextmanager def with_timeout(self): '''Context manager that applies timeout checks.''' pass def is_timeout(self) -> bool: '''Return whether the timer has timed out.''' pass
8
6
8
1
6
1
2
0.15
1
1
0
0
6
7
6
6
56
10
40
16
32
6
37
15
30
4
1
2
11
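A minimal usage sketch for the CloseTimer class recorded above, assuming wpull and its third-party dependencies are importable. The StubConnection class is a hypothetical stand-in that provides only the close() and closed() calls the timer makes.

import asyncio
from wpull.network.connection import CloseTimer

class StubConnection(object):
    '''Hypothetical stand-in; only close() and closed() are used by the timer.'''
    def __init__(self):
        self._closed = False
    def close(self):
        self._closed = True
    def closed(self):
        return self._closed

@asyncio.coroutine
def demo():
    connection = StubConnection()
    timer = CloseTimer(2.0, connection)
    with timer.with_timeout():
        yield from asyncio.sleep(0.1)  # completes well within the 2 second limit
    print(timer.is_timeout())  # False; the stub connection was never closed
    timer.close()

asyncio.get_event_loop().run_until_complete(demo())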
6,633
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/network/connection.py
wpull.network.connection.Connection
class Connection(BaseConnection): '''Network stream. Args: bandwidth_limiter (class:`.bandwidth.BandwidthLimiter`): Bandwidth limiter for connection speed limiting. Attributes: key: Value used by the ConnectionPool for its host pool map. Internal use only. wrapped_connection: A wrapped connection for ConnectionPool. Internal use only. is_ssl (bool): Whether connection is SSL. proxied (bool): Whether the connection is to a HTTP proxy. tunneled (bool): Whether the connection has been tunneled with the ``CONNECT`` request. ''' def __init__(self, *args, bandwidth_limiter=None, **kwargs): super().__init__(*args, **kwargs) self._bandwidth_limiter = bandwidth_limiter self.key = None self.wrapped_connection = None self._proxied = False self._tunneled = False @property def is_ssl(self) -> bool: return False @property def tunneled(self) -> bool: if self.closed(): self._tunneled = False return self._tunneled @tunneled.setter def tunneled(self, value): self._tunneled = value @property def proxied(self) -> bool: return self._proxied @proxied.setter def proxied(self, value): self._proxied = value @asyncio.coroutine def read(self, amount: int=-1) -> bytes: data = yield from super().read(amount) if self._bandwidth_limiter: self._bandwidth_limiter.feed(len(data)) sleep_time = self._bandwidth_limiter.sleep_time() if sleep_time: _logger.debug('Sleep %s', sleep_time) yield from asyncio.sleep(sleep_time) return data @asyncio.coroutine def start_tls(self, ssl_context: Union[bool, dict, ssl.SSLContext]=True) \ -> 'SSLConnection': '''Start client TLS on this connection and return SSLConnection. Coroutine ''' sock = self.writer.get_extra_info('socket') ssl_conn = SSLConnection( self._address, ssl_context=ssl_context, hostname=self._hostname, timeout=self._timeout, connect_timeout=self._connect_timeout, bind_host=self._bind_host, bandwidth_limiter=self._bandwidth_limiter, sock=sock ) yield from ssl_conn.connect() return ssl_conn
class Connection(BaseConnection): '''Network stream. Args: bandwidth_limiter (class:`.bandwidth.BandwidthLimiter`): Bandwidth limiter for connection speed limiting. Attributes: key: Value used by the ConnectionPool for its host pool map. Internal use only. wrapped_connection: A wrapped connection for ConnectionPool. Internal use only. is_ssl (bool): Whether connection is SSL. proxied (bool): Whether the connection is to a HTTP proxy. tunneled (bool): Whether the connection has been tunneled with the ``CONNECT`` request. ''' def __init__(self, *args, bandwidth_limiter=None, **kwargs): pass @property def is_ssl(self) -> bool: pass @property def tunneled(self) -> bool: pass @tunneled.setter def tunneled(self) -> bool: pass @property def proxied(self) -> bool: pass @proxied.setter def proxied(self) -> bool: pass @asyncio.coroutine def read(self, amount: int=-1) -> bytes: pass @asyncio.coroutine def start_tls(self, ssl_context: Union[bool, dict, ssl.SSLContext]=True) \ -> 'SSLConnection': '''Start client TLS on this connection and return SSLConnection. Coroutine ''' pass
16
2
6
1
5
0
1
0.35
1
7
1
1
8
10
8
23
83
18
48
29
31
17
34
18
25
3
2
2
11
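A short sketch using the Connection class above, written in the same yield-from coroutine style as the surrounding code. The 127.0.0.1:8080 endpoint is hypothetical; the sketch assumes some HTTP server is listening there.

import asyncio
from wpull.network.connection import Connection

@asyncio.coroutine
def fetch():
    # Hypothetical endpoint; adjust the address tuple to a real server.
    connection = Connection(('127.0.0.1', 8080), timeout=10.0)
    yield from connection.connect()
    yield from connection.write(b'GET / HTTP/1.0\r\n\r\n')
    reply = yield from connection.read()
    connection.close()
    return reply

print(asyncio.get_event_loop().run_until_complete(fetch()))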
6,634
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/namevalue_test.py
wpull.namevalue_test.TestNameValue
class TestNameValue(unittest.TestCase): RECORD_STR_1 = ('entry:\r\n' 'who: Gilbert, W.S. | Sullivan, Arthur\r\n' 'what: The Yeomen of\r\n' ' the Guard\r\n' 'when/created: 1888\r\n') RECORD_STR_2 = ('entry:\n' 'who: Gilbert, W.S. | Sullivan, Arthur\n' 'what: The Yeomen of\n' ' the Guard\n' 'when/created: 1888\n') RECORD_STR_3 = ('entry:\r\n' 'who: Gilbert, W.S. | Sullivan, Arthur\r\n' 'what: The Yeomen of the Guard\r\n' 'when/created: 1888\r\n') MIXED_LINE_ENDING_STR_1 = ( 'dog: woof\n' 'cat: meow\r\n' 'bird: tweet\r' 'mouse: squeak\n' 'cow: moo\r\n' 'frog: croak\n\r' 'elephant: toot\n' 'duck: \n' ' quack\r\n' 'fish: blub\n' 'seal: ow ow ow\r' 'fox: ???\r' ) def test_guess_line_ending(self): self.assertEqual('\r\n', guess_line_ending(self.RECORD_STR_1)) self.assertEqual('\n', guess_line_ending(self.RECORD_STR_2)) def test_unfold_lines(self): self.assertEqual(self.RECORD_STR_3, unfold_lines(self.RECORD_STR_1)) def test_name_value_record_setters(self): record = NameValueRecord() self.assertNotIn('cache', record) self.assertRaises(KeyError, lambda: record['cache']) record['cache'] = 'value1' self.assertEqual('value1', record['CACHE']) self.assertEqual(['value1'], record.get_list('Cache')) self.assertEqual( [('Cache', 'value1')], list(record.get_all()) ) def test_name_value_record_parsing(self): record = NameValueRecord() record.parse(self.RECORD_STR_1) self.assertIn('who', record) self.assertEqual('Gilbert, W.S. | Sullivan, Arthur', record['who']) def test_name_value_str_format(self): record = NameValueRecord() record.parse(self.RECORD_STR_1) self.assertEqual( ('Entry:\r\n' 'Who: Gilbert, W.S. | Sullivan, Arthur\r\n' 'What: The Yeomen of the Guard\r\n' 'When/Created: 1888\r\n'), str(record) ) def test_name_value_utf8(self): text = '''Name: dogé''' record = NameValueRecord() record.parse(text) self.assertEqual('dogé', record['name']) def test_name_value_encoding(self): text = '''Name: Кракозябры'''.encode('koi8-r') record = NameValueRecord(encoding='koi8-r') record.parse(text) self.assertEqual( 'Кракозябры', record['name']) def test_missing_colon(self): record = NameValueRecord() self.assertRaises(ValueError, record.parse, 'text:hello\nhi\n') record = NameValueRecord() record.parse('text:hello\nhi\n', strict=False) self.assertEqual('hello', record['text']) self.assertNotIn('hi', record) def test_normalize_name(self): self.assertEqual('Hi', normalize_name('hi')) self.assertEqual('Hi', normalize_name('Hi')) self.assertEqual('Hi', normalize_name('HI')) self.assertEqual('Content-Type', normalize_name('ContEnt-TYPE')) self.assertEqual('Content-Type', normalize_name('content-type')) self.assertEqual('Content-Type', normalize_name('content-type', ['Content-Type'])) self.assertEqual('content-type', normalize_name('content-type', ['content-type'])) self.assertEqual('CoNTeNT-TYPe', normalize_name('Content-Type', ['CoNTeNT-TYPe'])) def test_with_normalize_overrides(self): record = NameValueRecord(normalize_overrides=['WARC-Type']) record.add('WARC-Type', 'warcinfo') self.assertIn('WARC-Type', record) self.assertEqual('warcinfo', record['WARC-Type']) self.assertEqual([('WARC-Type', 'warcinfo')], list(record.get_all())) self.assertEqual(['warcinfo'], record.get_list('Warc-Type')) self.assertEqual(['WARC-Type'], list(record.keys())) record['Warc-Type'] = 'resource' self.assertIn('WARC-Type', record) self.assertEqual('resource', record['WARC-Type']) self.assertEqual([('WARC-Type', 'resource')], list(record.get_all())) self.assertEqual(['resource'], record.get_list('Warc-Type')) self.assertEqual(['WARC-Type'], 
list(record.keys())) record['WARC-Blah'] = 'blah' self.assertEqual(['WARC-Type', 'Warc-Blah'], list(record.keys())) def test_mixed_line_ending(self): record = NameValueRecord() record.parse(self.MIXED_LINE_ENDING_STR_1) self.assertEqual('woof', record['dog']) self.assertEqual('meow', record['cat']) self.assertEqual('tweet', record['bird']) self.assertEqual('squeak', record['mouse']) self.assertEqual('moo', record['cow']) self.assertEqual('croak', record['frog']) self.assertEqual('toot', record['elephant']) self.assertEqual('quack', record['duck']) self.assertEqual('blub', record['fish']) self.assertEqual('ow ow ow', record['seal']) self.assertEqual('???', record['fox']) def test_copy(self): record = NameValueRecord() record['blah'] = 'hello' # Check for no crash copy.deepcopy(record) def test_wrap_width(self): record = NameValueRecord(wrap_width=24) record['blah'] = 'hello ' * 10 self.assertEqual( 'Blah: hello hello hello hello\r\n' ' hello hello hello \r\n' ' hello hello hello \r\n', str(record) )
class TestNameValue(unittest.TestCase): def test_guess_line_ending(self): pass def test_unfold_lines(self): pass def test_name_value_record_setters(self): pass def test_name_value_record_parsing(self): pass def test_name_value_str_format(self): pass def test_name_value_utf8(self): pass def test_name_value_encoding(self): pass def test_missing_colon(self): pass def test_normalize_name(self): pass def test_with_normalize_overrides(self): pass def test_mixed_line_ending(self): pass def test_copy(self): pass def test_wrap_width(self): pass
14
0
9
1
8
0
1
0.01
1
5
1
0
13
0
13
85
163
28
134
30
120
1
91
30
77
1
2
0
13
6,635
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/network/connection.py
wpull.network.connection.ConnectionState
class ConnectionState(enum.Enum): '''State of a connection Attributes: ready: Connection is ready to be used created: connect has been called successfully dead: Connection is closed ''' ready = 'ready' created = 'created' dead = 'dead'
class ConnectionState(enum.Enum): '''State of a connection Attributes: ready: Connection is ready to be used created: connect has been called successfully dead: Connection is closed ''' pass
1
1
0
0
0
0
0
1.5
1
0
0
0
0
0
0
49
11
1
4
4
3
6
4
4
3
0
4
0
0
6,636
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/urlfilter.py
wpull.urlfilter.SpanHostsFilter
class SpanHostsFilter(BaseURLFilter): '''Filter URLs that go to other hostnames.''' def __init__(self, hostnames, enabled=False, page_requisites=False, linked_pages=False): self._hostnames = hostnames self._enabled = enabled self._page_requisites = page_requisites self._linked_pages = linked_pages def test(self, url_info, url_table_record): if self._enabled: return True if url_info.hostname in self._hostnames: return True if self._page_requisites and url_table_record.inline_level: return True if self._linked_pages and url_table_record.parent_url_info \ and url_table_record.parent_url_info.hostname in self._hostnames: return True
class SpanHostsFilter(BaseURLFilter): '''Filter URLs that go to other hostnames.''' def __init__(self, hostnames, enabled=False, page_requisites=False, linked_pages=False): pass def test(self, url_info, url_table_record): pass
3
1
10
2
8
0
3
0.06
1
0
0
0
2
4
2
23
22
4
17
8
13
1
15
7
12
5
4
1
6
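A small sketch of the SpanHostsFilter above. The SimpleNamespace objects are hypothetical stand-ins for wpull's URL info and URL record types; they carry only the attributes the filter actually reads.

from types import SimpleNamespace
from wpull.urlfilter import SpanHostsFilter

url_info = SimpleNamespace(hostname='cdn.example.net')
url_record = SimpleNamespace(inline_level=1, parent_url_info=None)

span_filter = SpanHostsFilter(['example.com'], page_requisites=True)

# The host is foreign, but inline page requisites are allowed through.
print(span_filter.test(url_info, url_record))  # True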
6,637
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/network/connection.py
wpull.network.connection.SSLConnection
class SSLConnection(Connection): '''SSL network stream. Args: ssl_context: SSLContext ''' def __init__(self, *args, ssl_context: Union[bool, dict, ssl.SSLContext]=True, **kwargs): super().__init__(*args, **kwargs) self._ssl_context = ssl_context if self._ssl_context is True: self._ssl_context = tornado.netutil.ssl_options_to_context({}) elif isinstance(self._ssl_context, dict): self._ssl_context = tornado.netutil.ssl_options_to_context( self._ssl_context) @property def is_ssl(self) -> bool: return True def _connection_kwargs(self): kwargs = super()._connection_kwargs() if self._ssl_context: kwargs['ssl'] = self._ssl_context kwargs['server_hostname'] = self._hostname return kwargs @asyncio.coroutine def connect(self): result = yield from super().connect() try: sock = self.writer.transport.get_extra_info('ssl_object', self.writer.transport.get_extra_info('socket')) except AttributeError as error: raise SSLVerificationError('Failed to establish SSL connection; ' 'server unexpectedly closed') from error self._verify_cert(sock) return result def _verify_cert(self, sock: ssl.SSLSocket): '''Check if certificate matches hostname.''' # Based on tornado.iostream.SSLIOStream # Needed for older OpenSSL (<0.9.8f) versions verify_mode = self._ssl_context.verify_mode assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL), \ 'Unknown verify mode {}'.format(verify_mode) if verify_mode == ssl.CERT_NONE: return cert = sock.getpeercert() if not cert and verify_mode == ssl.CERT_OPTIONAL: return if not cert: raise SSLVerificationError('No SSL certificate given') try: ssl.match_hostname(cert, self._hostname) except ssl.CertificateError as error: raise SSLVerificationError('Invalid SSL certificate') from error
class SSLConnection(Connection): '''SSL network stream. Args: ssl_context: SSLContext ''' def __init__(self, *args, ssl_context: Union[bool, dict, ssl.SSLContext]=True, **kwargs): pass @property def is_ssl(self) -> bool: pass def _connection_kwargs(self): pass @asyncio.coroutine def connect(self): pass def _verify_cert(self, sock: ssl.SSLSocket): '''Check if certificate matches hostname.''' pass
8
2
11
2
9
1
3
0.15
1
7
1
0
5
1
5
28
68
15
46
17
37
7
37
12
31
5
3
1
13
6,638
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/network/dns.py
wpull.network.dns.DNSInfo
class DNSInfo(_DNSInfo): '''DNS resource records.''' __slots__ = () def to_text_format(self): '''Format as detached DNS information as text.''' return '\n'.join(itertools.chain( (self.fetch_date.strftime('%Y%m%d%H%M%S'), ), (rr.to_text() for rr in self.resource_records), (), ))
class DNSInfo(_DNSInfo): '''DNS resource records.''' def to_text_format(self): '''Format as detached DNS information as text.''' pass
2
2
7
0
6
1
1
0.25
1
1
0
0
1
0
1
1
11
1
8
3
6
2
4
3
2
1
1
0
1
6,639
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/network/dns.py
wpull.network.dns.IPFamilyPreference
class IPFamilyPreference(enum.Enum): '''IPv4 and IPV6 preferences.''' any = 'any' ipv4_only = socket.AF_INET ipv6_only = socket.AF_INET6
class IPFamilyPreference(enum.Enum): '''IPv4 and IPV6 preferences.''' pass
1
1
0
0
0
0
0
0.25
1
0
0
0
0
0
0
49
6
1
4
3
3
1
4
3
3
0
4
0
0
6,640
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/network/dns.py
wpull.network.dns.ResolveResult
class ResolveResult(object): '''DNS resolution information.''' def __init__(self, address_infos: List[AddressInfo], dns_infos: Optional[List[DNSInfo]]=None): self._address_infos = address_infos self._dns_infos = dns_infos @property def addresses(self) -> Sequence[AddressInfo]: '''The socket addresses.''' return self._address_infos @property def dns_infos(self) -> List[DNSInfo]: '''The DNS resource records.''' return self._dns_infos @property def first_ipv4(self) -> Optional[AddressInfo]: '''The first IPv4 address.''' for info in self._address_infos: if info.family == socket.AF_INET: return info @property def first_ipv6(self) -> Optional[AddressInfo]: '''The first IPV6 address.''' for info in self._address_infos: if info.family == socket.AF_INET6: return info def shuffle(self): '''Shuffle the addresses.''' random.shuffle(self._address_infos) def rotate(self): '''Move the first address to the last position.''' item = self._address_infos.pop(0) self._address_infos.append(item)
class ResolveResult(object): '''DNS resolution information.''' def __init__(self, address_infos: List[AddressInfo], dns_infos: Optional[List[DNSInfo]]=None): pass @property def addresses(self) -> Sequence[AddressInfo]: '''The socket addresses.''' pass @property def dns_infos(self) -> List[DNSInfo]: '''The DNS resource records.''' pass @property def first_ipv4(self) -> Optional[AddressInfo]: '''The first IPv4 address.''' pass @property def first_ipv6(self) -> Optional[AddressInfo]: '''The first IPV6 address.''' pass def shuffle(self): '''Shuffle the addresses.''' pass def rotate(self): '''Move the first address to the last position.''' pass
12
7
4
0
3
1
2
0.27
1
1
1
0
7
2
7
7
39
6
26
18
13
7
21
13
13
3
1
2
11
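A sketch exercising ResolveResult above. The SimpleNamespace entries stand in for AddressInfo objects, whose constructor is not shown here, and expose only the family and ip_address attributes the class reads.

import socket
from types import SimpleNamespace
from wpull.network.dns import ResolveResult

address_infos = [
    SimpleNamespace(family=socket.AF_INET6, ip_address='::1'),
    SimpleNamespace(family=socket.AF_INET, ip_address='127.0.0.1'),
]
result = ResolveResult(address_infos)

print(result.first_ipv4.ip_address)  # 127.0.0.1
print(result.first_ipv6.ip_address)  # ::1

result.rotate()  # move the first entry to the back
print(result.addresses[0].ip_address)  # 127.0.0.1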
6,641
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/network/pool.py
wpull.network.pool.ConnectionPool
class ConnectionPool(object): '''Connection pool. Args: max_host_count: Number of connections per host. resolver: DNS resolver. connection_factory: A function that accepts ``address`` and ``hostname`` arguments and returns a :class:`Connection` instance. ssl_connection_factory: A function that returns a :class:`SSLConnection` instance. See `connection_factory`. max_count: Limit on number of connections ''' def __init__(self, max_host_count: int=6, resolver: Optional[Resolver]=None, connection_factory: Optional[Callable[[tuple, str], Connection]]=None, ssl_connection_factory: Optional[Callable[[tuple, str], SSLConnection]]=None, max_count: int=100): self._max_host_count = max_host_count self._resolver = resolver or Resolver() self._connection_factory = connection_factory or Connection self._ssl_connection_factory = ssl_connection_factory or SSLConnection self._max_count = max_count self._host_pools = {} self._host_pool_waiters = {} self._host_pools_lock = asyncio.Lock() self._release_tasks = set() self._closed = False self._happy_eyeballs_table = HappyEyeballsTable() @property def host_pools(self) -> Mapping[tuple, HostPool]: return self._host_pools @asyncio.coroutine def acquire(self, host: str, port: int, use_ssl: bool=False, host_key: Optional[Any]=None) \ -> Union[Connection, SSLConnection]: '''Return an available connection. Args: host: A hostname or IP address. port: Port number. use_ssl: Whether to return a SSL connection. host_key: If provided, it overrides the key used for per-host connection pooling. This is useful for proxies for example. Coroutine. ''' assert isinstance(port, int), 'Expect int. Got {}'.format(type(port)) assert not self._closed yield from self._process_no_wait_releases() if use_ssl: connection_factory = functools.partial( self._ssl_connection_factory, hostname=host) else: connection_factory = functools.partial( self._connection_factory, hostname=host) connection_factory = functools.partial( HappyEyeballsConnection, (host, port), connection_factory, self._resolver, self._happy_eyeballs_table, is_ssl=use_ssl ) key = host_key or (host, port, use_ssl) with (yield from self._host_pools_lock): if key not in self._host_pools: host_pool = self._host_pools[key] = HostPool( connection_factory, max_connections=self._max_host_count ) self._host_pool_waiters[key] = 1 else: host_pool = self._host_pools[key] self._host_pool_waiters[key] += 1 _logger.debug('Check out %s', key) connection = yield from host_pool.acquire() connection.key = key # TODO: Verify this assert is always true # assert host_pool.count() <= host_pool.max_connections # assert key in self._host_pools # assert self._host_pools[key] == host_pool with (yield from self._host_pools_lock): self._host_pool_waiters[key] -= 1 return connection @asyncio.coroutine def release(self, connection: Connection): '''Put a connection back in the pool. Coroutine. 
''' assert not self._closed key = connection.key host_pool = self._host_pools[key] _logger.debug('Check in %s', key) yield from host_pool.release(connection) force = self.count() > self._max_count yield from self.clean(force=force) def no_wait_release(self, connection: Connection): '''Synchronous version of :meth:`release`.''' _logger.debug('No wait check in.') release_task = asyncio.get_event_loop().create_task( self.release(connection) ) self._release_tasks.add(release_task) @asyncio.coroutine def _process_no_wait_releases(self): '''Process check in tasks.''' while True: try: release_task = self._release_tasks.pop() except KeyError: return else: yield from release_task @asyncio.coroutine def session(self, host: str, port: int, use_ssl: bool=False): '''Return a context manager that returns a connection. Usage:: session = yield from connection_pool.session('example.com', 80) with session as connection: connection.write(b'blah') connection.close() Coroutine. ''' connection = yield from self.acquire(host, port, use_ssl) @contextlib.contextmanager def context_wrapper(): try: yield connection finally: self.no_wait_release(connection) return context_wrapper() @asyncio.coroutine def clean(self, force: bool=False): '''Clean all closed connections. Args: force: Clean connected and idle connections too. Coroutine. ''' assert not self._closed with (yield from self._host_pools_lock): for key, pool in tuple(self._host_pools.items()): yield from pool.clean(force=force) if not self._host_pool_waiters[key] and pool.empty(): del self._host_pools[key] del self._host_pool_waiters[key] def close(self): '''Close all the connections and clean up. This instance will not be usable after calling this method. ''' for key, pool in tuple(self._host_pools.items()): pool.close() del self._host_pools[key] del self._host_pool_waiters[key] self._closed = True def count(self) -> int: '''Return number of connections.''' counter = 0 for pool in self._host_pools.values(): counter += pool.count() return counter
class ConnectionPool(object): '''Connection pool. Args: max_host_count: Number of connections per host. resolver: DNS resolver. connection_factory: A function that accepts ``address`` and ``hostname`` arguments and returns a :class:`Connection` instance. ssl_connection_factory: A function that returns a :class:`SSLConnection` instance. See `connection_factory`. max_count: Limit on number of connections ''' def __init__(self, max_host_count: int=6, resolver: Optional[Resolver]=None, connection_factory: Optional[Callable[[tuple, str], Connection]]=None, ssl_connection_factory: Optional[Callable[[tuple, str], SSLConnection]]=None, max_count: int=100): pass @property def host_pools(self) -> Mapping[tuple, HostPool]: pass @asyncio.coroutine def acquire(self, host: str, port: int, use_ssl: bool=False, host_key: Optional[Any]=None) \ -> Union[Connection, SSLConnection]: '''Return an available connection. Args: host: A hostname or IP address. port: Port number. use_ssl: Whether to return a SSL connection. host_key: If provided, it overrides the key used for per-host connection pooling. This is useful for proxies for example. Coroutine. ''' pass @asyncio.coroutine def release(self, connection: Connection): '''Put a connection back in the pool. Coroutine. ''' pass def no_wait_release(self, connection: Connection): '''Synchronous version of :meth:`release`.''' pass @asyncio.coroutine def _process_no_wait_releases(self): '''Process check in tasks.''' pass @asyncio.coroutine def session(self, host: str, port: int, use_ssl: bool=False): '''Return a context manager that returns a connection. Usage:: session = yield from connection_pool.session('example.com', 80) with session as connection: connection.write(b'blah') connection.close() Coroutine. ''' pass @contextlib.contextmanager def context_wrapper(): pass @asyncio.coroutine def clean(self, force: bool=False): '''Clean all closed connections. Args: force: Clean connected and idle connections too. Coroutine. ''' pass def close(self): '''Close all the connections and clean up. This instance will not be usable after calling this method. ''' pass def count(self) -> int: '''Return number of connections.''' pass
19
9
16
3
10
3
2
0.41
1
15
6
1
10
11
10
10
197
41
111
52
84
45
82
37
70
3
1
3
19
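A sketch of checking a connection out of the ConnectionPool above and returning it, in the same coroutine style. The localhost:8080 server is hypothetical, the raw HTTP/1.0 request is only there to have something to write, and the explicit connect-if-closed step mirrors how callers are expected to use a pooled connection.

import asyncio
from wpull.network.pool import ConnectionPool

@asyncio.coroutine
def fetch_once():
    pool = ConnectionPool(max_host_count=2)
    # Assumes an HTTP server is listening on localhost:8080 (hypothetical).
    connection = yield from pool.acquire('localhost', 8080)
    try:
        if connection.closed():
            yield from connection.connect()
        yield from connection.write(b'GET / HTTP/1.0\r\n\r\n')
        reply = yield from connection.read()
    finally:
        yield from pool.release(connection)
    pool.close()
    return reply

print(asyncio.get_event_loop().run_until_complete(fetch_once()))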
6,642
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/network/connection.py
wpull.network.connection.DummyCloseTimer
class DummyCloseTimer(object): '''Dummy close timer.''' @contextlib.contextmanager def with_timeout(self): yield def is_timeout(self): return False def close(self): pass
class DummyCloseTimer(object): '''Dummy close timer.''' @contextlib.contextmanager def with_timeout(self): pass def is_timeout(self): pass def close(self): pass
5
1
2
0
2
0
1
0.13
1
0
0
0
3
0
3
3
11
2
8
5
3
1
7
4
3
1
1
0
3
6,643
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/namevalue.py
wpull.namevalue.NameValueRecord
class NameValueRecord(collections.MutableMapping): '''An ordered mapping of name-value pairs. Duplicated names are accepted. .. seealso:: http://tools.ietf.org/search/draft-kunze-anvl-02 ''' def __init__(self, normalize_overrides=None, encoding='utf-8', wrap_width=None): self._map = OrderedDefaultDict(list) self.raw = None self.encoding = encoding self._normalize_overrides = normalize_overrides self._wrap_width = wrap_width def parse(self, string, strict=True): '''Parse the string or bytes. Args: strict (bool): If True, errors will not be ignored Raises: :class:`ValueError` if the record is malformed. ''' if isinstance(string, bytes): errors = 'strict' if strict else 'replace' string = string.decode(self.encoding, errors=errors) if not self.raw: self.raw = string else: self.raw += string lines = unfold_lines(string).splitlines() for line in lines: if line: if ':' not in line: if strict: raise ValueError('Field missing colon.') else: continue name, value = line.split(':', 1) name = name.strip() value = value.strip() self.add(name, value) def __getitem__(self, name): normalized_name = normalize_name(name, self._normalize_overrides) if normalized_name in self._map: if self._map[normalized_name]: return self._map[normalized_name][0] raise KeyError(name) def __setitem__(self, name, value): normalized_name = normalize_name(name, self._normalize_overrides) self._map[normalized_name][:] = (value,) def __delitem__(self, name): del self._map[normalize_name(name, self._normalize_overrides)] def __iter__(self): return iter(self._map) def __len__(self): return len(self._map) def add(self, name, value): '''Append the name-value pair to the record.''' normalized_name = normalize_name(name, self._normalize_overrides) self._map[normalized_name].append(value) def get_list(self, name): '''Return all the values for given name.''' normalized_name = normalize_name(name, self._normalize_overrides) return self._map[normalized_name] def get_all(self): '''Return an iterator of name-value pairs.''' for name, values in self._map.items(): for value in values: yield (name, value) def __str__(self): return self.to_str() def to_str(self): '''Convert to string.''' pairs = [] for name, value in self.get_all(): if value and self._wrap_width: pairs.append('{0}:{1}'.format( name, '\r\n'.join(textwrap.wrap( value, width=self._wrap_width, drop_whitespace=False, initial_indent=' ', subsequent_indent=' ' )) )) elif value: pairs.append('{0}: {1}'.format(name, value)) else: pairs.append('{0}:'.format(name)) pairs.append('') return '\r\n'.join(pairs) def __bytes__(self): return self.to_bytes() def to_bytes(self, errors='strict'): '''Convert to bytes.''' return str(self).encode(self.encoding, errors=errors)
class NameValueRecord(collections.MutableMapping): '''An ordered mapping of name-value pairs. Duplicated names are accepted. .. seealso:: http://tools.ietf.org/search/draft-kunze-anvl-02 ''' def __init__(self, normalize_overrides=None, encoding='utf-8', wrap_width=None): pass def parse(self, string, strict=True): '''Parse the string or bytes. Args: strict (bool): If True, errors will not be ignored Raises: :class:`ValueError` if the record is malformed. ''' pass def __getitem__(self, name): pass def __setitem__(self, name, value): pass def __delitem__(self, name): pass def __iter__(self): pass def __len__(self): pass def add(self, name, value): '''Append the name-value pair to the record.''' pass def get_list(self, name): '''Return all the values for given name.''' pass def get_all(self): '''Return an iterator of name-value pairs.''' pass def __str__(self): pass def to_str(self): '''Convert to string.''' pass def __bytes__(self): pass def to_bytes(self, errors='strict'): '''Convert to bytes.''' pass
15
7
7
1
5
1
2
0.19
1
6
1
0
14
5
14
14
115
23
77
33
61
15
65
32
50
8
1
4
28
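A small sketch of the NameValueRecord mapping above, assuming wpull is importable; the header names and values are arbitrary.

from wpull.namevalue import NameValueRecord

record = NameValueRecord()
record.parse('Content-Type: text/html\r\nSet-Cookie: a=1\r\n')
record.add('Set-Cookie', 'b=2')

print(record['content-type'])         # 'text/html' (names are normalized)
print(record.get_list('Set-Cookie'))  # ['a=1', 'b=2']
print(record.to_str())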
6,644
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/errors.py
wpull.errors.ServerError
class ServerError(ValueError): '''Server issued an error.'''
class ServerError(ValueError): '''Server issued an error.''' pass
1
1
0
0
0
0
0
1
1
0
0
2
0
0
0
11
2
0
1
1
0
1
1
1
0
0
4
0
0
6,645
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/errors.py
wpull.errors.SSLVerificationError
class SSLVerificationError(OSError): '''A problem occurred validating SSL certificates.'''
class SSLVerificationError(OSError): '''A problem occurred validating SSL certificates.''' pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
13
2
0
1
1
0
1
1
1
0
0
4
0
0
6,646
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/document/htmlparse/lxml_.py
wpull.document.htmlparse.lxml_.HTMLParser
class HTMLParser(BaseParser): '''HTML document parser. This reader uses lxml as the parser. ''' BUFFER_SIZE = 131072 @property def parser_error(self): return lxml.etree.LxmlError def parse(self, file, encoding=None): parser_type = self.detect_parser_type(file, encoding=encoding) if parser_type == 'xhtml': # Use the HTML parser because there exists XHTML soup parser_type = 'html' for element in self.parse_lxml(file, encoding=encoding, parser_type=parser_type): yield element def parse_lxml(self, file, encoding=None, target_class=HTMLParserTarget, parser_type='html'): '''Return an iterator of elements found in the document. Args: file: A file object containing the document. encoding (str): The encoding of the document. target_class: A class to be used for target parsing. parser_type (str): The type of parser to use. Accepted values: ``html``, ``xhtml``, ``xml``. Returns: iterator: Each item is an element from :mod:`.document.htmlparse.element` ''' if encoding: lxml_encoding = to_lxml_encoding(encoding) or 'latin1' else: lxml_encoding = encoding elements = [] callback_func = elements.append target = target_class(callback_func) if parser_type == 'html': parser = lxml.html.HTMLParser( encoding=lxml_encoding, target=target ) elif parser_type == 'xhtml': parser = lxml.html.XHTMLParser( encoding=lxml_encoding, target=target, recover=True ) else: parser = lxml.etree.XMLParser( encoding=lxml_encoding, target=target, recover=True ) if parser_type == 'html': # XXX: Force libxml2 to do full read in case of early "</html>" # See https://github.com/chfoo/wpull/issues/104 # See https://bugzilla.gnome.org/show_bug.cgi?id=727935 for dummy in range(3): parser.feed('<html>'.encode(encoding)) while True: data = file.read(self.BUFFER_SIZE) if not data: break parser.feed(data) for element in elements: yield element del elements[:] parser.close() for element in elements: yield element @classmethod def parse_doctype(cls, file, encoding=None): '''Get the doctype from the document. Returns: str, None ''' if encoding: lxml_encoding = to_lxml_encoding(encoding) or 'latin1' else: lxml_encoding = encoding try: parser = lxml.etree.XMLParser(encoding=lxml_encoding, recover=True) tree = lxml.etree.parse( io.BytesIO(wpull.util.peek_file(file)), parser=parser ) if tree.getroot() is not None: return tree.docinfo.doctype except lxml.etree.LxmlError: pass @classmethod def detect_parser_type(cls, file, encoding=None): '''Get the suitable parser type for the document. Returns: str ''' is_xml = XMLDetector.is_file(file) doctype = cls.parse_doctype(file, encoding=encoding) or '' if not doctype and is_xml: return 'xml' if 'XHTML' in doctype: return 'xhtml' return 'html'
class HTMLParser(BaseParser): '''HTML document parser. This reader uses lxml as the parser. ''' @property def parser_error(self): pass def parser_error(self): pass def parse_lxml(self, file, encoding=None, target_class=HTMLParserTarget, parser_type='html'): '''Return an iterator of elements found in the document. Args: file: A file object containing the document. encoding (str): The encoding of the document. target_class: A class to be used for target parsing. parser_type (str): The type of parser to use. Accepted values: ``html``, ``xhtml``, ``xml``. Returns: iterator: Each item is an element from :mod:`.document.htmlparse.element` ''' pass @classmethod def parse_doctype(cls, file, encoding=None): '''Get the doctype from the document. Returns: str, None ''' pass @classmethod def detect_parser_type(cls, file, encoding=None): '''Get the suitable parser type for the document. Returns: str ''' pass
9
4
22
4
13
5
4
0.37
1
3
2
0
3
0
5
27
125
28
71
26
61
26
54
22
48
10
4
2
21
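A sketch of the lxml-based HTMLParser above. It assumes wpull and lxml are installed and simply prints the element stream produced for a tiny in-memory document.

import io
from wpull.document.htmlparse.lxml_ import HTMLParser

parser = HTMLParser()
html_doc = io.BytesIO(
    b'<html><body><a href="/about">About</a><!-- note --></body></html>'
)

for item in parser.parse(html_doc, encoding='utf-8'):
    # Items are Element and Comment objects from document.htmlparse.element.
    print(item)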
6,647
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/document/htmlparse/lxml_.py
wpull.document.htmlparse.lxml_.HTMLParserTarget
class HTMLParserTarget(object): '''An HTML parser target. Args: callback: A callback function. The function should accept one argument from :mod:`.document.htmlparse.element`. ''' # NOTE: If we ever support Python 2 again, byte strings may be # returned from lxml def __init__(self, callback): self.callback = callback self.tag = None self.attrib = None self.buffer = None self.tail_buffer = None def start(self, tag, attrib): if self.buffer: self.callback(Element( self.tag, self.attrib, self.buffer.getvalue(), None, False )) self.buffer = None if self.tail_buffer: self.callback(Element( self.tag, dict(), None, self.tail_buffer.getvalue(), True )) self.tail_buffer = None self.tag = tag self.attrib = attrib self.buffer = io.StringIO() def data(self, data): if self.buffer: self.buffer.write(data) if self.tail_buffer: self.tail_buffer.write(data) def end(self, tag): if self.buffer: self.callback(Element( tag, self.attrib, self.buffer.getvalue(), None, False )) self.buffer = None if self.tail_buffer: self.callback(Element( self.tag, dict(), None, self.tail_buffer.getvalue(), True )) self.tail_buffer = None self.tail_buffer = io.StringIO() self.tag = tag def comment(self, text): self.callback(Comment(text)) def close(self): if self.buffer: self.callback(Element( self.tag, self.attrib, self.buffer.getvalue(), None, False )) self.buffer = None if self.tail_buffer: self.callback(Element( self.tag, dict(), None, self.tail_buffer.getvalue(), True )) self.tail_buffer = None return True
class HTMLParserTarget(object): '''An HTML parser target. Args: callback: A callback function. The function should accept one argument from :mod:`.document.htmlparse.element`. ''' def __init__(self, callback): pass def start(self, tag, attrib): pass def data(self, data): pass def end(self, tag): pass def comment(self, text): pass def close(self): pass
7
1
12
1
11
0
2
0.1
1
1
0
0
6
5
6
6
88
13
68
12
61
7
41
12
34
3
1
1
14
6,648
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/document/javascript.py
wpull.document.javascript.JavaScriptReader
class JavaScriptReader(BaseDocumentDetector, BaseTextStreamReader): '''JavaScript Document Reader.''' # Pattern based from https://github.com/internetarchive/heritrix3/ # blob/ffd248f7800dbd4bff1cf8afaa57a0a3e945ed85/modules/src/ # main/java/org/archive/modules/extractor/ExtractorJS.java URL_PATTERN = r'''(\\{0,8}['"])(https?://[^'"]{1,500}|[^\s'"]{1,500})(?:\1)''' URL_REGEX = re.compile(URL_PATTERN) BUFFER_SIZE = 1048576 STREAM_REWIND = 4096 @classmethod def is_url(cls, url_info): '''Return whether the document is likely to be JS.''' if '.js' in url_info.path.lower(): return True @classmethod def is_request(cls, request): '''Return whether the document is likely to be JS.''' return cls.is_url(request.url_info) @classmethod def is_response(cls, response): '''Return whether the document is likely to be JS.''' if 'javascript' in response.fields.get('content-type', '').lower(): return True if response.body: # script mistakenly served as HTML if 'html' in response.fields.get('content-type', '').lower(): return cls.is_file(response.body) @classmethod def is_file(cls, file): '''Return whether the file is likely JS.''' peeked_data = wpull.string.printable_bytes( wpull.util.peek_file(file)).lower() if b'<html' in peeked_data: return VeryFalse if re.search(br'var|function|settimeout|jquery\(', peeked_data): return True def iter_text(self, file, encoding=None): if isinstance(file, io.TextIOBase): stream = file else: stream = codecs.getreader(encoding or 'latin1')(file) regex_stream = RegexStream(stream, self.URL_REGEX) for match, text in regex_stream.stream(): yield (text, bool(match)) def read_links(self, file, encoding=None): '''Return an iterator of links found in the document. Args: file: A file object containing the document. encoding (str): The encoding of the document. Returns: iterable: str ''' return [item[0] for item in self.iter_text(file, encoding) if item[1]]
class JavaScriptReader(BaseDocumentDetector, BaseTextStreamReader): '''JavaScript Document Reader.''' @classmethod def is_url(cls, url_info): '''Return whether the document is likely to be JS.''' pass @classmethod def is_request(cls, request): '''Return whether the document is likely to be JS.''' pass @classmethod def is_response(cls, response): '''Return whether the document is likely to be JS.''' pass @classmethod def is_file(cls, file): '''Return whether the file is likely JS.''' pass def iter_text(self, file, encoding=None): pass def read_links(self, file, encoding=None): '''Return an iterator of links found in the document. Args: file: A file object containing the document. encoding (str): The encoding of the document. Returns: iterable: str ''' pass
11
6
8
1
5
2
2
0.42
2
3
1
1
2
0
6
33
67
13
38
19
27
16
31
15
24
4
4
2
14
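A sketch of the JavaScriptReader above, extracting quoted URL-like strings from a small script held in memory; the script contents are arbitrary.

import io
from wpull.document.javascript import JavaScriptReader

reader = JavaScriptReader()
script = io.BytesIO(b'var endpoint = "http://example.com/api/data.json";\n'
                    b'var label = "not a link";\n')

for link in reader.read_links(script, encoding='ascii'):
    print(link)  # prints the matched span, surrounding quotes included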
6,649
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/document/javascript_test.py
wpull.document.javascript_test.TestJavaScript
class TestJavaScript(unittest.TestCase): def test_js_detect(self): self.assertTrue(JavaScriptReader.is_file( io.BytesIO('var a = 1;'.encode('utf-16le')) )) self.assertTrue(JavaScriptReader.is_file( io.BytesIO('setTimeout('.encode('utf-16le')) )) self.assertFalse(JavaScriptReader.is_file( io.BytesIO('hello world!'.encode('utf-16le')) )) self.assertFalse(JavaScriptReader.is_file( io.BytesIO(b'<html><body>hello') )) self.assertTrue(JavaScriptReader.is_file( io.BytesIO(b'<html><body>hello') ) is VeryFalse) response = Response(200, 'OK') response.fields['Content-Type'] = 'application/javascript' self.assertTrue(JavaScriptReader.is_response(response)) response = Response(200, 'OK') response.fields['Content-Type'] = 'image/png' self.assertFalse(JavaScriptReader.is_response(response))
class TestJavaScript(unittest.TestCase): def test_js_detect(self): pass
2
0
24
2
22
0
1
0
1
2
2
0
1
0
1
73
25
2
23
3
21
0
13
3
11
1
2
0
1
6,650
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/document/sitemap.py
wpull.document.sitemap.SitemapReader
class SitemapReader(BaseDocumentDetector, BaseExtractiveReader): '''Sitemap XML reader.''' MAX_ROBOTS_FILE_SIZE = 4096 def __init__(self, html_parser): super().__init__() self._html_parser = html_parser @classmethod def is_url(cls, url_info): '''Return whether the document is likely to be a Sitemap.''' path = url_info.path.lower() if path == '/robots.txt': return True if 'sitemap' in path and '.xml' in path: return True @classmethod def is_request(cls, request): '''Return whether the document is likely to be a Sitemap.''' return cls.is_url(request.url_info) @classmethod def is_response(cls, response): '''Return whether the document is likely to be a Sitemap.''' if response.body: if cls.is_file(response.body): return True @classmethod def is_file(cls, file): '''Return whether the file is likely a Sitemap.''' peeked_data = wpull.util.peek_file(file) if is_gzip(peeked_data): try: peeked_data = wpull.decompression.gzip_uncompress( peeked_data, truncated=True ) except zlib.error: pass peeked_data = wpull.string.printable_bytes(peeked_data) if b'<?xml' in peeked_data \ and (b'<sitemapindex' in peeked_data or b'<urlset' in peeked_data): return True def iter_links(self, file, encoding=None): peeked_data = wpull.util.peek_file(file) if is_gzip(peeked_data): file = gzip.GzipFile(mode='rb', fileobj=file) if self.is_file(file): for html_obj in self._html_parser.parse(file, encoding): if isinstance(html_obj, Element) \ and html_obj.tag.endswith('loc'): if html_obj.text: yield html_obj.text else: parser = robotexclusionrulesparser.RobotExclusionRulesParser() parser.parse(file.read(self.MAX_ROBOTS_FILE_SIZE)) for link in parser.sitemaps: yield link
class SitemapReader(BaseDocumentDetector, BaseExtractiveReader): '''Sitemap XML reader.''' def __init__(self, html_parser): pass @classmethod def is_url(cls, url_info): '''Return whether the document is likely to be a Sitemap.''' pass @classmethod def is_request(cls, request): '''Return whether the document is likely to be a Sitemap.''' pass @classmethod def is_response(cls, response): '''Return whether the document is likely to be a Sitemap.''' pass @classmethod def is_file(cls, file): '''Return whether the file is likely a Sitemap.''' pass def iter_links(self, file, encoding=None): pass
11
5
9
1
7
1
3
0.1
2
4
1
1
2
1
6
32
66
12
49
19
38
5
40
15
33
7
4
4
19
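A sketch of the SitemapReader above, fed a tiny in-memory urlset. Passing the lxml HTMLParser as the html_parser argument is an assumption about typical wiring, not something the class itself requires.

import io
from wpull.document.htmlparse.lxml_ import HTMLParser
from wpull.document.sitemap import SitemapReader

reader = SitemapReader(HTMLParser())
sitemap_doc = io.BytesIO(
    b'<?xml version="1.0" encoding="UTF-8"?>'
    b'<urlset><url><loc>http://example.com/page.html</loc></url></urlset>'
)

for link in reader.iter_links(sitemap_doc, encoding='utf-8'):
    print(link)  # http://example.com/page.html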
6,651
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/document/xml.py
wpull.document.xml.XMLDetector
class XMLDetector(BaseDocumentDetector): @classmethod def is_file(cls, file): peeked_data = wpull.string.printable_bytes( wpull.util.peek_file(file)).lower() if b'<?xml' in peeked_data: return True @classmethod def is_request(cls, request): return cls.is_url(request.url_info) @classmethod def is_response(cls, response): if 'xml' in response.fields.get('content-type', '').lower(): return True if response.body: if cls.is_file(response.body): return True @classmethod def is_url(cls, url_info): path = url_info.path.lower() if path.endswith('.xml'): return True
class XMLDetector(BaseDocumentDetector): @classmethod def is_file(cls, file): pass @classmethod def is_request(cls, request): pass @classmethod def is_response(cls, response): pass @classmethod def is_url(cls, url_info): pass
9
0
5
1
4
0
2
0
1
0
0
0
0
0
4
29
27
5
22
11
13
0
17
7
12
4
4
2
9
6,652
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/document/xml_test.py
wpull.document.xml_test.TestXML
class TestXML(unittest.TestCase): def test_xml_detect(self): self.assertTrue(XMLDetector.is_file( io.BytesIO('<?xml version='.encode('utf-16le')) )) self.assertFalse(XMLDetector.is_file( io.BytesIO('<!DOCTYPE html><html><body>'.encode('utf-16le')) )) self.assertFalse(XMLDetector.is_file( io.BytesIO(b'<html><body>hello') )) self.assertTrue(XMLDetector.is_file( io.BytesIO(b'<?xml version') )) self.assertTrue( XMLDetector.is_url(URLInfo.parse('example.com/index.xml')) ) self.assertFalse( XMLDetector.is_url(URLInfo.parse('example.com/image.jpg')) ) self.assertTrue( XMLDetector.is_request(Request('example.com/index.xml')) ) self.assertFalse( XMLDetector.is_request(Request('example.com/image.jpg')) ) response = Response(200, 'OK') response.fields['Content-Type'] = 'text/xml' self.assertTrue(XMLDetector.is_response(response)) response = Response(200, 'OK') response.fields['Content-Type'] = 'application/xml' self.assertTrue(XMLDetector.is_response(response)) response = Response(200, 'OK') response.fields['Content-Type'] = 'image/png' self.assertFalse(XMLDetector.is_response(response))
class TestXML(unittest.TestCase): def test_xml_detect(self): pass
2
0
37
3
34
0
1
0
1
4
4
0
1
0
1
73
38
3
35
3
33
0
19
3
17
1
2
0
1
6,653
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/driver/phantomjs.py
wpull.driver.phantomjs.PhantomJSDriver
class PhantomJSDriver(Process): '''PhantomJS processing. Args: exe_path (str): Path of the PhantomJS executable. extra_args (list): Additional arguments for PhantomJS. Most likely, you'll want to pass proxy settings for capturing traffic. params (:class:`PhantomJSDriverParams`): Parameters for controlling the processing pipeline. This class launches PhantomJS that scrolls and saves snapshots. It can only be used once per URL. ''' def __init__(self, exe_path='phantomjs', extra_args=None, params=None): script_path = wpull.util.get_package_filename('driver/phantomjs.js') self._config_file = tempfile.NamedTemporaryFile( prefix='tmp-wpull-', suffix='.json', delete=False ) args = [exe_path] + (extra_args or []) + [script_path, self._config_file.name] super().__init__(args, stderr_callback=self._stderr_callback) self._params = params @asyncio.coroutine def _stderr_callback(self, line): _logger.warning(line.decode('utf-8', 'replace').rstrip()) @asyncio.coroutine def start(self, use_atexit=True): _logger.debug('PhantomJS start.') self._write_config() yield from super().start(use_atexit) def _write_config(self): '''Write the parameters to a file for PhantomJS to read.''' param_dict = { 'url': self._params.url, 'snapshot_paths': self._params.snapshot_paths, 'wait_time': self._params.wait_time, 'num_scrolls': self._params.num_scrolls, 'smart_scroll': self._params.smart_scroll, 'snapshot': self._params.snapshot, 'viewport_width': self._params.viewport_size[0], 'viewport_height': self._params.viewport_size[1], 'paper_width': self._params.paper_size[0], 'paper_height': self._params.paper_size[1], 'custom_headers': self._params.custom_headers, 'page_settings': self._params.page_settings, } if self._params.event_log_filename: param_dict['event_log_filename'] = \ os.path.abspath(self._params.event_log_filename) if self._params.action_log_filename: param_dict['action_log_filename'] = \ os.path.abspath(self._params.action_log_filename) config_text = json.dumps(param_dict) self._config_file.write(config_text.encode('utf-8')) # Close it so the phantomjs process can read it on Windows self._config_file.close() def close(self): _logger.debug('Terminate phantomjs process.') super().close() if os.path.exists(self._config_file.name): os.remove(self._config_file.name)
class PhantomJSDriver(Process): '''PhantomJS processing. Args: exe_path (str): Path of the PhantomJS executable. extra_args (list): Additional arguments for PhantomJS. Most likely, you'll want to pass proxy settings for capturing traffic. params (:class:`PhantomJSDriverParams`): Parameters for controlling the processing pipeline. This class launches PhantomJS that scrolls and saves snapshots. It can only be used once per URL. ''' def __init__(self, exe_path='phantomjs', extra_args=None, params=None): pass @asyncio.coroutine def _stderr_callback(self, line): pass @asyncio.coroutine def start(self, use_atexit=True): pass def _write_config(self): '''Write the parameters to a file for PhantomJS to read.''' pass def close(self): pass
8
2
11
2
9
0
2
0.26
1
1
0
0
5
2
5
11
75
17
46
14
38
12
27
12
21
3
2
1
8
6,654
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/errors.py
wpull.errors.AuthenticationError
class AuthenticationError(ServerError): '''Username or password error.'''
class AuthenticationError(ServerError): '''Username or password error.'''
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
11
2
0
1
1
0
1
1
1
0
0
5
0
0
6,655
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/errors.py
wpull.errors.ConnectionRefused
class ConnectionRefused(NetworkError): '''Server was online, but nothing was being served.'''
class ConnectionRefused(NetworkError): '''Server was online, but nothing was being served.''' pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
13
2
0
1
1
0
1
1
1
0
0
5
0
0
6,656
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/errors.py
wpull.errors.DNSNotFound
class DNSNotFound(NetworkError): '''Server's IP address could not be located.'''
class DNSNotFound(NetworkError): '''Server's IP address could not be located.''' pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
13
2
0
1
1
0
1
1
1
0
0
5
0
0
6,657
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/errors.py
wpull.errors.ExitStatus
class ExitStatus(object): '''Program exit status codes. Attributes: generic_error (1): An unclassified serious or fatal error occurred. parser_error (2): A local document or configuration file could not be parsed. file_io_error (3): A problem with reading/writing a file occurred. network_failure (4): A problem with the network occurred such as a DNS resolver error or a connection was refused. ssl_verification_error (5): A server's SSL/TLS certificate was invalid. authentication_failure (6): A problem with a username or password. protocol_error (7): A problem with communicating with a server occurred. server_error (8): The server had problems fulfilling our requests. ''' generic_error = 1 parser_error = 2 file_io_error = 3 network_failure = 4 ssl_verification_error = 5 authentication_failure = 6 protocol_error = 7 server_error = 8
class ExitStatus(object): '''Program exit status codes. Attributes: generic_error (1): An unclassified serious or fatal error occurred. parser_error (2): A local document or configuration file could not be parsed. file_io_error (3): A problem with reading/writing a file occurred. network_failure (4): A problem with the network occurred such as a DNS resolver error or a connection was refused. ssl_verification_error (5): A server's SSL/TLS certificate was invalid. authentication_failure (6): A problem with a username or password. protocol_error (7): A problem with communicating with a server occurred. server_error (8): The server had problems fulfilling our requests. '''
1
1
0
0
0
0
0
1.56
1
0
0
0
0
0
0
0
24
1
9
9
8
14
9
9
8
0
1
0
0
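The codes above map naturally onto the exception classes in this module. The helper below is hypothetical (not part of wpull) and only illustrates that mapping; ordering matters because AuthenticationError subclasses ServerError.

from wpull.errors import (AuthenticationError, ExitStatus, NetworkError,
                          ProtocolError, SSLVerificationError, ServerError)

# Hypothetical mapping based on the attribute documentation above.
_ERROR_CODES = (
    (AuthenticationError, ExitStatus.authentication_failure),
    (ServerError, ExitStatus.server_error),
    (ProtocolError, ExitStatus.protocol_error),
    (SSLVerificationError, ExitStatus.ssl_verification_error),
    (NetworkError, ExitStatus.network_failure),
)

def exit_status_for(error):
    for error_class, code in _ERROR_CODES:
        if isinstance(error, error_class):
            return code
    return ExitStatus.generic_error

print(exit_status_for(NetworkError()))         # 4
print(exit_status_for(AuthenticationError()))  # 6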
6,658
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/errors.py
wpull.errors.NetworkError
class NetworkError(OSError): '''A networking error.'''
class NetworkError(OSError): '''A networking error.''' pass
1
1
0
0
0
0
0
1
1
0
0
3
0
0
0
13
2
0
1
1
0
1
1
1
0
0
4
0
0
6,659
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/errors.py
wpull.errors.NetworkTimedOut
class NetworkTimedOut(NetworkError): '''Connection read/write timed out.'''
class NetworkTimedOut(NetworkError): '''Connection read/write timed out.''' pass
1
1
0
0
0
0
0
1
1
0
0
1
0
0
0
13
2
0
1
1
0
1
1
1
0
0
5
0
0
6,660
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/errors.py
wpull.errors.ProtocolError
class ProtocolError(ValueError): '''A protocol was not followed.'''
class ProtocolError(ValueError): '''A protocol was not followed.''' pass
1
1
0
0
0
0
0
1
1
0
0
2
0
0
0
11
2
0
1
1
0
1
1
1
0
0
4
0
0
6,661
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/document/html_test.py
wpull.document.html_test.TestLxmlHTML
class TestLxmlHTML(Mixin, unittest.TestCase): def get_html_parser(self): return LxmlHTMLParser()
class TestLxmlHTML(Mixin, unittest.TestCase): def get_html_parser(self): pass
2
0
2
0
2
0
1
0
2
0
0
0
1
0
1
80
3
0
3
2
1
0
3
2
1
1
2
0
1
6,662
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/network/pool.py
wpull.network.pool.HappyEyeballsConnection
class HappyEyeballsConnection(object): '''Wrapper for happy eyeballs connection.''' def __init__(self, address, connection_factory, resolver, happy_eyeballs_table, is_ssl=False): self._address = address self._connection_factory = connection_factory self._resolver = resolver self._happy_eyeballs_table = happy_eyeballs_table self._primary_connection = None self._secondary_connection = None self._active_connection = None self.key = None self.proxied = False self.tunneled = False self.ssl = is_ssl def __getattr__(self, item): return getattr(self._active_connection, item) def closed(self): if self._active_connection: return self._active_connection.closed() else: return True def close(self): if self._active_connection: self._active_connection.close() def reset(self): if self._active_connection: self._active_connection.reset() @asyncio.coroutine def connect(self): if self._active_connection: yield from self._active_connection.connect() return result = yield from self._resolver.resolve(self._address[0]) primary_host, secondary_host = self._get_preferred_host(result) if not secondary_host: self._primary_connection = self._active_connection = \ self._connection_factory((primary_host, self._address[1])) yield from self._primary_connection.connect() else: yield from self._connect_dual_stack( (primary_host, self._address[1]), (secondary_host, self._address[1]) ) @asyncio.coroutine def _connect_dual_stack(self, primary_address, secondary_address): '''Connect using happy eyeballs.''' self._primary_connection = self._connection_factory(primary_address) self._secondary_connection = self._connection_factory(secondary_address) @asyncio.coroutine def connect_primary(): yield from self._primary_connection.connect() return self._primary_connection @asyncio.coroutine def connect_secondary(): yield from self._secondary_connection.connect() return self._secondary_connection primary_fut = connect_primary() secondary_fut = connect_secondary() failed = False for fut in asyncio.as_completed((primary_fut, secondary_fut)): if not self._active_connection: try: self._active_connection = yield from fut except NetworkError: if not failed: _logger.debug('Original dual stack exception', exc_info=True) failed = True else: raise else: _logger.debug('Got first of dual stack.') else: @asyncio.coroutine def cleanup(): try: conn = yield from fut except NetworkError: pass else: conn.close() _logger.debug('Closed abandoned connection.') asyncio.get_event_loop().create_task(cleanup()) preferred_host = self._active_connection.host self._happy_eyeballs_table.set_preferred( preferred_host, primary_address[0], secondary_address[0]) def _get_preferred_host(self, result: ResolveResult) -> Tuple[str, str]: '''Get preferred host from DNS results.''' host_1 = result.first_ipv4.ip_address if result.first_ipv4 else None host_2 = result.first_ipv6.ip_address if result.first_ipv6 else None if not host_2: return host_1, None elif not host_1: return host_2, None preferred_host = self._happy_eyeballs_table.get_preferred( host_1, host_2) if preferred_host: return preferred_host, None else: return host_1, host_2
class HappyEyeballsConnection(object): '''Wrapper for happy eyeballs connection.''' def __init__(self, address, connection_factory, resolver, happy_eyeballs_table, is_ssl=False): pass def __getattr__(self, item): pass def closed(self): pass def closed(self): pass def reset(self): pass @asyncio.coroutine def connect(self): pass @asyncio.coroutine def _connect_dual_stack(self, primary_address, secondary_address): '''Connect using happy eyeballs.''' pass @asyncio.coroutine def connect_primary(): pass @asyncio.coroutine def connect_secondary(): pass @asyncio.coroutine def cleanup(): pass def _get_preferred_host(self, result: ResolveResult) -> Tuple[str, str]: '''Get preferred host from DNS results.''' pass
17
3
11
1
10
0
2
0.03
1
3
2
0
8
11
8
8
122
22
97
40
79
3
79
34
67
6
1
4
26
6,663
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/converter.py
wpull.converter.BatchDocumentConverter
class BatchDocumentConverter(object): '''Convert all documents in URL table. Args: url_table: An instance of :class:`.database.URLTable`. backup (bool): Whether back up files are created. ''' def __init__(self, html_parser, element_walker, url_table, backup=False): self._url_table = url_table self._backup_enabled = backup self._html_converter = HTMLConverter(html_parser, element_walker, url_table) self._css_converter = CSSConverter(url_table) def convert_all(self): '''Convert all links in URL table.''' for url_record in self._url_table.get_all(): if url_record.status != Status.done: continue self.convert_by_record(url_record) def convert_by_record(self, url_record): '''Convert using given URL Record.''' filename = url_record.filename if not os.path.exists(filename): return if url_record.link_type: if url_record.link_type not in ('css', 'html'): return else: link_type = url_record.link_type else: with open(filename, 'rb') as in_file: if HTMLScraper.is_supported( file=in_file, url_info=url_record.url_info): link_type = 'html' elif CSSScraper.is_supported( file=in_file, url_info=url_record.url_info): link_type = 'css' else: link_type = None _logger.info(__( _('Converting links in file ‘{filename}’ (type={type}).'), filename=filename, type=link_type )) if self._backup_enabled: shutil.copy2(filename, filename + '.orig') temp_filename = filename + '-new' if link_type == 'css': self._css_converter.convert( filename, temp_filename, base_url=url_record.url) elif link_type == 'html': self._html_converter.convert( filename, temp_filename, base_url=url_record.url) else: raise Exception('Unknown link type.') os.remove(filename) os.rename(temp_filename, filename)
class BatchDocumentConverter(object): '''Convert all documents in URL table. Args: url_table: An instance of :class:`.database.URLTable`. backup (bool): Whether back up files are created. ''' def __init__(self, html_parser, element_walker, url_table, backup=False): pass def convert_all(self): '''Convert all links in URL table.''' pass def convert_by_record(self, url_record): '''Convert using given URL Record.''' pass
4
3
19
3
16
1
4
0.15
1
6
5
0
3
4
3
3
66
11
48
13
44
7
34
12
30
9
1
3
13
6,664
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/collections.py
wpull.collections.OrderedDefaultDict
class OrderedDefaultDict(OrderedDict):
    '''An ordered default dict.

    http://stackoverflow.com/a/6190500/1524507
    '''
    def __init__(self, default_factory=None, *args, **kwargs):
        if default_factory is not None and \
                not isinstance(default_factory, collections.Callable):
            raise TypeError('First argument must be callable')

        OrderedDict.__init__(self, *args, **kwargs)
        self.default_factory = default_factory

    def __getitem__(self, key):
        try:
            return OrderedDict.__getitem__(self, key)
        except KeyError:
            return self.__missing__(key)

    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)

        self[key] = value = self.default_factory()

        return value

    def __reduce__(self):
        if self.default_factory is None:
            args = tuple()
        else:
            args = self.default_factory,

        return type(self), args, None, None, self.items()

    def copy(self):
        return self.__copy__()

    def __copy__(self):
        return type(self)(self.default_factory, self)

    def __deepcopy__(self, memo):
        return type(self)(self.default_factory,
                          copy.deepcopy(tuple(self.items())))

    def __repr__(self):
        return 'OrderedDefaultDict(%s, %s)' % (
            self.default_factory, collections.OrderedDict.__repr__(self))
class OrderedDefaultDict(OrderedDict):
    '''An ordered default dict.

    http://stackoverflow.com/a/6190500/1524507
    '''
    def __init__(self, default_factory=None, *args, **kwargs):
        pass
    def __getitem__(self, key):
        pass
    def __missing__(self, key):
        pass
    def __reduce__(self):
        pass
    def copy(self):
        pass
    def __copy__(self):
        pass
    def __deepcopy__(self, memo):
        pass
    def __repr__(self):
        pass
9
1
4
0
4
0
2
0.09
1
4
0
0
8
1
8
58
44
8
33
12
24
3
29
11
20
2
3
1
12
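A brief usage sketch of the ordered default dict above; it is illustrative only and assumes the class is importable and running on a Python version where collections.Callable is still available.

# Missing keys are created by the factory while insertion order is kept.
counts = OrderedDefaultDict(int)

for word in ['b', 'a', 'b', 'c']:
    counts[word] += 1      # missing keys default to int() == 0

print(list(counts.items()))  # [('b', 2), ('a', 1), ('c', 1)] -- insertion order preserved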
6,665
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/options_test.py
wpull.application.options_test.TestOptions
class TestOptions(unittest.TestCase):
    def test_no_args(self):
        arg_parser = AppArgumentParser(real_exit=False)
        self.assertRaises(ValueError, arg_parser.parse_args, [])

    def test_app_sanity(self):
        arg_items = [
            ('--verbose', '--quiet'),
            ('--timestamp', '--no-clobber'),
            ('--inet4-only', '--inet6-only'),
            ('--warc-file=test', '--no-clobber'),
            ('--warc-file=test', '--timestamping'),
            ('--warc-file=test', '--continue'),
            ('--no-iri', '--local-encoding=shiftjis'),
            ('--no-iri', '--remote-encoding=shiftjis'),
        ]

        for arg_item in arg_items:
            def print_(message=None):
                print(message)

            def test_exit(status=0, message=None):
                raise ValueError(status, message)

            arg_parser = AppArgumentParser()
            arg_parser.exit = test_exit
            arg_parser.print_help = print_
            arg_parser.print_usage = print_

            try:
                print(arg_item)
                arg_parser.parse_args(['http://example.invalid'] + list(arg_item))
            except ValueError as error:
                self.assertEqual(2, error.args[0])
            else:
                self.assertTrue(False)
class TestOptions(unittest.TestCase):
    def test_no_args(self):
        pass
    def test_app_sanity(self):
        pass
    def print_(message=None):
        pass
    def test_exit(status=0, message=None):
        pass
5
0
10
1
9
0
2
0
1
3
1
0
2
0
2
74
36
5
31
10
26
0
22
9
17
3
2
2
6
6,666
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/plugin.py
wpull.application.plugin.InterfaceRegistry
class InterfaceRegistry(collections.Mapping):
    def __init__(self):
        super().__init__()
        self._interfaces = {}

    def __len__(self):
        return len(self._interfaces)

    def __getitem__(self, key):
        return self._interfaces[key]

    def __iter__(self):
        return iter(self._interfaces)

    def register(self, name: Any, interface: Any,
                 category: PluginFunctionCategory):
        if name in self._interfaces:
            raise ValueError('Interface already registered')

        self._interfaces[name] = (interface, category)
class InterfaceRegistry(collections.Mapping):
    def __init__(self):
        pass
    def __len__(self):
        pass
    def __getitem__(self, key):
        pass
    def __iter__(self):
        pass
    def register(self, name: Any, interface: Any,
                 category: PluginFunctionCategory):
        pass
6
0
3
0
3
0
1
0
1
4
1
0
5
1
5
5
19
5
14
7
8
0
14
7
8
2
1
1
6
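Because InterfaceRegistry subclasses collections.Mapping, registered interfaces can be read back with ordinary dict syntax. A small illustrative sketch; the registered name and object below are made up, not part of wpull.

registry = InterfaceRegistry()
registry.register('accept_url', object(), PluginFunctionCategory.hook)

print(len(registry))               # 1
print('accept_url' in registry)    # True -- Mapping supplies __contains__
interface, category = registry['accept_url']  # stored as an (interface, category) pair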
6,667
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/plugin.py
wpull.application.plugin.PluginFunctionCategory
class PluginFunctionCategory(enum.Enum):
    hook = 'hook'
    event = 'event'
class PluginFunctionCategory(enum.Enum):
    pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
3
0
3
3
2
0
3
3
2
0
4
0
0
6,668
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/plugin.py
wpull.application.plugin.PluginFunctions
class PluginFunctions(enum.Enum):
    queued_url = 'queued_url'
    dequeued_url = 'dequeued_url'
    resolve_dns = 'resolve_dns'
    resolve_dns_result = 'resolve_dns_result'
    accept_url = 'accept_url'
    wait_time = 'wait_time'
    handle_response = 'handle_response'
    handle_pre_response = 'handle_pre_response'
    handle_error = 'handle_error'
    get_urls = 'get_urls'
    finishing_statistics = 'finishing_statistics'
    exit_status = 'exit_status'
class PluginFunctions(enum.Enum):
    pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
13
0
13
13
12
0
13
13
12
0
4
0
0
6,669
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/plugin.py
wpull.application.plugin.WpullPlugin
class WpullPlugin(IPlugin):
    def __init__(self):
        super().__init__()

        self.app_session = None

    def should_activate(self) -> bool:
        return True

    def get_plugin_functions(self) -> Iterator[PluginClientFunctionInfo]:
        funcs = inspect.getmembers(self)

        for name, func in funcs:
            if hasattr(func, 'plugin_attach_name'):
                yield PluginClientFunctionInfo(
                    func, func.plugin_attach_name,
                    func.plugin_attach_category)
class WpullPlugin(IPlugin):
    def __init__(self):
        pass
    def should_activate(self) -> bool:
        pass
    def get_plugin_functions(self) -> Iterator[PluginClientFunctionInfo]:
        pass
4
0
4
0
4
0
2
0
1
2
0
10
3
1
3
3
15
3
12
7
8
0
11
7
7
3
1
2
5
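get_plugin_functions() above discovers plugin callbacks by looking for a plugin_attach_name attribute on methods. The sketch below shows that discovery mechanism only; wpull ships its own decorators for attaching these attributes, so the manual assignments here are purely illustrative.

class MyPlugin(WpullPlugin):
    def my_handler(self, item):
        return item

# Mark the method the same way get_plugin_functions() expects
# (normally done by wpull's own hook/event decorators).
MyPlugin.my_handler.plugin_attach_name = PluginFunctions.accept_url
MyPlugin.my_handler.plugin_attach_category = PluginFunctionCategory.hook

plugin = MyPlugin()
for func, name, category in plugin.get_plugin_functions():
    print(name, category)  # PluginFunctions.accept_url PluginFunctionCategory.hook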
6,670
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/plugins/arg_warning.plugin.py
wpull.application.plugins.arg_warning.plugin.ArgWarningPlugin
class ArgWarningPlugin(WpullPlugin): UNSAFE_OPTIONS = frozenset(['save_headers', 'no_iri', 'output_document', 'ignore_fatal_errors']) def activate(self): super().activate() self._warn_unsafe_options(self.app_session.args) self._warn_silly_options(self.app_session.args) @classmethod def _warn_unsafe_options(cls, args): '''Print warnings about any enabled hazardous options. This function will print messages complaining about: * ``--save-headers`` * ``--no-iri`` * ``--output-document`` * ``--ignore-fatal-errors`` ''' enabled_options = [] for option_name in cls.UNSAFE_OPTIONS: if getattr(args, option_name): enabled_options.append(option_name) if enabled_options: _logger.warning(__( _('The following unsafe options are enabled: {list}.'), list=enabled_options )) _logger.warning( _('The use of unsafe options may lead to unexpected behavior ' 'or file corruption.')) if not args.retr_symlinks: _logger.warning( _('The --retr-symlinks=off option is a security risk.') ) @classmethod def _warn_silly_options(cls, args): '''Print warnings about any options that may be silly.''' if 'page-requisites' in args.span_hosts_allow \ and not args.page_requisites: _logger.warning( _('Spanning hosts is allowed for page requisites, ' 'but the page requisites option is not on.') ) if 'linked-pages' in args.span_hosts_allow \ and not args.recursive: _logger.warning( _('Spanning hosts is allowed for linked pages, ' 'but the recursive option is not on.') ) if args.warc_file and \ (args.http_proxy or args.https_proxy): _logger.warning(_('WARC specifications do not handle proxies.')) if (args.password or args.ftp_password or args.http_password or args.proxy_password) and \ args.warc_file: _logger.warning( _('Your password is recorded in the WARC file.'))
class ArgWarningPlugin(WpullPlugin):
    def activate(self):
        pass
    @classmethod
    def _warn_unsafe_options(cls, args):
        '''Print warnings about any enabled hazardous options.

        This function will print messages complaining about:

        * ``--save-headers``
        * ``--no-iri``
        * ``--output-document``
        * ``--ignore-fatal-errors``
        '''
        pass
    @classmethod
    def _warn_silly_options(cls, args):
        '''Print warnings about any options that may be silly.'''
        pass
6
2
19
3
14
3
4
0.17
1
1
0
0
1
0
3
6
66
11
47
9
41
8
25
7
21
5
2
2
11
6,671
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/options.py
wpull.application.options.CommaChoiceListArgs
class CommaChoiceListArgs(frozenset):
    '''Specialized frozenset.

    This class overrides the ``__contains__`` function to allow the use
    of the ``in`` operator for ArgumentParser's ``choices`` checking for
    comma separated lists. The function behaves differently only when
    the objects compared are `CommaChoiceListArgs`.
    '''
    def __contains__(self, item):
        if isinstance(item, CommaChoiceListArgs):
            return item <= self
        else:
            return frozenset.__contains__(self, item)
class CommaChoiceListArgs(frozenset):
    '''Specialized frozenset.

    This class overrides the ``__contains__`` function to allow the use
    of the ``in`` operator for ArgumentParser's ``choices`` checking for
    comma separated lists. The function behaves differently only when
    the objects compared are `CommaChoiceListArgs`.
    '''
    def __contains__(self, item):
        pass
2
1
5
0
5
0
2
1
1
0
0
0
1
0
1
31
13
1
6
2
4
6
5
2
3
2
2
1
2
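The subset-style `in` check above is what lets a comma separated option value be validated against argparse choices. A short demonstration of the overridden operator, using made-up values:

allowed = CommaChoiceListArgs(['html', 'css', 'javascript'])

print('css' in allowed)                                  # plain membership -> True
print(CommaChoiceListArgs(['html', 'css']) in allowed)   # subset check -> True
print(CommaChoiceListArgs(['html', 'pdf']) in allowed)   # 'pdf' not allowed -> False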
6,672
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/plugins/debug_console.plugin.py
wpull.application.plugins.debug_console.plugin.DebugConsolePlugin
class DebugConsolePlugin(WpullPlugin):
    def activate(self):
        super().activate()

        if self.app_session.args.debug_console_port is None:
            return

        application = tornado.web.Application(
            [(r'/', DebugConsoleHandler)],
            builder=self
        )
        sock = socket.socket()
        sock.bind(('localhost', self.app_session.args.debug_console_port))
        sock.setblocking(0)
        sock.listen(1)
        http_server = tornado.httpserver.HTTPServer(application)
        http_server.add_socket(sock)

        _logger.warning(__(
            _('Opened a debug console at localhost:{port}.'),
            port=sock.getsockname()[1]
        ))

        atexit.register(sock.close)
class DebugConsolePlugin(WpullPlugin):
    def activate(self):
        pass
2
0
22
3
19
0
2
0
1
5
1
0
1
0
1
4
23
3
20
5
18
0
14
5
12
2
2
1
2
6,673
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/tasks/conversion.py
wpull.application.tasks.conversion.LinkConversionSetupTask
class LinkConversionSetupTask(ItemTask[AppSession]):
    @asyncio.coroutine
    def process(self, session: AppSession):
        self._build_document_converter(session)

    @classmethod
    def _build_document_converter(cls, session: AppSession):
        '''Build the Document Converter.'''

        if not session.args.convert_links:
            return

        converter = session.factory.new(
            'BatchDocumentConverter',
            session.factory['HTMLParser'],
            session.factory['ElementWalker'],
            session.factory['URLTable'],
            backup=session.args.backup_converted
        )

        return converter
class LinkConversionSetupTask(ItemTask[AppSession]):
    @asyncio.coroutine
    def process(self, session: AppSession):
        pass
    @classmethod
    def _build_document_converter(cls, session: AppSession):
        '''Build the Document Converter.'''
        pass
5
1
9
2
7
1
2
0.06
1
1
1
0
1
0
2
25
21
4
16
6
11
1
8
4
5
2
4
1
3
6,674
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/tasks/conversion.py
wpull.application.tasks.conversion.LinkConversionTask
class LinkConversionTask(ItemTask[QueuedFileSession]):
    @asyncio.coroutine
    def process(self, session: QueuedFileSession):
        converter = session.app_session.factory.instance_map.get(
            'BatchDocumentConverter')

        if not converter:
            return

        converter.convert_by_record(session.url_record)
class LinkConversionTask(ItemTask[QueuedFileSession]):
    @asyncio.coroutine
    def process(self, session: QueuedFileSession):
        pass
3
0
8
2
6
0
2
0
1
1
1
0
1
0
1
24
10
2
8
4
5
0
6
3
4
2
4
1
2
6,675
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/tasks/conversion.py
wpull.application.tasks.conversion.QueuedFileSession
class QueuedFileSession(object):
    def __init__(self, app_session: AppSession, file_id: int,
                 url_record: URLRecord):
        self.app_session = app_session
        self.file_id = file_id
        self.url_record = url_record
class QueuedFileSession(object):
    def __init__(self, app_session: AppSession, file_id: int,
                 url_record: URLRecord):
        pass
2
0
5
0
5
0
1
0
1
3
2
0
1
3
1
1
6
0
6
6
3
0
5
5
3
1
1
0
1
6,676
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/tasks/conversion.py
wpull.application.tasks.conversion.QueuedFileSource
class QueuedFileSource(ItemSource[QueuedFileSession]):
    def __init__(self, app_session: AppSession):
        self._app_session = app_session

    @asyncio.coroutine
    def get_item(self) -> Optional[QueuedFileSession]:
        if not self._app_session.args.convert_links:
            return

        try:
            db_item = self._app_session.factory['URLTable'].convert_check_out()
        except NotFound:
            return

        session = QueuedFileSession(
            self._app_session, db_item[0], db_item[1])
        return session
class QueuedFileSource(ItemSource[QueuedFileSession]):
    def __init__(self, app_session: AppSession):
        pass
    @asyncio.coroutine
    def get_item(self) -> Optional[QueuedFileSession]:
        pass
4
0
7
1
6
0
2
0
1
3
3
0
2
1
2
25
17
3
14
7
10
0
12
6
9
3
4
1
4
6,677
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/tasks/database.py
wpull.application.tasks.database.DatabaseSetupTask
class DatabaseSetupTask(ItemTask[AppSession]):
    @asyncio.coroutine
    def process(self, session: AppSession):
        if session.args.database_uri:
            session.factory.class_map[
                'URLTableImplementation'] = GenericSQLURLTable
            url_table_impl = session.factory.new(
                'URLTableImplementation', session.args.database_uri)
        else:
            url_table_impl = session.factory.new(
                'URLTableImplementation', path=session.args.database)

        url_table = session.factory.new('URLTable', url_table_impl)

        # TODO: add a test for this
        _logger.debug(_('Releasing any in-progress items in database.'))
        url_table.release()
class DatabaseSetupTask(ItemTask[AppSession]):
    @asyncio.coroutine
    def process(self, session: AppSession):
        pass
3
0
15
2
12
1
2
0.07
1
2
2
0
1
0
1
24
17
2
14
5
11
1
9
4
7
2
4
1
2
6,678
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/tasks/database.py
wpull.application.tasks.database.InputURLTask
class InputURLTask(ItemTask[AppSession]): @asyncio.coroutine def process(self, session: AppSession): url_table = session.factory['URLTable'] url_count = 0 for batch in wpull.util.grouper(self._read_input_urls(session), 1000): urls = url_table.add_many(AddURLInfo(url_info.url, None, None) for url_info in batch if url_info) # TODO: attach hook for notifying progress url_count += len(urls) # TODO: check if database is empty # TODO: add a test for this # if not url_count: # raise ValueError(_('No URLs found in input file.')) @classmethod def _read_input_urls(cls, session: AppSession, default_scheme='http'): '''Read the URLs provided by the user.''' url_string_iter = session.args.urls or () # FIXME: url rewriter isn't created yet url_rewriter = session.factory.get('URLRewriter') if session.args.input_file: if session.args.force_html: lines = cls._input_file_as_html_links(session) else: lines = cls._input_file_as_lines(session) url_string_iter = itertools.chain(url_string_iter, lines) base_url = session.args.base for url_string in url_string_iter: _logger.debug(__('Parsing URL {0}', url_string)) if base_url: url_string = wpull.url.urljoin(base_url, url_string) try: url_info = wpull.url.URLInfo.parse( url_string, default_scheme=default_scheme) _logger.debug(__('Parsed URL {0}', url_info)) if url_rewriter: # TODO: this logic should be a hook url_info = url_rewriter.rewrite(url_info) _logger.debug(__('Rewritten URL {0}', url_info)) yield url_info except ValueError as e: _logger.info(__('Invalid URL {0}: {1}', url_string, e)) @classmethod def _input_file_as_lines(cls, session: AppSession): '''Read lines from input file and return them.''' if session.args.input_file == sys.stdin: input_file = session.args.input_file else: reader = codecs.getreader(session.args.local_encoding or 'utf-8') input_file = reader(session.args.input_file) return input_file @classmethod def _input_file_as_html_links(cls, session: AppSession): '''Read input file as HTML and return the links.''' scrape_result = session.factory['HTMLScraper'].scrape_file( session.args.input_file, encoding=session.args.local_encoding or 'utf-8' ) for context in scrape_result.link_contexts: yield context.link
class InputURLTask(ItemTask[AppSession]):
    @asyncio.coroutine
    def process(self, session: AppSession):
        pass
    @classmethod
    def _read_input_urls(cls, session: AppSession, default_scheme='http'):
        '''Read the URLs provided by the user.'''
        pass
    @classmethod
    def _input_file_as_lines(cls, session: AppSession):
        '''Read lines from input file and return them.'''
        pass
    @classmethod
    def _input_file_as_html_links(cls, session: AppSession):
        '''Read input file as HTML and return the links.'''
        pass
9
3
16
4
11
2
3
0.2
1
4
2
0
1
0
4
27
77
18
49
24
40
10
39
19
34
7
4
3
13
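_read_input_urls above resolves each input line against an optional base URL before parsing it. The same joining behaviour can be sketched with the standard library; wpull uses its own wpull.url helpers, so this is only an approximation.

from urllib.parse import urljoin

base_url = 'http://example.com/section/'
lines = ['page1.html', '/other/page2.html', 'http://example.net/']

for line in lines:
    print(urljoin(base_url, line))
# http://example.com/section/page1.html
# http://example.com/other/page2.html
# http://example.net/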
6,679
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/plugins/download_progress.plugin.py
wpull.application.plugins.download_progress.plugin.DownloadProgressPlugin
class DownloadProgressPlugin(WpullPlugin): def __init__(self): super().__init__() self._progress = None def activate(self): super().activate() args = self.app_session.args if args.verbosity in (LOG_VERBOSE, LOG_DEBUG) and args.progress != 'none': stream = new_encoded_stream(args, self.app_session.stderr) bar_style = args.progress == 'bar' if not stream.isatty(): bar_style = False if bar_style: self._progress = BarProgress(stream=stream) else: self._progress = DotProgress(stream=stream) self._attach_event_listeners() def _attach_event_listeners(self): http_client = cast(HTTPClient, self.app_session.factory['HTTPClient']) http_client.event_dispatcher.add_listener( HTTPClient.ClientEvent.new_session, self._http_session_callback ) ftp_client = cast(FTPClient, self.app_session.factory['FTPClient']) ftp_client.event_dispatcher.add_listener( ftp_client.ClientEvent.new_session, self._ftp_session_callback ) def _http_session_callback(self, http_session: HTTPSession): http_session.event_dispatcher.add_listener( HTTPSession.Event.begin_request, self._progress.update_from_begin_request ) http_session.event_dispatcher.add_listener( HTTPSession.Event.begin_response, self._progress.update_from_begin_response ) http_session.event_dispatcher.add_listener( HTTPSession.Event.end_response, self._progress.update_from_end_response ) http_session.event_dispatcher.add_listener( HTTPSession.Event.response_data, self._progress.update_with_data ) def _ftp_session_callback(self, ftp_session: FTPSession): ftp_session.event_dispatcher.add_listener( FTPSession.Event.begin_control, lambda request, connection_reused: self._progress.update_from_begin_request(request)) ftp_session.event_dispatcher.add_listener( FTPSession.Event.begin_transfer, self._progress.update_from_begin_response) ftp_session.event_dispatcher.add_listener( FTPSession.Event.end_transfer, self._progress.update_from_end_response) ftp_session.event_dispatcher.add_listener( FTPSession.Event.transfer_receive_data, self._progress.update_with_data )
class DownloadProgressPlugin(WpullPlugin):
    def __init__(self):
        pass
    def activate(self):
        pass
    def _attach_event_listeners(self):
        pass
    def _http_session_callback(self, http_session: HTTPSession):
        pass
    def _ftp_session_callback(self, ftp_session: FTPSession):
        pass
6
0
13
1
12
0
2
0
1
5
4
0
5
1
5
8
71
11
60
12
54
0
31
12
25
4
2
2
8
6,680
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/options.py
wpull.application.options.AppHelpFormatter
class AppHelpFormatter(argparse.HelpFormatter):
    def _metavar_formatter(self, action, default_metavar):
        # Modified from argparse
        if action.choices is not None:
            if action.metavar is not None:
                result = action.metavar + '='
            else:
                result = ''

            choice_strs = sorted([str(choice) for choice in action.choices])

            if isinstance(action.choices, CommaChoiceListArgs):
                result += '<%s>' % ','.join(choice_strs)
            else:
                result += '{%s}' % ','.join(choice_strs)
        elif action.metavar is not None:
            result = action.metavar
        else:
            result = default_metavar

        def format(tuple_size):
            if isinstance(result, tuple):
                return result
            else:
                return (result,) * tuple_size

        return format

    def _get_help_string(self, action):
        # Modified from argparse
        help = action.help

        if '%(default)' not in action.help:
            if action.default and not isinstance(action.default, bool) \
                    and action.default is not argparse.SUPPRESS:
                defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]

                if action.option_strings or action.nargs in defaulting_nargs:
                    help += _(' (default: %(default)s)')

        return help
class AppHelpFormatter(argparse.HelpFormatter):
    def _metavar_formatter(self, action, default_metavar):
        pass
    def format(tuple_size):
        pass
    def _get_help_string(self, action):
        pass
4
0
14
2
12
1
4
0.06
1
4
1
0
2
0
2
28
40
7
31
8
27
2
25
8
21
5
2
3
11
6,681
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/options.py
wpull.application.options.AppArgumentParser
class AppArgumentParser(argparse.ArgumentParser): '''An Argument Parser that builds up the application options.''' # TODO: implement all sane options def __init__(self, *args, real_exit=True, **kwargs): super().__init__( *args, description=_('Wget-compatible web downloader and crawler.'), formatter_class=AppHelpFormatter, **kwargs ) self._real_exit = real_exit self._ssl_version_map = None self._add_app_args() @classmethod def int_0_inf(cls, string): '''Convert string to int. If ``inf`` is supplied, it returns ``0``. ''' if string == 'inf': return 0 try: value = int(string) except ValueError as error: raise argparse.ArgumentTypeError(error) if value < 0: raise argparse.ArgumentTypeError(_('Value must not be negative.')) else: return value @classmethod def int_bytes(cls, string): '''Convert string describing size to int.''' if string[-1] in ('k', 'm'): value = cls.int_0_inf(string[:-1]) unit = string[-1] if unit == 'k': value *= 2 ** 10 else: value *= 2 ** 20 return value else: return cls.int_0_inf(string) @classmethod def comma_list(cls, string): '''Convert a comma separated string to list.''' items = string.split(',') items = list([item.strip() for item in items]) return items @classmethod def comma_choice_list(cls, string): '''Convert a comma separated string to `CommaChoiceListArgs`.''' items = string.split(',') items = CommaChoiceListArgs([item.strip() for item in items]) return items def parse_args(self, args=None, namespace=None): if args is None: args = sys.argv[1:] encoding = self.get_argv_encoding(args) _logger.debug(__('Encoding: {0}', encoding)) args = super().parse_args( args=wpull.string.to_str(args, encoding=encoding), namespace=namespace ) self._post_parse_args(args) return args @classmethod def get_argv_encoding(cls, argv): encoding = 'utf-8' stripped_argv = [ wpull.string.printable_bytes(arg) for arg in wpull.string.to_bytes(argv, encoding='ascii', error='replace') ] try: index = stripped_argv.index(b'--local-encoding') except ValueError: pass else: encoding = stripped_argv[index + 1] return wpull.string.to_str(encoding) def exit(self, status=0, message=None): if self._real_exit: argparse.ArgumentParser.exit(self, status=status, message=message) else: raise ValueError(str(status) + ' ' + str(message)) def _add_app_args(self): self.add_argument( 'urls', nargs='*', metavar='URL', help=_('the URL to be downloaded'), ) self._add_startup_args() self._add_log_and_input_args() self._add_proxy_args() self._add_download_args() self._add_directories_args() self._add_http_args() self._add_ssl_args() self._add_ftp_args() self._add_warc_args() self._add_recursive_args() self._add_accept_args() self._add_proxy_server_args() self._add_phantomjs_args() self._add_youtube_dl_args() def _add_startup_args(self): group = self.add_argument_group(_('startup')) group.add_argument( '-V', '--version', action='version', version=wpull.version.__version__ ) # group.add_argument( # '-b', # '--background', # action='store_true', # help=_('run as background process') # ) # group.add_argument( # '-e', # '--execute', # metavar='COMMAND', # action='append', # help=_('runs Wgetrc COMMAND'), # ) group.add_argument( '--plugin-script', metavar='FILE', help=_('load plugin script from FILE') ) group.add_argument( '--plugin-args', help=_('arguments for the plugin') ) database_group = group.add_mutually_exclusive_group() database_group.add_argument( '--database', metavar='FILE', default=':memory:', help=_('save database tables into FILE instead of memory'), ) database_group.add_argument( '--database-uri', 
metavar='URI', help=_('save database tables at SQLAlchemy URI instead of memory'), ) group.add_argument( '--concurrent', metavar='N', default=1, type=self.int_0_inf, help=_('run at most N downloads at the same time'), ) group.add_argument( '--debug-console-port', metavar='PORT', type=int, help=_('run a web debug console at given port number') ) group.add_argument( '--debug-manhole', action='store_true', help=_('install Manhole debugging socket') ) group.add_argument( '--ignore-fatal-errors', action='store_true', help=_('ignore all internal fatal exception errors') ) if wpull.resmon.psutil: group.add_argument( '--monitor-disk', type=self.int_bytes, help=_('pause if minimum free disk space is exceeded') ) group.add_argument( '--monitor-memory', type=self.int_bytes, help=_('pause if minimum free memory is exceeded') ) def _add_log_and_input_args(self): group = self.add_argument_group(_('logging and input')) output_log_group = group.add_mutually_exclusive_group() output_log_group.add_argument( '-o', '--output-file', metavar='FILE', help=_('write program messages to FILE') ) output_log_group.add_argument( '-a', '--append-output', metavar='FILE', help=_('append program messages to FILE') ) verbosity_group = group.add_mutually_exclusive_group() verbosity_group.add_argument( '-d', '--debug', dest='verbosity', action='store_const', const=LOG_DEBUG, help=_('print debugging messages') ) verbosity_group.add_argument( '-v', '--verbose', dest='verbosity', action='store_const', const=LOG_VERBOSE, help=_('print informative program messages and detailed progress') ) verbosity_group.add_argument( '-nv', '--no-verbose', dest='verbosity', action='store_const', const=LOG_NO_VERBOSE, help=_('print informative program messages and errors') ) verbosity_group.add_argument( '-q', '--quiet', dest='verbosity', action='store_const', const=LOG_QUIET, help=_('print program error messages') ) verbosity_group.add_argument( '-qq', '--very-quiet', dest='verbosity', action='store_const', const=LOG_VERY_QUIET, help=_('do not print program messages unless critical') ) group.add_argument( '--ascii-print', action='store_true', help=_('print program messages in ASCII only'), ) group.add_argument( '--report-speed', metavar='TYPE', choices=['bits'], help=_('print speed in bits only instead of human formatted units') ) group.add_argument( '-i', '--input-file', metavar='FILE', type=argparse.FileType('rb'), help=_('download URLs listed in FILE'), ) group.add_argument( '-F', '--force-html', action='store_true', help=_('read URL input files as HTML files') ) group.add_argument( '-B', '--base', metavar='URL', help=_('resolves input relative URLs to URL') ) # group.add_argument( # '--config', # metavar='FILE', # type=argparse.FileType('rt'), # help=_('read configuration from FILE'), # ) def _add_proxy_args(self): group = self.add_argument_group(_('proxy')) group.add_argument( '--http-proxy', default=os.environ.get('http_proxy'), help=_('HTTP proxy for HTTP requests') ) group.add_argument( '--https-proxy', default=os.environ.get('https_proxy'), help=_('HTTP proxy for HTTPS requests') ) group.add_argument( '--proxy-user', metavar='USER', help=_('username for proxy "basic" authentication') ) group.add_argument( '--proxy-password', metavar='PASS', help=_('password for proxy "basic" authentication') ) group.add_argument( '--no-proxy', action='store_true', help=_('disable proxy support'), ) group.add_argument( '--proxy-domains', metavar='LIST', type=self.comma_list, help=_('use proxy only from LIST of hostname suffixes') ) group.add_argument( 
'--proxy-exclude-domains', metavar='LIST', type=self.comma_list, default=os.environ.get('no_proxy'), help=_('don’t use proxy only from LIST of hostname suffixes') ) group.add_argument( '--proxy-hostnames', metavar='LIST', type=self.comma_list, help=_('use proxy only from LIST of hostnames') ) group.add_argument( '--proxy-exclude-hostnames', metavar='LIST', type=self.comma_list, help=_('don’t use proxy only from LIST of hostnames') ) def _add_download_args(self): group = self.add_argument_group('download') group.add_argument( '-t', '--tries', metavar='NUMBER', type=self.int_0_inf, help=_('try NUMBER of times on transient errors'), default=20, ) group.add_argument( '--retry-connrefused', action='store_true', help=_('retry even if the server does not accept connections'), ) group.add_argument( '--retry-dns-error', action='store_true', help=_('retry even if DNS fails to resolve hostname'), ) group.add_argument( '-O', '--output-document', metavar='FILE', type=argparse.FileType('wb'), help=_('stream every document into FILE'), ) clobber_group = group.add_mutually_exclusive_group() clobber_group.add_argument( '-nc', '--no-clobber', action='store_const', dest='clobber_method', const='disable', help=_('don’t use anti-clobbering filenames'), ) group.add_argument( '-c', '--continue', action='store_true', dest='continue_download', help=_('resume downloading a partially-downloaded file'), ) group.add_argument( '--progress', metavar='TYPE', choices=['dot', 'bar', 'none'], default='bar', help=_('choose the type of progress indicator'), ) clobber_group.add_argument( '-N', '--timestamping', action='store_true', help=_('only download files that are newer than local files'), ) group.add_argument( '--no-use-server-timestamps', dest='use_server_timestamps', action='store_false', default=True, help=_('don’t set the last-modified time on files'), ) group.add_argument( '-S', '--server-response', action='store_true', help=_('print the protocol responses from the server'), ) # group.add_argument( # '--spider', # action='store_true', # help=_('don’t download but check if URLs exist'), # ) group.add_argument( '-T', '--timeout', metavar='SECONDS', type=float, help=_('set DNS, connect, read timeout options to SECONDS'), ) group.add_argument( '--dns-timeout', metavar='SECS', type=float, help=_('timeout after SECS seconds for DNS requests'), ) group.add_argument( '--connect-timeout', metavar='SECS', type=float, help=_('timeout after SECS seconds for connection requests'), ) group.add_argument( '--read-timeout', metavar='SECS', default=900, type=float, help=_('timeout after SECS seconds for reading requests'), ) group.add_argument( '--session-timeout', metavar='SECS', type=float, help=_('timeout after SECS seconds for downloading files'), ) group.add_argument( '-w', '--wait', metavar='SECONDS', type=float, default=0.0, help=_('wait SECONDS seconds between requests'), ) group.add_argument( '--waitretry', metavar='SECONDS', type=float, default=10.0, help=_('wait up to SECONDS seconds on retries'), ) group.add_argument( '--random-wait', action='store_true', help=_('randomly perturb the time between requests'), ) group.add_argument( '-Q', '--quota', metavar='NUMBER', type=self.int_bytes, help=_('stop after downloading NUMBER bytes'), ) group.add_argument( '--bind-address', metavar='ADDRESS', help=_('bind to ADDRESS on the local host'), ) group.add_argument( '--limit-rate', metavar='RATE', type=self.int_bytes, help=_('limit download bandwidth to RATE'), ) group.add_argument( '--no-dns-cache', action='store_false', 
default=True, dest='dns_cache', help=_('disable caching of DNS lookups'), ) group.add_argument( '--rotate-dns', action='store_true', help=_('use different resolved IP addresses on requests'), ) group.add_argument( '--no-skip-getaddrinfo', dest='always_getaddrinfo', action='store_true', help=_("always use the OS's name resolver interface"), ) group.add_argument( '--restrict-file-names', metavar='MODES', type=self.comma_choice_list, choices=CommaChoiceListArgs( ['windows', 'unix', 'lower', 'upper', 'ascii', 'nocontrol'] ), default=['windows'] if os.name == 'nt' else ['unix'], help=_('list of safe filename modes to use'), ) # group.add_argument( # '--ignore-case', # action='store_true', # help=_('use case-insensitivity on URLs'), # ) inet_group = group.add_mutually_exclusive_group() inet_group.add_argument( '-4', '--inet4-only', action='store_const', dest='inet_family', const='IPv4', help=_('connect to IPv4 addresses only'), ) inet_group.add_argument( '-6', '--inet6-only', action='store_const', dest='inet_family', const='IPv6', help=_('connect to IPv6 addresses only'), ) inet_group.add_argument( '--prefer-family', metavar='FAMILY', choices=['none', 'IPv6', 'IPv4'], help=_('prefer to connect to FAMILY IP addresses'), ) group.add_argument( '--user', help=_('username for both FTP and HTTP authentication') ) password_group = group.add_mutually_exclusive_group() password_group.add_argument( '--password', help=_('password for both FTP and HTTP authentication') ) # password_group.add_argument( # '--ask-password', # action='store_true', # help=_('ask for a password on each connection') # ) group.add_argument( '--no-iri', action='store_true', help=_('use ASCII encoding only') ) group.add_argument( '--local-encoding', metavar='ENC', help=_('use ENC as the encoding of input files and options') ) group.add_argument( '--remote-encoding', metavar='ENC', help=_('force decoding documents using codec ENC'), ) # group.add_argument( # '--unlink', # action='store_true', # help=_('unlink file before overwriting it') # ) group.add_argument( '--max-filename-length', metavar='NUMBER', default=160, type=int, help=_('limit filename length to NUMBER characters'), ) def _add_directories_args(self): group = self.add_argument_group(_('directories')) dir_group = group.add_mutually_exclusive_group() dir_group.add_argument( '-nd', '--no-directories', action='store_const', const='no', dest='use_directories', help=_('don’t create directories'), ) dir_group.add_argument( '-x', '--force-directories', action='store_const', const='force', dest='use_directories', help=_('always create directories'), ) group.add_argument( '-nH', '--no-host-directories', dest='host_directories', action='store_false', default=True, help=_('don’t create directories for hostnames') ) group.add_argument( '--protocol-directories', action='store_true', help=_('create directories for URL schemes'), ) group.add_argument( '-P', '--directory-prefix', metavar='PREFIX', default=os.curdir, help=_('save everything under the directory PREFIX'), ) group.add_argument( '--cut-dirs', metavar='NUMBER', type=int, help=_('don’t make NUMBER of leading directories'), ) def _add_http_args(self): group = self.add_argument_group('HTTP') group.add_argument( '--http-user', help=_('username for HTTP authentication') ) group.add_argument( '--http-password', help=_('password for HTTP authentication') ) group.add_argument( '--no-cache', action='store_true', help=_('request server to not use cached version of files'), ) group.add_argument( '--default-page', metavar='NAME', 
default='index.html', help=_('use NAME as index page if not known'), ) group.add_argument( '-E', '--adjust-extension', action='store_true', help=_('append HTML or CSS file extension if needed') ) group.add_argument( '--ignore-length', action='store_true', help=_('ignore any Content-Length provided by the server') ) group.add_argument( '--header', metavar='STRING', default=[], action='append', help=_('adds STRING to the HTTP header') ) group.add_argument( '--max-redirect', metavar='NUMBER', type=int, help=_('follow only up to NUMBER document redirects'), default=20, ) group.add_argument( '--referer', metavar='URL', help=_('always use URL as the referrer'), ) group.add_argument( '--save-headers', action='store_true', help=_('include server header responses in files'), ) group.add_argument( '-U', '--user-agent', metavar='AGENT', help=_('use AGENT instead of Wpull’s user agent'), ) group.add_argument( '--no-robots', dest='robots', action='store_false', default=True, help=_('ignore robots.txt directives'), ) group.add_argument( '--no-http-keep-alive', dest='http_keep_alive', action='store_false', default=True, help=_('disable persistent HTTP connections') ) group.add_argument( '--no-cookies', dest='cookies', default=True, action='store_false', help=_('disables HTTP cookie support') ) group.add_argument( '--load-cookies', metavar='FILE', help=_('load Mozilla cookies.txt from FILE'), ) group.add_argument( '--save-cookies', metavar='FILE', help=_('save Mozilla cookies.txt to FILE'), ) group.add_argument( '--keep-session-cookies', action='store_true', help=_('include session cookies when saving cookies to file') ) post_group = group.add_mutually_exclusive_group() post_group.add_argument( '--post-data', metavar='STRING', help=_('use POST for all requests with query STRING'), ) post_group.add_argument( '--post-file', metavar='FILE', type=argparse.FileType('r'), help=_('use POST for all requests with query in FILE') ) # group.add_argument( # '--method', # metavar='HTTPMethod', # ) # group.add_argument( # '--body-data', # metavar='STRING', # ) # group.add_argument( # '--body-file', # metavar='FILE' # ) group.add_argument( '--content-disposition', action='store_true', help=_('use filename given in Content-Disposition header') ) group.add_argument( '--content-on-error', action='store_true', help=_('keep error pages') ) # group.add_argument( # '--auth-no-challenge' # ) group.add_argument( '--http-compression', action='store_true', help=_('request servers to use HTTP compression'), ) group.add_argument( '--html-parser', choices=['html5lib'] if IS_PYPY else ['libxml2-lxml', 'html5lib'], default='html5lib', help=_('select HTML parsing library and strategy') ) group.add_argument( '--link-extractors', choices=CommaChoiceListArgs(['html', 'css', 'javascript']), type=self.comma_choice_list, default=['html', 'css', 'javascript'], help=_('specify which link extractors to use') ) group.add_argument( '--escaped-fragment', action='store_true', help=_('rewrite links with hash fragments to escaped fragments') ) group.add_argument( '--strip-session-id', action='store_true', help=_('remove session ID tokens from links') ) def _add_ssl_args(self): self._ssl_version_map = { # PROTOCOL_SSLv23 also selects TLS protocols 'auto': ssl.PROTOCOL_SSLv23 } if hasattr(ssl, 'PROTOCOL_SSLv2'): self._ssl_version_map['SSLv2'] = ssl.PROTOCOL_SSLv2 if hasattr(ssl, 'PROTOCOL_SSLv3'): self._ssl_version_map['SSLv3'] = ssl.PROTOCOL_SSLv3 if hasattr(ssl, 'PROTOCOL_TLSv1'): self._ssl_version_map['TLSv1'] = ssl.PROTOCOL_TLSv1 if hasattr(ssl, 
'PROTOCOL_TLSv1_1'): self._ssl_version_map['TLSv1.1'] = ssl.PROTOCOL_TLSv1_1 if hasattr(ssl, 'PROTOCOL_TLSv1_2'): self._ssl_version_map['TLSv1.2'] = ssl.PROTOCOL_TLSv1_2 group = self.add_argument_group('SSL') group.add_argument( '--secure-protocol', metavar='PR', default='auto', choices=sorted(self._ssl_version_map), help=_('specify the version of the SSL protocol to use'), ) group.add_argument( '--https-only', action='store_true', help=_('download only HTTPS URLs') ) group.add_argument( '--no-check-certificate', dest='check_certificate', action='store_false', default=True, help=_('don’t validate SSL server certificates'), ) group.add_argument( '--no-strong-crypto', dest='strong_crypto', action='store_false', default=True, help=_('don’t use secure protocols/ciphers') ) group.add_argument( '--certificate', metavar='FILE', help=_('use FILE containing the local client certificate') ) group.add_argument( '--certificate-type', metavar='TYPE', choices=['PEM'], ) group.add_argument( '--private-key', metavar='FILE', help=_('use FILE containing the local client private key') ) group.add_argument( '--private-key-type', metavar='TYPE', choices=['PEM'], ) group.add_argument( '--ca-certificate', metavar='FILE', default='/etc/ssl/certs/ca-certificates.crt', help=_('load and use CA certificate bundle from FILE'), ) group.add_argument( '--ca-directory', metavar='DIR', default='/etc/ssl/certs/', help=_('load and use CA certificates from DIR'), ) group.add_argument( '--no-use-internal-ca-certs', action='store_false', dest='use_internal_ca_certs', help=_('don’t use CA certificates included with Wpull') ) group.add_argument( '--random-file', metavar='FILE', help=_('use data from FILE to seed the SSL PRNG') ) group.add_argument( '--edg-file', metavar='FILE', help=_('connect to entropy gathering daemon using socket FILE') ) def _add_ftp_args(self): group = self.add_argument_group('FTP') group.add_argument( '--ftp-user', metavar='USER', help=_('username for FTP login'), ) group.add_argument( '--ftp-password', metavar='PASS', help=_('password for FTP login'), ) group.add_argument( '--no-remove-listing', action='store_false', default=True, dest='remove_listing', help=_('keep directory file listings') ) group.add_argument( '--no-glob', action='store_false', default=True, dest='glob', help=_('don’t use filename glob patterns on FTP URLs') ) # group.add_argument( # '--no-passive-ftp', # action='store_true', # ) group.add_argument( '--preserve-permissions', action='store_true', help=_('apply server\'s Unix file permissions on downloaded files') ) group.add_argument( '--retr-symlinks', default='on', nargs='?', choices=BOOLEAN_VALUES, help=_('if disabled, preserve symlinks and run with security risks') ) def _add_warc_args(self): group = self.add_argument_group('WARC') group.add_argument( '--warc-file', metavar='FILENAME', help=_('save WARC file to filename prefixed with FILENAME'), ) group.add_argument( '--warc-append', action='store_true', help=_('append instead of overwrite the output WARC file') ) group.add_argument( '--warc-header', metavar='STRING', action='append', default=[], help=_('include STRING in WARC file metadata'), ) group.add_argument( '--warc-max-size', type=self.int_bytes, metavar='NUMBER', help=_('write sequential WARC files sized about NUMBER bytes') ) group.add_argument( '--warc-move', metavar='DIRECTORY', default=None, help=_('move WARC files to DIRECTORY as they complete') ) group.add_argument( '--warc-cdx', action='store_true', help=_('write CDX file along with the WARC file') ) 
group.add_argument( '--warc-dedup', metavar='FILE', type=argparse.FileType('rb'), help=_('write revisit records using digests in FILE') ) group.add_argument( '--no-warc-compression', action='store_true', help=_('do not compress the WARC file'), ) group.add_argument( '--no-warc-digests', action='store_false', dest='warc_digests', default=True, help=_('do not compute and save SHA1 hash digests') ) group.add_argument( '--no-warc-keep-log', action='store_false', dest='warc_log', default=True, help=_('do not save a log into the WARC file'), ) group.add_argument( '--warc-tempdir', metavar='DIRECTORY', default=os.curdir, help=_('use temporary DIRECTORY for preparing WARC files'), ) def _add_recursive_args(self): group = self.add_argument_group(_('recursion')) group.add_argument( '-r', '--recursive', action='store_true', help=_('follow links and download them'), ) group.add_argument( '-l', '--level', metavar='NUMBER', type=self.int_0_inf, default=5, help=_('limit recursion depth to NUMBER') ) group.add_argument( '--delete-after', action='store_true', help=_('download files temporarily and delete them after'), ) group.add_argument( '-k', '--convert-links', action='store_true', help=_('rewrite links in files that point to local files') ) group.add_argument( '-K', '--backup-converted', action='store_true', help=_('save original files before converting their links') ) # group.add_argument( # '-m', # '--mirror', # action='store_true', # help=_('use options "-N -r -l inf --no-remove-listing"') # ) group.add_argument( '-p', '--page-requisites', action='store_true', help=_('download objects embedded in pages') ) group.add_argument( '--page-requisites-level', metavar='NUMBER', type=self.int_0_inf, default=5, help=_('limit page-requisites recursion depth to NUMBER') ) # group.add_argument( # '--strict-comments', # action='store_true', # help=_('use strict SGML comment parsing') # ) group.add_argument( '--sitemaps', action='store_true', help=_('download Sitemaps to discover more links') ) def _add_accept_args(self): group = self.add_argument_group(_('filters')) group.add_argument( '-A', '--accept', metavar='LIST', type=self.comma_list, help=_('download only files with suffix in LIST'), ) group.add_argument( '-R', '--reject', metavar='LIST', help=_('don’t download files with suffix in LIST'), ) group.add_argument( '--accept-regex', metavar='REGEX', help=_('download only URLs matching REGEX'), ) group.add_argument( '--reject-regex', metavar='REGEX', help=_('don’t download URLs matching REGEX'), ) group.add_argument( '--regex-type', metavar='TYPE', choices=['pcre'], help=_('use regex TYPE') ) group.add_argument( '-D', '--domains', metavar='LIST', type=self.comma_list, help=_('download only from LIST of hostname suffixes') ) group.add_argument( '--exclude-domains', metavar='LIST', type=self.comma_list, help=_('don’t download from LIST of hostname suffixes') ) group.add_argument( '--hostnames', metavar='LIST', type=self.comma_list, help=_('download only from LIST of hostnames') ) group.add_argument( '--exclude-hostnames', metavar='LIST', type=self.comma_list, help=_('don’t download from LIST of hostnames') ) group.add_argument( '--follow-ftp', action='store_true', help=_('follow links to FTP sites') ) group.add_argument( '--follow-tags', metavar='LIST', type=self.comma_list, help=_('follow only links contained in LIST of HTML tags'), ) group.add_argument( '--ignore-tags', metavar='LIST', type=self.comma_list, help=_('don’t follow links contained in LIST of HTML tags'), ) span_hosts_group = 
group.add_mutually_exclusive_group() span_hosts_group.add_argument( '-H', '--span-hosts', action='store_true', help=_('follow links and page requisites to other hostnames') ) span_hosts_group.add_argument( '--span-hosts-allow', metavar='LIST', choices=CommaChoiceListArgs(['page-requisites', 'linked-pages']), type=self.comma_choice_list, default=[], help=_('selectively span hosts for resource types in LIST') ) group.add_argument( '-L', '--relative', action='store_true', help=_('follow only relative links') ) group.add_argument( '-I', '--include-directories', metavar='LIST', type=self.comma_list, help=_('download only paths in LIST') ) group.add_argument( '--trust-server-names', action='store_true', help=_('use the last given URL for filename during redirects') ) group.add_argument( '-X', '--exclude-directories', metavar='LIST', type=self.comma_list, help=_('don’t download paths in LIST') ) group.add_argument( '-np', '--no-parent', action='store_true', help=_('don’t follow to parent directories on URL path'), ) group.add_argument( '--no-strong-redirects', dest='strong_redirects', action='store_false', default=True, help=_('don’t implicitly allow span hosts for redirects'), ) def _add_proxy_server_args(self): group = self.add_argument_group(_('proxy server')) group.add_argument( '--proxy-server', action='store_true', help=_('run HTTP proxy server for capturing requests'), ) group.add_argument( '--proxy-server-address', default='localhost', metavar='ADDRESS', help=_('bind the proxy server to ADDRESS') ) group.add_argument( '--proxy-server-port', type=int, default=0, metavar='PORT', help=_('bind the proxy server port to PORT') ) def _add_phantomjs_args(self): group = self.add_argument_group(_('PhantomJS')) group.add_argument( '--phantomjs', action='store_true', help=_('use PhantomJS for loading dynamic pages'), ) group.add_argument( '--phantomjs-exe', metavar='PATH', default='phantomjs', help=_('path of PhantomJS executable') ) group.add_argument( '--phantomjs-max-time', default=900, type=self.int_0_inf, help=_('maximum duration of PhantomJS session') ) group.add_argument( '--phantomjs-scroll', type=int, default=20, metavar='NUM', help=_('scroll the page up to NUM times'), ) group.add_argument( '--phantomjs-wait', type=float, default=1.0, metavar='SEC', help=_('wait SEC seconds between page interactions'), ) group.add_argument( '--no-phantomjs-snapshot', action='store_false', dest='phantomjs_snapshot', default=True, help=_('don’t take dynamic page snapshots'), ) group.add_argument( '--no-phantomjs-smart-scroll', action='store_false', dest='phantomjs_smart_scroll', default=True, help=_('always scroll the page to maximum scroll count option'), ) def _add_youtube_dl_args(self): group = self.add_argument_group(_('youtube-dl')) group.add_argument( '--youtube-dl', action='store_true', help=_('use youtube-dl for downloading videos'), ) group.add_argument( '--youtube-dl-exe', metavar='PATH', default='youtube-dl', help=_('path of youtube-dl executable') ) def _post_parse_args(self, args): if args.warc_file: self._post_warc_args(args) if not args.input_file and not args.urls: self.error(_('no URL provided')) self._post_ssl_args(args) if not args.recursive: args.robots = False if args.no_iri and (args.local_encoding or args.remote_encoding): self.error(_('disabling IRI support forces use of ASCII encoding')) elif args.no_iri: args.local_encoding = 'ascii' args.remote_encoding = 'ascii' if not args.verbosity: if args.concurrent > 1: args.verbosity = LOG_NO_VERBOSE else: args.verbosity = LOG_VERBOSE if 
(args.proxy_user or args.proxy_password) and not \ (args.proxy_user and args.proxy_password): self.error(_('both username and password must be supplied')) assert args.retr_symlinks in BOOLEAN_VALUES args.retr_symlinks = args.retr_symlinks in BOOLEAN_TRUE_VALUES def _post_warc_args(self, args): option_names = ('clobber_method', 'timestamping', 'continue_download') for option_name in option_names: if vars(args).get(option_name): self.error( _('WARC output cannot be combined with {option_name}.') .format(option_name=option_name) ) if args.warc_move and not os.path.isdir(args.warc_move): self.error('WARC destination {path} is not a directory.' .format(path=args.warc_move)) def _post_ssl_args(self, args): if args.secure_protocol: args.secure_protocol = self._ssl_version_map[args.secure_protocol] if args.certificate and not os.path.exists(args.certificate): self.error(_('certificate file not found')) if args.private_key and not os.path.exists(args.private_key): self.error(_('private key file not found'))
class AppArgumentParser(argparse.ArgumentParser):
    '''An Argument Parser that builds up the application options.'''
    def __init__(self, *args, real_exit=True, **kwargs):
        pass
    @classmethod
    def int_0_inf(cls, string):
        '''Convert string to int.

        If ``inf`` is supplied, it returns ``0``.
        '''
        pass
    @classmethod
    def int_bytes(cls, string):
        '''Convert string describing size to int.'''
        pass
    @classmethod
    def comma_list(cls, string):
        '''Convert a comma separated string to list.'''
        pass
    @classmethod
    def comma_choice_list(cls, string):
        '''Convert a comma separated string to `CommaChoiceListArgs`.'''
        pass
    def parse_args(self, args=None, namespace=None):
        pass
    @classmethod
    def get_argv_encoding(cls, argv):
        pass
    def exit(self, status=0, message=None):
        pass
    def _add_app_args(self):
        pass
    def _add_startup_args(self):
        pass
    def _add_log_and_input_args(self):
        pass
    def _add_proxy_args(self):
        pass
    def _add_download_args(self):
        pass
    def _add_directories_args(self):
        pass
    def _add_http_args(self):
        pass
    def _add_ssl_args(self):
        pass
    def _add_ftp_args(self):
        pass
    def _add_warc_args(self):
        pass
    def _add_recursive_args(self):
        pass
    def _add_accept_args(self):
        pass
    def _add_proxy_server_args(self):
        pass
    def _add_phantomjs_args(self):
        pass
    def _add_youtube_dl_args(self):
        pass
    def _post_parse_args(self, args):
        pass
    def _post_warc_args(self, args):
        pass
    def _post_ssl_args(self, args):
        pass
32
5
50
1
46
3
2
0.06
1
10
2
0
21
2
26
76
1,333
52
1,203
69
1,171
78
322
63
295
9
3
2
56
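Most of AppArgumentParser is option wiring, but the value converters shown in the code above (int_0_inf, int_bytes, comma_list) are small pure classmethods and can be exercised directly. An illustrative check of their behaviour:

print(AppArgumentParser.int_0_inf('inf'))        # 0 -- 'inf' means unlimited
print(AppArgumentParser.int_0_inf('25'))         # 25
print(AppArgumentParser.int_bytes('10k'))        # 10240
print(AppArgumentParser.int_bytes('2m'))         # 2097152
print(AppArgumentParser.comma_list('a, b ,c'))   # ['a', 'b', 'c']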
6,682
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/hook.py
wpull.application.hook.HookableMixin
class HookableMixin(object):
    def __init__(self):
        super().__init__()
        self.event_dispatcher = EventDispatcher()
        self.hook_dispatcher = HookDispatcher(
            event_dispatcher_transclusion=self.event_dispatcher)

    def connect_plugin(self, plugin: WpullPlugin):
        for func, name, category in plugin.get_plugin_functions():
            if category == PluginFunctionCategory.hook:
                if self.hook_dispatcher.is_registered(name):
                    _logger.debug('Connected hook %s %s', name, func)
                    self.hook_dispatcher.connect(name, func)
                elif self.event_dispatcher.is_registered(name):
                    raise RuntimeError(
                        'Plugin event ‘{name}’ cannot be attached as a hook function.'
                        .format(name=name))
            elif category == PluginFunctionCategory.event and \
                    self.event_dispatcher.is_registered(name):
                _logger.debug('Connected event %s %s', name, func)
                self.event_dispatcher.add_listener(name, func)
class HookableMixin(object):
    def __init__(self):
        pass
    def connect_plugin(self, plugin: WpullPlugin):
        pass
3
0
8
1
8
0
4
0
1
6
4
15
2
2
2
2
18
2
16
6
13
0
14
6
11
6
1
3
7
6,683
ArchiveTeam/wpull
ArchiveTeam_wpull/test/fuzz_fusil/runner.py
runner.FuzzedHttpServer
class FuzzedHttpServer(HttpServer): SAMPLE_FILENAMES = ( 'krokozyabry.css', 'soup.html', 'mojibake.html', 'styles.css', 'krokozyabry.html', 'webtv.net_tvfoutreach_cocountdownto666.html', 'many_urls.html', 'xkcd_1.html', 'mojibake.css', ) def __init__(self, *args, **kwargs): HttpServer.__init__(self, *args, **kwargs) self._config_light = MangleConfig(min_op=0, max_op=50) self._config_heavy = MangleConfig(min_op=0, max_op=500) self._data_samples = [] for filename in self.SAMPLE_FILENAMES: path = os.path.join( os.path.dirname(os.path.abspath(__file__)), '../', '../', 'wpull', 'testing', 'samples', filename ) with open(path, 'rb') as in_file: self._data_samples.append(in_file.read()) self._data_samples.append( "<html><body><p>Hello World!</p></body></html>") def serveRequest(self, client, request): url = request.uri[1:] if not url: url = "index.html" choice = random.randint(0, 1) self.logger.info('request choice: ' + str(choice), self) if choice == 1: self.serveData( client, 200, "OK", random.choice(self._data_samples)) else: self.error404(client, url) def serveData(self, client, code, code_text, data=None, content_type="text/html"): data_choice = random.random() header_choice = random.random() http_headers = [] if random.random() < 0.2: new_content_type = random.choice( ['text/html', 'image/png', 'text/css']) self.logger.info( 'Mangle content_type {0} -> {1}'.format( content_type, new_content_type), self ) content_type = new_content_type if data and data_choice < 0.5: self.logger.info('Mangle content: YES', self) data = self.mangle_data(data, self._config_heavy) if random.random() < 0.2: self.logger.info('Mangle gzip: YES', self) datafile = BytesIO() with gzip.GzipFile(fileobj=datafile, mode='wb') as gzip_file: gzip_file.write(bytes(data)) data = self.mangle_data(datafile.getvalue(), self._config_light) http_headers.append(('Content-Encoding', 'gzip')) if data: data_len = len(data) else: data_len = 0 http_headers.extend([ ("Server", "Fusil"), ("Pragma", "no-cache"), ("Content-Type", content_type), ("Content-Length", str(data_len)), ]) try: header = "HTTP/%s %s %s\r\n" % (self.http_version, code, code_text) for key, value in http_headers: header += "%s: %s\r\n" % (key, value) header += "\r\n" if header_choice < 0.3: self.logger.info('Mangle header: YES', self) header = self.mangle_data(header, self._config_light) if data: data = header + data else: data = header client.sendBytes(data) client.close() except (ServerClientDisconnect, socket.error): self.clientDisconnection(client) def mangle_data(self, data, config): mangler = Mangle(config, bytearray(data)) mangler.run() self.logger.info( 'Mangled data: ' + repr(mangler.data), self ) return mangler.data
class FuzzedHttpServer(HttpServer):
    def __init__(self, *args, **kwargs):
        pass
    def serveRequest(self, client, request):
        pass
    def serveData(self, client, code, code_text, data=None,
                  content_type="text/html"):
        pass
    def mangle_data(self, data, config):
        pass
5
0
26
5
21
0
4
0
1
4
0
0
4
3
4
4
121
25
96
25
90
0
63
22
58
9
1
2
15
6,684
ArchiveTeam/wpull
ArchiveTeam_wpull/test/fuzz_fusil/runner.py
runner.Fuzzer
class Fuzzer(Application): def setupProject(self): self.project.debugger.enabled = False self.config.process_max_user_process = 50 FuzzedHttpServer(self.project, 8898) process = ProjectProcess( self.project, ['python3', '-X', 'faulthandler', '-m', 'wpull', '127.0.0.1:8898', '--timeout', '2.0', '--tries', '1', ], ) process.env.set( 'PYTHONPATH', os.path.join( os.path.abspath(os.path.dirname(__file__)), '..', '..') ) WatchProcessSpecificStatusCode(process) stdout_watcher = WatchStdout(process) stdout_watcher.ignoreRegex( r'WARNING Invalid content length: invalid literal for int' ) stdout_watcher.ignoreRegex( r'WARNING Unable to parse URL ' ) stdout_watcher.ignoreRegex( r'WARNING Failed to read document at ' ) stdout_watcher.ignoreRegex( r'WARNING Content overrun' ) stdout_watcher.ignoreRegex( r'ERROR Fetching ' ) stdout_watcher.ignoreRegex( r'DEBUG ' ) stdout_watcher.ignoreRegex( r'INFO Fetch(ed|ing) ' )
class Fuzzer(Application):
    def setupProject(self):
        pass
2
0
44
4
40
0
1
0
1
2
2
0
1
0
1
1
45
4
41
4
39
0
16
4
14
1
1
0
1
6,685
ArchiveTeam/wpull
ArchiveTeam_wpull/test/fuzz_fusil/runner.py
runner.Fuzzer
class Fuzzer(Application): def setupProject(self): self.project.debugger.enabled = False self.config.process_max_user_process = 50 FuzzedHttpServer(self.project, 8898) process = ProjectProcess( self.project, ['python3', '-X', 'faulthandler', '-m', 'wpull', '127.0.0.1:8898', '--timeout', '2.0', '--tries', '1', ], ) process.env.set( 'PYTHONPATH', os.path.join( os.path.abspath(os.path.dirname(__file__)), '..', '..') ) WatchProcessSpecificStatusCode(process) stdout_watcher = WatchStdout(process) stdout_watcher.ignoreRegex( r'WARNING Invalid content length: invalid literal for int' ) stdout_watcher.ignoreRegex( r'WARNING Unable to parse URL ' ) stdout_watcher.ignoreRegex( r'WARNING Failed to read document at ' ) stdout_watcher.ignoreRegex( r'WARNING Content overrun' ) stdout_watcher.ignoreRegex( r'ERROR Fetching ' ) stdout_watcher.ignoreRegex( r'DEBUG ' ) stdout_watcher.ignoreRegex( r'INFO Fetch(ed|ing) ' )
class Fuzzer(Application):
    def setupProject(self):
        pass
2
0
78
5
73
0
1
0
1
2
1
0
1
1
1
1
79
5
74
9
72
0
25
8
23
1
1
0
1
6,686
ArchiveTeam/wpull
ArchiveTeam_wpull/test/fuzz_fusil_2/runner.py
runner.Fuzzer
class Fuzzer(Application):
    def setupProject(self):
        self.project.debugger.enabled = False
        self.config.use_cpu_probe = False
        self.config.process_max_user_process = 50

        port = 8848
        seed = random.randint(0, 60000)
        timeout = 60 * 60

        server_process = ProjectProcess(
            self.project,
            [
                'python3', '-m', 'huhhttp',
                '--port', str(port),
                '--seed', str(seed),
                '--fuzz-period', '500',
                '--restart-interval', '250',
            ],
            timeout=timeout
        )

        WatchProcess(server_process)

        process = ProjectProcess(
            self.project,
            [
                'python3', '-X', 'faulthandler', '-m', 'wpull',
                '127.0.0.1:{0}'.format(port),
                '--timeout', '5',
                '--warc-file', 'fusil-test',
                '-r',
                '--debug',
                '--page-requisites',
                '--delete-after',
                '--tries', '2',
                '--retry-connrefused',
                '--database', 'wpull.db',
                '--span-hosts-allow', 'page-requisites,linked-pages',
                '--no-check-certificate',
                '--concurrent', str(random.randint(1, 10)),
            ],
            timeout=timeout
        )

        process.env.set(
            'PYTHONPATH',
            os.path.join(
                os.path.abspath(os.path.dirname(__file__)), '..', '..')
        )
        process.env.set('OBJGRAPH_DEBUG', '1')
        process.env.set('FILE_LEAK_DEBUG', '1')

        WatchProcessSpecificStatusCode(process)

        stdout_watcher = WatchStdout(process)

        stdout_watcher.max_nb_line = None

        stdout_watcher.ignoreRegex(
            r'WARNING Invalid content length: invalid literal for int'
        )
        stdout_watcher.ignoreRegex(
            r'WARNING Unable to parse URL '
        )
        stdout_watcher.ignoreRegex(
            r'WARNING Failed to read document at '
        )
        stdout_watcher.ignoreRegex(
            r'WARNING Content overrun'
        )
        stdout_watcher.ignoreRegex(
            r'ERROR Fetching '
        )
        stdout_watcher.ignoreRegex(
            r'DEBUG '
        )
        stdout_watcher.ignoreRegex(
            r'INFO Fetch(ed|ing) '
        )
        stdout_watcher.ignoreRegex(
            r'lsof: WARNING: '
        )
class Fuzzer(Application):
    def setupProject(self):
        pass
2
0
44
4
40
0
1
0
1
2
2
0
1
0
1
1
45
4
41
4
39
0
16
4
14
1
1
0
1
6,687
ArchiveTeam/wpull
ArchiveTeam_wpull/test/fuzz_fusil_2/runner.py
runner.Fuzzer
class Fuzzer(Application):
    def setupProject(self):
        self.project.debugger.enabled = False
        self.config.use_cpu_probe = False
        self.config.process_max_user_process = 50

        port = 8848
        seed = random.randint(0, 60000)
        timeout = 60 * 60

        server_process = ProjectProcess(
            self.project,
            [
                'python3', '-m', 'huhhttp',
                '--port', str(port),
                '--seed', str(seed),
                '--fuzz-period', '500',
                '--restart-interval', '250',
            ],
            timeout=timeout
        )

        WatchProcess(server_process)

        process = ProjectProcess(
            self.project,
            [
                'python3', '-X', 'faulthandler', '-m', 'wpull',
                '127.0.0.1:{0}'.format(port),
                '--timeout', '5',
                '--warc-file', 'fusil-test',
                '-r',
                '--debug',
                '--page-requisites',
                '--delete-after',
                '--tries', '2',
                '--retry-connrefused',
                '--database', 'wpull.db',
                '--span-hosts-allow', 'page-requisites,linked-pages',
                '--no-check-certificate',
                '--concurrent', str(random.randint(1, 10)),
            ],
            timeout=timeout
        )

        process.env.set(
            'PYTHONPATH',
            os.path.join(
                os.path.abspath(os.path.dirname(__file__)), '..', '..')
        )
        process.env.set('OBJGRAPH_DEBUG', '1')
        process.env.set('FILE_LEAK_DEBUG', '1')

        WatchProcessSpecificStatusCode(process)

        stdout_watcher = WatchStdout(process)

        stdout_watcher.max_nb_line = None

        stdout_watcher.ignoreRegex(
            r'WARNING Invalid content length: invalid literal for int'
        )
        stdout_watcher.ignoreRegex(
            r'WARNING Unable to parse URL '
        )
        stdout_watcher.ignoreRegex(
            r'WARNING Failed to read document at '
        )
        stdout_watcher.ignoreRegex(
            r'WARNING Content overrun'
        )
        stdout_watcher.ignoreRegex(
            r'ERROR Fetching '
        )
        stdout_watcher.ignoreRegex(
            r'DEBUG '
        )
        stdout_watcher.ignoreRegex(
            r'INFO Fetch(ed|ing) '
        )
        stdout_watcher.ignoreRegex(
            r'lsof: WARNING: '
        )
class Fuzzer(Application):
    def setupProject(self):
        pass
2
0
78
5
73
0
1
0
1
2
1
0
1
1
1
1
79
5
74
9
72
0
25
8
23
1
1
0
1
6,688
ArchiveTeam/wpull
ArchiveTeam_wpull/test/fuzz_fusil_2/runner.py
runner.WatchProcessSpecificStatusCode
class WatchProcessSpecificStatusCode(WatchProcess):
    def computeScore(self, status):
        if status in (4, 6, 7, 8):
            print('Change status', status, 'to 0.')
            status = 0

        return WatchProcess.computeScore(self, status)
class WatchProcessSpecificStatusCode(WatchProcess):
    def computeScore(self, status):
        pass
2
0
6
1
5
0
2
0
1
0
0
0
1
0
1
1
7
1
6
2
4
0
6
2
4
2
1
1
2
6,689
ArchiveTeam/wpull
ArchiveTeam_wpull/test/fuzz_fusil_2/runner.py
runner.WatchProcessSpecificStatusCode
class WatchProcessSpecificStatusCode(WatchProcess):
    def computeScore(self, status):
        if status in (4, 6, 7, 8):
            print('Change status', status, 'to 0.')
            status = 0

        return WatchProcess.computeScore(self, status)
class WatchProcessSpecificStatusCode(WatchProcess):
    def computeScore(self, status):
        pass
2
0
6
1
5
0
2
0
1
0
0
0
1
0
1
1
7
1
6
2
4
0
6
2
4
2
1
1
2
6,690
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/app.py
wpull.application.app.Application
class Application(HookableMixin):
    '''Default non-interactive application user interface.

    This class manages process signals and displaying warnings.
    '''
    ERROR_CODE_MAP = OrderedDict([
        (AuthenticationError, ExitStatus.authentication_failure),
        (ServerError, ExitStatus.server_error),
        (ProtocolError, ExitStatus.protocol_error),
        (SSLVerificationError, ExitStatus.ssl_verification_error),
        (DNSNotFound, ExitStatus.network_failure),
        (ConnectionRefused, ExitStatus.network_failure),
        (NetworkError, ExitStatus.network_failure),
        (OSError, ExitStatus.file_io_error),
        (IOError, ExitStatus.file_io_error),
        # ExitStatus.parse_error is handled by the ArgumentParse and is not
        # needed here.
        # Anything else is ExitStatus.generic_error.
    ])
    '''Mapping of error types to exit status.'''

    EXPECTED_EXCEPTIONS = (
        ServerError, ProtocolError,
        SSLVerificationError, DNSNotFound,
        ConnectionRefused, NetworkError,
        OSError, IOError,
        HookStop, StopIteration, SystemExit, KeyboardInterrupt,
    )
    '''Exception classes that are not crashes.'''

    class Event(enum.Enum):
        pipeline_begin = 'pipeline_begin'
        pipeline_end = 'pipeline_end'

    def __init__(self, pipeline_series: PipelineSeries):
        super().__init__()
        self._pipeline_series = pipeline_series
        self._exit_code = 0
        self._current_pipeline = None
        self._state = ApplicationState.ready

        self.event_dispatcher.register(self.Event.pipeline_begin)
        self.event_dispatcher.register(self.Event.pipeline_end)

    @property
    def exit_code(self) -> int:
        return self._exit_code

    @exit_code.setter
    def exit_code(self, new_code: int):
        self._exit_code = new_code

    def setup_signal_handlers(self):
        '''Setup Ctrl+C and SIGTERM handlers.'''
        if platform.system() == 'Windows':
            _logger.warning(_(
                'Graceful stopping with Unix signals is not supported '
                'on this OS.'
            ))
            return

        event_loop = asyncio.get_event_loop()
        graceful_called = False

        def graceful_stop_callback():
            nonlocal graceful_called

            if graceful_called:
                forceful_stop_callback()
                return

            graceful_called = True

            _logger.info(_('Stopping once all requests complete...'))
            _logger.info(_('Interrupt again to force stopping immediately.'))

            self.stop()

        def forceful_stop_callback():
            _logger.info(_('Forcing immediate stop...'))
            logging.raiseExceptions = False
            event_loop.stop()

        event_loop.add_signal_handler(signal.SIGINT, graceful_stop_callback)
        event_loop.add_signal_handler(signal.SIGTERM, forceful_stop_callback)

    def stop(self):
        if self._state == ApplicationState.running:
            _logger.debug('Application stopping')

            self._state = ApplicationState.stopping

            if self._current_pipeline:
                self._current_pipeline.stop()

    def run_sync(self) -> int:
        '''Run the application.

        This function is blocking.

        Returns:
            int: The exit status.
        '''
        exit_status = asyncio.get_event_loop().run_until_complete(self.run())

        # The following loop close procedure should avoid deadlock while
        # allowing all callbacks to process before close()
        asyncio.get_event_loop().stop()
        asyncio.get_event_loop().run_forever()
        asyncio.get_event_loop().close()

        return exit_status

    @asyncio.coroutine
    def run(self):
        if self._state != ApplicationState.ready:
            raise RuntimeError('Application is not ready')

        self._state = ApplicationState.running

        for pipeline in self._pipeline_series.pipelines:
            self._current_pipeline = pipeline

            if self._state == ApplicationState.stopping and pipeline.skippable:
                continue

            self.event_dispatcher.notify(self.Event.pipeline_begin, pipeline)

            try:
                yield from pipeline.process()
            except Exception as error:
                if isinstance(error, StopIteration):
                    raise

                is_expected = isinstance(error, self.EXPECTED_EXCEPTIONS)
                show_traceback = not is_expected

                if show_traceback:
                    _logger.exception('Fatal exception.')
                else:
                    try:
                        text = '{}: {}'.format(type(error).__name__, error)
                    except AttributeError:
                        text = str(error)
                    _logger.error(text)

                self._update_exit_code_from_error(error)

                if not is_expected:
                    self._print_crash_message()
                    self._print_report_bug_message()

                break

            self.event_dispatcher.notify(self.Event.pipeline_end, pipeline)

        self._current_pipeline = None
        self._state = ApplicationState.stopping

        if self._exit_code == ExitStatus.ssl_verification_error:
            self._print_ssl_error()

        _logger.info(_('Exiting with status {0}.'), self._exit_code)

        self._state = ApplicationState.stopped

        return self._exit_code

    def _update_exit_code_from_error(self, error):
        '''Set the exit code based on the error type.

        Args:
            error (:class:`Exception`): An exception instance.
        '''
        for error_type, exit_code in self.ERROR_CODE_MAP.items():
            if isinstance(error, error_type):
                self.update_exit_code(exit_code)
                break
        else:
            self.update_exit_code(ExitStatus.generic_error)

    def update_exit_code(self, code: int):
        '''Set the exit code if it is serious than before.

        Args:
            code: The exit code.
        '''
        if code:
            if self._exit_code:
                self._exit_code = min(self._exit_code, code)
            else:
                self._exit_code = code

    @classmethod
    def _print_ssl_error(cls):
        '''Print an invalid SSL certificate warning.'''
        _logger.info(_('A SSL certificate could not be verified.'))
        _logger.info(_('To ignore and proceed insecurely, '
                       'use ‘--no-check-certificate’.'))

    @classmethod
    def _print_crash_message(cls):
        '''Print crashed message.'''
        _logger.critical(_('Sorry, Wpull unexpectedly crashed.'))

    @classmethod
    def _print_report_bug_message(cls):
        '''Print report the bug message.'''
        _logger.critical(_(
            'Please report this problem to the authors at Wpull\'s '
            'issue tracker so it may be fixed. '
            'If you know how to program, maybe help us fix it? '
            'Thank you for helping us help you help us all.'
        ))
class Application(HookableMixin):
    '''Default non-interactive application user interface.

    This class manages process signals and displaying warnings.
    '''
    class Event(enum.Enum):

    def __init__(self, pipeline_series: PipelineSeries):
        pass

    @property
    def exit_code(self) -> int:
        pass

    @exit_code.setter
    def exit_code(self) -> int:
        pass

    def setup_signal_handlers(self):
        '''Setup Ctrl+C and SIGTERM handlers.'''
        pass

    def graceful_stop_callback():
        pass

    def forceful_stop_callback():
        pass

    def stop(self):
        pass

    def run_sync(self) -> int:
        '''Run the application.

        This function is blocking.

        Returns:
            int: The exit status.
        '''
        pass

    @asyncio.coroutine
    def run_sync(self) -> int:
        pass

    def _update_exit_code_from_error(self, error):
        '''Set the exit code based on the error type.

        Args:
            error (:class:`Exception`): An exception instance.
        '''
        pass

    def update_exit_code(self, code: int):
        '''Set the exit code if it is serious than before.

        Args:
            code: The exit code.
        '''
        pass

    @classmethod
    def _print_ssl_error(cls):
        '''Print an invalid SSL certificate warning.'''
        pass

    @classmethod
    def _print_crash_message(cls):
        '''Print crashed message.'''
        pass

    @classmethod
    def _print_report_bug_message(cls):
        '''Print report the bug message.'''
        pass
22
8
13
2
9
1
2
0.19
1
12
4
0
9
4
12
14
212
46
139
40
116
27
106
33
89
10
2
4
31
6,691
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/app.py
wpull.application.app.ApplicationState
class ApplicationState(enum.Enum):
    ready = 'ready'
    running = 'running'
    stopping = 'stopping'
    stopped = 'stopped'
class ApplicationState(enum.Enum):
    pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
49
5
0
5
5
4
0
5
5
4
0
4
0
0
6,692
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/builder.py
wpull.application.builder.Builder
class Builder(object):
    '''Application builder.

    Args:
        args: Options from :class:`argparse.ArgumentParser`
    '''
    def __init__(self, args, unit_test=False):
        self._args = args
        self._factory = Factory({
            'Application': Application,
            'BatchDocumentConverter': BatchDocumentConverter,
            'BandwidthLimiter': BandwidthLimiter,
            'HTTPClient': HTTPClient,
            'CookieJar': CookieJar,
            'CookieJarWrapper': CookieJarWrapper,
            'CookiePolicy': DeFactoCookiePolicy,
            'ConnectionPool': ConnectionPool,
            'CSSScraper': CSSScraper,
            'DemuxDocumentScraper': DemuxDocumentScraper,
            'DemuxURLFilter': DemuxURLFilter,
            'FTPProcessor': FTPProcessor,
            'ElementWalker': ElementWalker,
            'FetchRule': FetchRule,
            'FileWriter': NullWriter,
            'FTPClient': FTPClient,
            'FTPProcessorFetchParams': FTPProcessorFetchParams,
            'HTTPProxyServer': HTTPProxyServer,
            'HTMLParser': NotImplemented,
            'HTMLScraper': HTMLScraper,
            'JavaScriptScraper': JavaScriptScraper,
            'PathNamer': PathNamer,
            'PhantomJSDriver': PhantomJSDriver,
            'PhantomJSCoprocessor': PhantomJSCoprocessor,
            'PipelineSeries': PipelineSeries,
            'ProcessingRule': ProcessingRule,
            'Processor': DelegateProcessor,
            'ProxyCoprocessor': ProxyCoprocessor,
            'ProxyHostFilter': ProxyHostFilter,
            'RedirectTracker': RedirectTracker,
            'Request': Request,
            'Resolver': Resolver,
            'ResourceMonitor': ResourceMonitor,
            'ResultRule': ResultRule,
            'RobotsTxtChecker': RobotsTxtChecker,
            'RobotsTxtPool': RobotsTxtPool,
            'SitemapScraper': SitemapScraper,
            'Statistics': Statistics,
            'URLInfo': URLInfo,
            'URLTable': URLTableHookWrapper,
            'URLTableImplementation': SQLURLTable,
            'URLRewriter': URLRewriter,
            'Waiter': LinearWaiter,
            'WARCRecorder': WARCRecorder,
            'WebClient': WebClient,
            'WebProcessor': WebProcessor,
            'WebProcessorFetchParams': WebProcessorFetchParams,
            'YoutubeDlCoprocessor': YoutubeDlCoprocessor,
        })
        self._unit_test = unit_test

    @property
    def factory(self):
        '''Return the Factory.

        Returns:
            Factory: An :class:`.factory.Factory` instance.
        '''
        return self._factory

    def build(self) -> Application:
        '''Put the application together.
        '''
        pipelines = self._build_pipelines()
        self._factory.new('Application', pipelines)

        return self._factory['Application']

    def _build_pipelines(self) -> PipelineSeries:
        app_session = AppSession(self._factory, self._args, self.get_stderr())

        app_start_pipeline = Pipeline(
            AppSource(app_session),
            [
                LoggingSetupTask(),
                DatabaseSetupTask(),
                ParserSetupTask(),
                WARCVisitsTask(),
                SSLContextTask(),
                ResmonSetupTask(),
                StatsStartTask(),
                URLFiltersSetupTask(),
                NetworkSetupTask(),
                ClientSetupTask(),
                WARCRecorderSetupTask(),
                FileWriterSetupTask(),
                ProcessorSetupTask(),
                ProxyServerSetupTask(),
                CoprocessorSetupTask(),
                LinkConversionSetupTask(),
                PluginSetupTask(),
                InputURLTask(),
                URLFiltersPostURLImportSetupTask(),
            ])

        url_item_source = URLItemSource(app_session)

        download_pipeline = Pipeline(
            url_item_source,
            [
                ProcessTask(),
                ResmonSleepTask(),
                BackgroundAsyncTask(),
                CheckQuotaTask(),
            ]
        )

        download_stop_pipeline = Pipeline(
            AppSource(app_session),
            [
                StatsStopTask()
            ])
        download_stop_pipeline.skippable = True

        queued_file_source = QueuedFileSource(app_session)
        conversion_pipeline = Pipeline(
            queued_file_source,
            [
                LinkConversionTask()
            ]
        )
        conversion_pipeline.skippable = True

        app_stop_pipeline = Pipeline(
            AppSource(app_session),
            [
                BackgroundAsyncCleanupTask(),
                AppStopTask(),
                WARCRecorderTeardownTask(),
                CookieJarTeardownTask(),
                LoggingShutdownTask(),
            ])

        pipeline_series = self._factory.new(
            'PipelineSeries',
            (
                app_start_pipeline, download_pipeline,
                download_stop_pipeline, conversion_pipeline,
                app_stop_pipeline
            ))
        pipeline_series.concurrency_pipelines.add(download_pipeline)

        return pipeline_series

    def build_and_run(self):
        '''Build and run the application.

        Returns:
            int: The exit status.
        '''
        app = self.build()
        exit_code = app.run_sync()
        return exit_code

    def get_stderr(self):
        '''Return stderr or something else if under unit testing.'''
        if self._unit_test:
            return sys.stdout
        else:
            return sys.stderr
class Builder(object):
    '''Application builder.

    Args:
        args: Options from :class:`argparse.ArgumentParser`
    '''
    def __init__(self, args, unit_test=False):
        pass

    @property
    def factory(self):
        '''Return the Factory.

        Returns:
            Factory: An :class:`.factory.Factory` instance.
        '''
        pass

    def build(self) -> Application:
        '''Put the application together.
        '''
        pass

    def _build_pipelines(self) -> PipelineSeries:
        pass

    def build_and_run(self):
        '''Build and run the application.

        Returns:
            int: The exit status.
        '''
        pass

    def get_stderr(self):
        '''Return stderr or something else if under unit testing.'''
        pass
8
5
26
2
22
2
1
0.11
1
77
76
0
6
3
6
6
170
19
136
23
128
15
33
22
26
2
1
1
7
6,693
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/factory.py
wpull.application.factory.Factory
class Factory(collections.Mapping, object):
    '''Allows selection of classes and keeps track of instances.

    This class behaves like a mapping. Keys are names of classes and values
    are instances.
    '''
    def __init__(self, class_map=None):
        super().__init__()
        self._class_map = class_map or {}
        self._instance_map = {}

    @property
    def class_map(self):
        '''A mapping of names to class types.'''
        return self._class_map

    @property
    def instance_map(self):
        '''A mapping of names to instances.'''
        return self._instance_map

    def set(self, name, class_):
        '''Set the callable or class to be used.

        Args:
            name (str): The name of the class.
            class_: The class or a callable factory function.
        '''
        self._class_map[name] = class_

    def __getitem__(self, key):
        return self._instance_map[key]

    def __iter__(self):
        return iter(self._instance_map)

    def __len__(self):
        return len(self._instance_map)

    def new(self, name, *args, **kwargs):
        '''Create an instance.

        Args:
            name (str): The name of the class
            args: The arguments to pass to the class.
            kwargs: The keyword arguments to pass to the class.

        Returns:
            instance
        '''
        if name in self._instance_map:
            raise ValueError('Instance {0} is already initialized'
                             .format(name))

        instance = self._class_map[name](*args, **kwargs)
        self._instance_map[name] = instance
        return instance

    def is_all_initialized(self):
        '''Return whether all the instances have been initialized.

        Returns:
            bool
        '''
        return frozenset(self._class_map.keys()) == \
            frozenset(self._instance_map.keys())
class Factory(collections.Mapping, object):
    '''Allows selection of classes and keeps track of instances.

    This class behaves like a mapping. Keys are names of classes and values
    are instances.
    '''
    def __init__(self, class_map=None):
        pass

    @property
    def class_map(self):
        '''A mapping of names to class types.'''
        pass

    @property
    def instance_map(self):
        '''A mapping of names to instances.'''
        pass

    def set(self, name, class_):
        '''Set the callable or class to be used.

        Args:
            name (str): The name of the class.
            class_: The class or a callable factory function.
        '''
        pass

    def __getitem__(self, key):
        pass

    def __iter__(self):
        pass

    def __len__(self):
        pass

    def new(self, name, *args, **kwargs):
        '''Create an instance.

        Args:
            name (str): The name of the class
            args: The arguments to pass to the class.
            kwargs: The keyword arguments to pass to the class.

        Returns:
            instance
        '''
        pass

    def is_all_initialized(self):
        '''Return whether all the instances have been initialized.

        Returns:
            bool
        '''
        pass
12
6
6
1
3
2
1
0.79
2
3
0
0
9
2
9
9
66
14
29
15
17
23
25
13
15
2
1
1
10
6,694
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/factory_test.py
wpull.application.factory_test.TestFactory
class TestFactory(unittest.TestCase):
    def test_factory(self):
        factory = Factory()
        factory.set('dict', dict)

        self.assertNotIn('dict', factory)
        self.assertFalse(factory.is_all_initialized())

        my_instance = factory.new('dict', [('hi', 'hello')])

        self.assertIn('dict', factory)
        self.assertEqual(my_instance, factory['dict'])
        self.assertTrue(factory.is_all_initialized())

        self.assertEqual(1, len(factory))
        self.assertEqual(['dict'], list(iter(factory)))

        self.assertEqual(my_instance, factory.instance_map['dict'])

        with self.assertRaises(ValueError):
            factory.new('dict', [('hi', 'hello')])
class TestFactory(unittest.TestCase):
    def test_factory(self):
        pass
2
0
19
5
14
0
1
0
1
4
1
0
1
0
1
73
20
5
15
4
13
0
15
4
13
1
2
1
1
6,695
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/hook.py
wpull.application.hook.Actions
class Actions(enum.Enum):
    '''Actions for handling responses and errors.

    Attributes:
        NORMAL (normal): Use Wpull's original behavior.
        RETRY (retry): Retry this item (as if an error has occurred).
        FINISH (finish): Consider this item as done; don't do any further
            processing on it.
        STOP (stop): Raises :class:`HookStop` to stop the Engine from running.
    '''
    NORMAL = 'normal'
    RETRY = 'retry'
    FINISH = 'finish'
    STOP = 'stop'
class Actions(enum.Enum):
    '''Actions for handling responses and errors.

    Attributes:
        NORMAL (normal): Use Wpull's original behavior.
        RETRY (retry): Retry this item (as if an error has occurred).
        FINISH (finish): Consider this item as done; don't do any further
            processing on it.
        STOP (stop): Raises :class:`HookStop` to stop the Engine from running.
    '''
    pass
1
1
0
0
0
0
0
1.6
1
0
0
0
0
0
0
49
14
1
5
5
4
8
5
5
4
0
4
0
0
6,696
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/hook.py
wpull.application.hook.EventDispatcher
class EventDispatcher(collections.abc.Mapping):
    def __init__(self):
        self._callbacks = {}

    def __getitem__(self, key):
        return self._callbacks[key]

    def __iter__(self):
        return iter(self._callbacks)

    def __len__(self):
        return len(self._callbacks)

    def register(self, name: str):
        if name in self._callbacks:
            raise ValueError('Event already registered')

        self._callbacks[name] = set()

    def unregister(self, name: str):
        del self._callbacks[name]

    def add_listener(self, name: str, callback):
        self._callbacks[name].add(callback)

    def remove_listener(self, name: str, callback):
        self._callbacks[name].remove(callback)

    def notify(self, name: str, *args, **kwargs):
        for callback in self._callbacks[name]:
            callback(*args, **kwargs)

    def is_registered(self, name: str) -> bool:
        return name in self._callbacks
class EventDispatcher(collections.abc.Mapping):
    def __init__(self):
        pass

    def __getitem__(self, key):
        pass

    def __iter__(self):
        pass

    def __len__(self):
        pass

    def register(self, name: str):
        pass

    def unregister(self, name: str):
        pass

    def add_listener(self, name: str, callback):
        pass

    def remove_listener(self, name: str, callback):
        pass

    def notify(self, name: str, *args, **kwargs):
        pass

    def is_registered(self, name: str) -> bool:
        pass
11
0
2
0
2
0
1
0
1
4
0
0
10
1
10
44
34
10
24
13
13
0
24
13
13
2
6
1
12
6,697
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/hook.py
wpull.application.hook.HookAlreadyConnectedError
class HookAlreadyConnectedError(ValueError):
    '''A callback is already connected to the hook.'''
class HookAlreadyConnectedError(ValueError):
    '''A callback is already connected to the hook.'''
    pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
11
2
0
1
1
0
1
1
1
0
0
4
0
0
6,698
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/hook.py
wpull.application.hook.HookDisconnected
class HookDisconnected(RuntimeError):
    '''No callback is connected.'''
class HookDisconnected(RuntimeError):
    '''No callback is connected.'''
    pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
11
2
0
1
1
0
1
1
1
0
0
4
0
0
6,699
ArchiveTeam/wpull
ArchiveTeam_wpull/wpull/application/hook.py
wpull.application.hook.HookDispatcher
class HookDispatcher(collections.abc.Mapping):
    '''Dynamic callback hook system.'''
    def __init__(self, event_dispatcher_transclusion: Optional['EventDispatcher']=None):
        super().__init__()
        self._callbacks = {}
        self._event_dispatcher = event_dispatcher_transclusion

    def __getitem__(self, key):
        return self._callbacks[key]

    def __iter__(self):
        return iter(self._callbacks)

    def __len__(self):
        return len(self._callbacks)

    def register(self, name: str):
        '''Register hooks that can be connected.'''
        if name in self._callbacks:
            raise ValueError('Hook already registered')

        self._callbacks[name] = None

        if self._event_dispatcher is not None:
            self._event_dispatcher.register(name)

    def unregister(self, name: str):
        '''Unregister hook.'''
        del self._callbacks[name]

        if self._event_dispatcher is not None:
            self._event_dispatcher.unregister(name)

    def connect(self, name, callback):
        '''Add callback to hook.'''
        if not self._callbacks[name]:
            self._callbacks[name] = callback
        else:
            raise HookAlreadyConnectedError('Callback hook already connected.')

    def disconnect(self, name: str):
        '''Remove callback from hook.'''
        self._callbacks[name] = None

    def call(self, name: str, *args, **kwargs):
        '''Invoke the callback.'''
        if self._event_dispatcher is not None:
            self._event_dispatcher.notify(name, *args, **kwargs)

        if self._callbacks[name]:
            return self._callbacks[name](*args, **kwargs)
        else:
            raise HookDisconnected('No callback is connected.')

    @asyncio.coroutine
    def call_async(self, name: str, *args, **kwargs):
        '''Invoke the callback.'''
        if self._event_dispatcher is not None:
            self._event_dispatcher.notify(name, *args, **kwargs)

        if self._callbacks[name]:
            return (yield from self._callbacks[name](*args, **kwargs))
        else:
            raise HookDisconnected('No callback is connected.')

    def is_connected(self, name: str) -> bool:
        '''Return whether the hook is connected.'''
        return bool(self._callbacks[name])

    def is_registered(self, name: str) -> bool:
        return name in self._callbacks
class HookDispatcher(collections.abc.Mapping):
    '''Dynamic callback hook system.'''
    def __init__(self, event_dispatcher_transclusion: Optional['EventDispatcher']=None):
        pass

    def __getitem__(self, key):
        pass

    def __iter__(self):
        pass

    def __len__(self):
        pass

    def register(self, name: str):
        '''Register hooks that can be connected.'''
        pass

    def unregister(self, name: str):
        '''Unregister hook.'''
        pass

    def connect(self, name, callback):
        '''Add callback to hook.'''
        pass

    def disconnect(self, name: str):
        '''Remove callback from hook.'''
        pass

    def call(self, name: str, *args, **kwargs):
        '''Invoke the callback.'''
        pass

    @asyncio.coroutine
    def call_async(self, name: str, *args, **kwargs):
        '''Invoke the callback.'''
        pass

    def is_connected(self, name: str) -> bool:
        '''Return whether the hook is connected.'''
        pass

    def is_registered(self, name: str) -> bool:
        pass
14
8
5
0
4
1
2
0.17
1
6
2
0
12
2
12
46
71
16
47
16
33
8
43
15
30
3
6
1
20