| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 843k |
| repository_name | string (length) | 7 | 55 |
| file_path | string (length) | 9 | 332 |
| class_name | string (length) | 3 | 290 |
| human_written_code | string (length) | 12 | 4.36M |
| class_skeleton | string (length) | 19 | 2.2M |
| total_program_units | int64 | 1 | 9.57k |
| total_doc_str | int64 | 0 | 4.2k |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 300 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 176 |
| CountClassBase | float64 | 0 | 48 |
| CountClassCoupled | float64 | 0 | 589 |
| CountClassCoupledModified | float64 | 0 | 581 |
| CountClassDerived | float64 | 0 | 5.37k |
| CountDeclInstanceMethod | float64 | 0 | 4.2k |
| CountDeclInstanceVariable | float64 | 0 | 299 |
| CountDeclMethod | float64 | 0 | 4.2k |
| CountDeclMethodAll | float64 | 0 | 4.2k |
| CountLine | float64 | 1 | 115k |
| CountLineBlank | float64 | 0 | 9.01k |
| CountLineCode | float64 | 0 | 94.4k |
| CountLineCodeDecl | float64 | 0 | 46.1k |
| CountLineCodeExe | float64 | 0 | 91.3k |
| CountLineComment | float64 | 0 | 27k |
| CountStmt | float64 | 1 | 93.2k |
| CountStmtDecl | float64 | 0 | 46.1k |
| CountStmtExe | float64 | 0 | 90.2k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 6k |

For string columns the min/max are string lengths. Each record below is one row of this table: its first line gives `id | repository_name | file_path | class_name`, the `human_written_code:` and `class_skeleton:` blocks hold the two code cells, and the closing `| … |` line lists the remaining 29 metric values in the column order above.
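To work with rows like these programmatically, a minimal sketch (assuming the table has been exported to a parquet file named `class_level_dataset.parquet`; this excerpt does not show the dataset's actual distribution format):

```python
import pandas as pd

# Hypothetical file name; the real distribution format is not shown in this excerpt.
df = pd.read_parquet("class_level_dataset.parquet")

# Each row pairs a class's full source with its signature-only skeleton.
row = df[df["class_name"] == "pyArango.users.Users"].iloc[0]
print(row["class_skeleton"])

# The metric columns support simple filtering, e.g. keep only classes that
# declare at least one method and at least five lines of code.
nontrivial = df[(df["CountDeclMethod"] > 0) & (df["CountLineCode"] >= 5)]
print(len(nontrivial))
```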

6,500 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.testIndexes.persons
human_written_code:
class persons(Collection):
_fields = {
"name": Field(),
"Description": Field(),
"geo": Field(),
"skip": Field()
}
class_skeleton:
class persons(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 7 | 0 | 7 | 2 | 6 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |

6,501 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_collection_type_creation.Coly
human_written_code:
class Coly(Collection):
pass
class_skeleton:
class Coly(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |

6,502 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_collection_type_creation.Edgy
human_written_code:
class Edgy(Edges):
pass
class_skeleton:
class Edgy(Edges):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |

6,503 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_document_cache.DummyDoc
human_written_code:
class DummyDoc(object):
def __init__(self, key):
self._key = key
self.hhh = "hhh"
self.store = {
"a": 1
}
def __getitem__(self, k):
return self.store[k]
def __setitem__(self, k, v):
self.store[k] = v
def __repr__(self):
return repr(self._key)
class_skeleton:
class DummyDoc(object):
def __init__(self, key):
pass
def __getitem__(self, k):
pass
def __setitem__(self, k, v):
pass
def __repr__(self):
pass
| 5 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 4 | 3 | 4 | 4 | 16 | 3 | 13 | 8 | 8 | 0 | 11 | 8 | 6 | 1 | 1 | 0 | 4 |
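The `class_skeleton` cell tracks `human_written_code` closely: signatures, decorators, and docstrings are kept while statement bodies collapse to `pass` (compare the `DummyDoc` pair above, or the `Users` row further down, where docstrings survive). A sketch of how such a skeleton could be derived with Python's `ast` module; the dataset's actual curation tooling is not shown here, so treat this as an illustration:

```python
import ast

def skeletonize(source: str) -> str:
    """Reduce class definitions to signatures, docstrings, and `pass` bodies."""
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, ast.ClassDef):
            # Keep the docstring (if any) plus the method definitions;
            # class-level data such as `_fields = {...}` is dropped, matching
            # the rows above.
            kept = [node.body[0]] if ast.get_docstring(node) else []
            kept += [child for child in node.body
                     if isinstance(child, (ast.FunctionDef, ast.AsyncFunctionDef))]
            node.body = kept or [ast.Pass()]
        elif isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            # Keep the docstring (if any) and replace the rest with `pass`.
            doc = [node.body[0]] if ast.get_docstring(node) else []
            node.body = doc + [ast.Pass()]
    return ast.unparse(tree)  # ast.unparse requires Python 3.9+

src = (
    "class DummyDoc(object):\n"
    "    def __init__(self, key):\n"
    "        self._key = key\n"
)
print(skeletonize(src))
# class DummyDoc(object):
#
#     def __init__(self, key):
#         pass
```

The exact quoting and spacing of the output may differ from the `class_skeleton` cells; only the structure is intended to match.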

6,504 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_document_has_field.theCol
human_written_code:
class theCol(Collection):
_fields = {
'address': {
'street': Field(),
}
}
class_skeleton:
class theCol(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 6 | 0 | 6 | 2 | 5 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |

6,505 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_fields_on_set.Col_on_set
human_written_code:
class Col_on_set(Collection):
_validation = {
"on_save": False,
"on_set": True,
"allow_foreign_fields": False
}
_fields = {
"str": Field(validators=[VAL.Length(50, 51)]),
"notNull": Field(validators=[VAL.NotNull()]),
"nestedStr": {
"str": Field(validators=[VAL.Length(50, 51)])
}
}
class_skeleton:
class Col_on_set(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 14 | 1 | 13 | 3 | 12 | 0 | 3 | 3 | 2 | 0 | 2 | 0 | 0 |

6,506 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_fill_default_on_save.theCol
human_written_code:
class theCol(Collection):
_fields = {
"name": Field(default="Paper"),
"dct1": {
"num": Field(default=13),
"dct2": {
"str": Field(default='string'),
}
}
}
_validation = {
"on_save": True,
"on_set": True,
"allow_foreign_fields": False
}
class_skeleton:
class theCol(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 16 | 1 | 15 | 3 | 14 | 0 | 3 | 3 | 2 | 0 | 2 | 0 | 0 |

6,507 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_get_edges.Human
human_written_code:
class Human(Collection):
_fields = {
"number": Field()
}
class_skeleton:
class Human(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |

6,508 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_get_edges.Relation
human_written_code:
class Relation(Edges):
_fields = {
"number": Field()
}
class_skeleton:
class Relation(Edges):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |

6,509 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_graph.Friend
human_written_code:
class Friend(Edges):
_fields = {
"number": Field()
}
class_skeleton:
class Friend(Edges):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |

6,510 | ArangoDB-Community/pyArango | ArangoDB-Community_pyArango/pyArango/users.py | pyArango.users.Users
human_written_code:
class Users(object):
"""This one manages users."""
def __init__(self, connection):
self.connection = connection
def getURL(self):
return "%s/user" % (self.connection.getURL())
def createUser(self, username, password):
u = User(self)
u["username"] = username
u["password"] = password
return u
def fetchAllUsers(self, rawResults = False):
"""Returns all available users. if rawResults, the result will be a list of python dicts instead of User objects"""
r = self.connection.session.get(self.getURL())
if r.status_code == 200:
data = r.json()
if rawResults:
return data["result"]
else:
res = []
for resu in data["result"]:
u = User(self, resu)
res.append(u)
return res
else:
raise ConnectionError("Unable to get user list", r.url, r.status_code)
def fetchUser(self, username, rawResults = False):
"""Returns a single user. if rawResults, the result will be a list of python dicts instead of User objects"""
url = "%s/%s" % (self.getURL(), username)
r = self.connection.session.get(url)
if r.status_code == 200:
data = r.json()
if rawResults:
return data["result"]
else:
u = User(self, data)
return u
else:
raise KeyError("Unable to get user: %s" % username)
def __getitem__(self, k):
return self.fetchUser(k)
class_skeleton:
class Users(object):
'''This one manages users.'''
def __init__(self, connection):
pass
def getURL(self):
pass
def createUser(self, username, password):
pass
def fetchAllUsers(self, rawResults = False):
'''Returns all available users. If rawResults, the result will be a list of python dicts instead of User objects'''
pass
def fetchUser(self, username, rawResults = False):
'''Returns a single user. If rawResults, the result will be a python dict instead of a User object'''
pass
def __getitem__(self, k):
pass
| 7 | 3 | 7 | 0 | 6 | 0 | 2 | 0.08 | 1 | 3 | 1 | 0 | 6 | 1 | 6 | 6 | 47 | 6 | 38 | 18 | 31 | 3 | 34 | 18 | 27 | 4 | 1 | 3 | 11 |
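A consistency check that is easy to run on the `Users` row above (the relationship is inferred from the visible rows, not documented in this excerpt): `CommentToCodeRatio` matches `CountLineComment / CountLineCode`.

```python
# Inferred relationship, verified against the Users row above
# (3 / 38 ≈ 0.079 → 0.08) and the Validator row below (3 / 7 ≈ 0.43).
count_line_comment, count_line_code = 3, 38
ratio = count_line_comment / count_line_code
assert round(ratio, 2) == 0.08
```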

6,511 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_graph.MyGraph
human_written_code:
class MyGraph(Graph):
_edgeDefinitions = (EdgeDefinition("Friend", fromCollections=[
"Humans"], toCollections=["Humans"]), )
_orphanedCollections = []
class_skeleton:
class MyGraph(Graph):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 4 | 1 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 2 | 0 | 0 |

6,512 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_save_edge.Human
human_written_code:
class Human(Collection):
_fields = {
"name": Field()
}
class_skeleton:
class Human(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |

6,513 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_save_edge.Relation
human_written_code:
class Relation(Edges):
_fields = {
"ctype": Field()
}
class_skeleton:
class Relation(Edges):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |

6,514 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_to_default.theCol
human_written_code:
class theCol(Collection):
_fields = {
'address': {
'street': Field(default="Paper street"),
},
"name": Field(default="Tyler Durden")
}
class_skeleton:
class theCol(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 7 | 0 | 7 | 2 | 6 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |

6,515 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_traversal.knows
human_written_code:
class knows(Edges):
_fields = {
"number": Field()
}
class_skeleton:
class knows(Edges):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |

6,516 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_traversal.knows_graph
human_written_code:
class knows_graph(Graph):
_edgeDefinitions = (EdgeDefinition("knows", fromCollections=[
"persons"], toCollections=["persons"]), )
_orphanedCollections = []
class_skeleton:
class knows_graph(Graph):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 4 | 1 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 2 | 0 | 0 |

6,517 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_traversal.persons
human_written_code:
class persons(Collection):
_fields = {
"name": Field()
}
class_skeleton:
class persons(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |

6,518 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_unvalidated_nested_fields.Col_on_save
human_written_code:
class Col_on_save(Collection):
_validation = {
"on_save": True,
"on_set": False,
"allow_foreign_fields": True
}
_fields = {
"str": Field(validators=[String_val()]),
"nestedSomething": Field()
}
class_skeleton:
class Col_on_save(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 11 | 1 | 10 | 3 | 9 | 0 | 3 | 3 | 2 | 0 | 2 | 0 | 0 |

6,519 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_unvalidated_nested_fields.String_val
human_written_code:
class String_val(VAL.Validator):
def validate(self, value):
if not isinstance(value, bytes) and not isinstance(value, str):
raise ValidationError("Field value must be a string")
return True
class_skeleton:
class String_val(VAL.Validator):
def validate(self, value):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 3 | 1 | 0 | 1 | 0 | 1 | 1 | 6 | 1 | 5 | 2 | 3 | 0 | 5 | 2 | 3 | 2 | 1 | 1 | 2 |

6,520 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_users_credentials.persons
human_written_code:
class persons(Collection):
pass
class_skeleton:
class persons(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |

6,521 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_validation_default_inlavid_value.keyTest.Col
human_written_code:
class Col(Collection):
_validation = {
"on_save": "wrong",
}
class_skeleton:
class Col(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |

6,522 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/examples/createSocialGraph.py | createSocialGraph.Social.male
human_written_code:
class male(Collection):
_fields = {
"name": Field()
}
class_skeleton:
class male(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |

6,523 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/examples/createSocialGraph.py | createSocialGraph.Social.female
human_written_code:
class female(Collection):
_fields = {
"name": Field()
}
class_skeleton:
class female(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |

6,524 | ArangoDB-Community/pyArango | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArangoDB-Community_pyArango/pyArango/tests/tests.py | pyArango.tests.tests.pyArangoTests.test_graph.Humans
human_written_code:
class Humans(Collection):
_fields = {
"name": Field()
}
class_skeleton:
class Humans(Collection):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |

6,525 | ArangoDB-Community/pyArango | ArangoDB-Community_pyArango/pyArango/validation.py | pyArango.validation.Int
human_written_code:
class Int(Validator):
"""The value must be an integer"""
def validate(self, value):
if not isinstance(value, int):
raise ValidationError("%s is not a valid integer" % value)
return True
class_skeleton:
class Int(Validator):
'''The value must be an integer'''
def validate(self, value):
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 2 | 0.2 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 4 | 6 | 0 | 5 | 2 | 3 | 1 | 5 | 2 | 3 | 2 | 2 | 1 | 2 |

6,526 | ArangoDB-Community/pyArango | ArangoDB-Community_pyArango/pyArango/validation.py | pyArango.validation.Validator
human_written_code:
class Validator(object):
"""All validators must inherit from this class"""
def __init__(self, *args, **kwargs):
pass
def validate(self, value):
"""The only function that a validator must implement. Must return True if erevything went well or a ValidationError otherwise"""
raise NotImplemented("Should be implemented in child")
def __str__(self):
"""This function should be redifined in child to give a quick overview of the validator"""
return self.__class__.__name__
class_skeleton:
class Validator(object):
'''All validators must inherit from this class'''
def __init__(self, *args, **kwargs):
pass
def validate(self, value):
'''The only function that a validator must implement. Must return True if everything went well or raise a ValidationError otherwise'''
pass
def __str__(self):
'''This function should be redefined in child to give a quick overview of the validator'''
pass
| 4 | 3 | 3 | 0 | 2 | 1 | 1 | 0.43 | 1 | 0 | 0 | 9 | 3 | 0 | 3 | 3 | 12 | 2 | 7 | 4 | 3 | 3 | 7 | 4 | 3 | 1 | 1 | 0 | 3 |
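The `Validator` base class above defines the contract that all the concrete validators in the following rows implement: `validate` returns True or raises a `ValidationError`. A hypothetical subclass, shown only to illustrate that contract (the name `Positive` and its message are invented here, and the import locations are assumed from pyArango's module layout):

```python
from pyArango.validation import Validator
from pyArango.theExceptions import ValidationError  # assumed location of the error class

class Positive(Validator):
    """The value must be strictly positive"""
    def validate(self, value):
        if value <= 0:
            raise ValidationError("%s is not a positive value" % value)
        return True
```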

6,527 | ArangoDB-Community/pyArango | ArangoDB-Community_pyArango/pyArango/validation.py | pyArango.validation.String
human_written_code:
class String(Validator):
"""The value must be a string or unicode"""
def validate(self, value):
if not isinstance(value, str) and not isinstance(value, unicode):
raise ValidationError("%s is not a valid string" % value)
return True
class_skeleton:
class String(Validator):
'''The value must be a string or unicode'''
def validate(self, value):
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 2 | 0.2 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 4 | 6 | 0 | 5 | 2 | 3 | 1 | 5 | 2 | 3 | 2 | 2 | 1 | 2 |

6,528 | ArangoDB-Community/pyArango | ArangoDB-Community_pyArango/pyArango/validation.py | pyArango.validation.Range
human_written_code:
class Range(Validator):
"""The value must une [lower, upper] range"""
def __init__(self, lower, upper):
self.lower = lower
self.upper = upper
def validate(self, value):
if value < self.lower or value > self.upper:
raise ValidationError("%s is not in [%s, %s]" % (value, self.lower, self.upper))
def __str__(self):
return "%s[%s, %s]" % (self.__class__.__name__, self.minLen, self.maxLen)
class_skeleton:
class Range(Validator):
'''The value must be in the [lower, upper] range'''
def __init__(self, lower, upper):
pass
def validate(self, value):
pass
def __str__(self):
pass
| 4 | 1 | 3 | 0 | 3 | 0 | 1 | 0.11 | 1 | 1 | 1 | 0 | 3 | 2 | 3 | 6 | 12 | 2 | 9 | 6 | 5 | 1 | 9 | 6 | 5 | 2 | 2 | 1 | 4 |

6,529 | ArangoDB-Community/pyArango | ArangoDB-Community_pyArango/pyArango/validation.py | pyArango.validation.Numeric
human_written_code:
class Numeric(Validator):
"""checks if the value is numerical"""
def validate(self, value):
try:
float(value)
except:
raise ValidationError("%s is not valid numerical value" % value)
return True
class_skeleton:
class Numeric(Validator):
'''checks if the value is numerical'''
def validate(self, value):
pass
| 2 | 1 | 6 | 0 | 6 | 0 | 2 | 0.14 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 4 | 8 | 0 | 7 | 2 | 5 | 1 | 7 | 2 | 5 | 2 | 2 | 1 | 2 |

6,530 | ArangoDB-Community/pyArango | ArangoDB-Community_pyArango/pyArango/validation.py | pyArango.validation.NotNull
human_written_code:
class NotNull(Validator):
"""Checks that the Field has a non null value. False is not considered a Null Value"""
def __init__(self, reject_zero=False, reject_empty_string=True):
self.reject_zero = reject_zero
self.reject_empty_string = reject_empty_string
def validate(self, value):
if value is None or ( (value == 0 and type(value) != bool ) and self.reject_zero) or (value == "" and self.reject_empty_string):
raise ValidationError("Field can't have a null value, got: '%s'" % value)
return True
class_skeleton:
class NotNull(Validator):
'''Checks that the Field has a non null value. False is not considered a Null Value'''
def __init__(self, reject_zero=False, reject_empty_string=True):
pass
def validate(self, value):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 2 | 0.13 | 1 | 3 | 1 | 0 | 2 | 2 | 2 | 5 | 10 | 1 | 8 | 5 | 5 | 1 | 8 | 5 | 5 | 2 | 2 | 1 | 3 |

6,531 | ArangoDB-Community/pyArango | ArangoDB-Community_pyArango/pyArango/validation.py | pyArango.validation.Email
human_written_code:
class Email(Validator):
"""Checks if the field contains an emailaddress"""
def validate(self, value):
import re
pat = r'^[A-z0-9._-]+@[A-z0-9.-]+\.[A-z]{2,4}$'
if re.match(pat, value) is None:
raise ValidationError("The email address: %s is invalid" % value)
return True
class_skeleton:
class Email(Validator):
'''Checks if the field contains an email address'''
def validate(self, value):
pass
| 2 | 1 | 6 | 0 | 6 | 0 | 2 | 0.14 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 4 | 8 | 0 | 7 | 4 | 4 | 1 | 7 | 4 | 4 | 2 | 2 | 1 | 2 |

6,532 | ArangoDB-Community/pyArango | ArangoDB-Community_pyArango/pyArango/validation.py | pyArango.validation.Enumeration
human_written_code:
class Enumeration(Validator):
"""The value must be in the allowed ones"""
def __init__(self, allowed):
self.allowed = set(allowed)
def validate(self, value):
if value not in self.allowed:
raise ValidationError("%s is not among the allowed values %s" % (value, self.allowed))
return True
class_skeleton:
class Enumeration(Validator):
'''The value must be in the allowed ones'''
def __init__(self, allowed):
pass
def validate(self, value):
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 2 | 0.14 | 1 | 2 | 1 | 0 | 2 | 1 | 2 | 5 | 9 | 1 | 7 | 4 | 4 | 1 | 7 | 4 | 4 | 2 | 2 | 1 | 3 |

6,533 | ArangoDB-Community/pyArango | ArangoDB-Community_pyArango/pyArango/validation.py | pyArango.validation.Length
human_written_code:
class Length(Validator):
"""validates that the value length is between given bounds"""
def __init__(self, minLen, maxLen):
self.minLen = minLen
self.maxLen = maxLen
def validate(self, value):
try:
length = len(value)
except:
raise ValidationError("Field '%s' of type '%s' has no length" % (value, type(value)))
if self.minLen <= len(value) and len(value) <= self.maxLen:
return True
raise ValidationError("Field must have a length in ['%s';'%s'] got: '%s'" % (self.minLen, self.maxLen, len(value)))
def __str__(self):
return "%s[%s, %s]" % (self.__class__.__name__, self.minLen, self.maxLen)
class_skeleton:
class Length(Validator):
'''validates that the value length is between given bounds'''
def __init__(self, minLen, maxLen):
pass
def validate(self, value):
pass
def __str__(self):
pass
| 4 | 1 | 5 | 0 | 4 | 0 | 2 | 0.07 | 1 | 2 | 1 | 0 | 3 | 2 | 3 | 6 | 18 | 3 | 14 | 7 | 10 | 1 | 14 | 7 | 10 | 3 | 2 | 1 | 5 |
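Taken together, the validation rows are small, directly usable classes. A usage sketch exercising `Length` and `NotNull` exactly as defined above (the import locations are assumed from pyArango's module layout):

```python
from pyArango.validation import Length, NotNull
from pyArango.theExceptions import ValidationError  # assumed location

print(Length(2, 5).validate("abc"))  # True: 2 <= len("abc") <= 5
print(str(Length(2, 5)))             # "Length[2, 5]" via __str__

try:
    NotNull().validate(None)
except ValidationError as exc:
    print(exc)  # "Field can't have a null value, got: 'None'"
```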

6,534 | ArchiveTeam/wpull | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/testing/integration/sample_user_scripts/extensive.plugin.py | extensive.plugin.Plugin
human_written_code:
class Plugin(WpullPlugin):
def __init__(self):
super().__init__()
self.counter = 0
self.injected_url_found = False
self.got_redirected_page = False
def activate(self):
super().activate()
self.app_session.factory['PipelineSeries'].concurrency = 2
@hook(PluginFunctions.resolve_dns)
def resolve_dns(self, host: str):
print('resolve_dns', host)
assert host == 'localhost'
return '127.0.0.1'
@hook(PluginFunctions.accept_url)
def accept_url(self, item_session: ItemSession, verdict: bool, reasons: dict):
url_info = item_session.request.url_info
url_record = item_session.url_record
print('accept_url', url_info)
assert url_info
if 'mailto:' in url_info.url:
assert not verdict
assert not reasons['filters']['SchemeFilter']
else:
assert url_info.path in (
'/robots.txt', '/', '/post/',
'/%95%B6%8E%9A%89%BB%82%AF/',
'/static/style.css', '/wolf',
'/some_page', '/some_page/',
'/mordor',
)
assert reasons['filters']['SchemeFilter']
for name, passed in reasons['filters'].items():
assert name
if url_info.path == '/':
assert not url_record.inline_level
assert verdict
elif url_info.path == '/post/':
assert not verdict
verdict = True
elif url_info.path == '/static/style.css':
assert url_record.inline_level
elif url_info.path == '/robots.txt':
verdict = False
return verdict
@event(PluginFunctions.queued_url)
def queued_url(self, url_info: URLInfo):
print('queued_url', url_info)
assert url_info.url
self.counter += 1
assert self.counter > 0
@event(PluginFunctions.dequeued_url)
def dequeued_url(self, url_info: URLInfo, record_info: URLRecord):
print('dequeued_url', url_info)
assert url_info.url
assert record_info.url
self.counter -= 1
assert self.counter >= 0
@hook(PluginFunctions.handle_pre_response)
def handle_pre_response(self, item_session: ItemSession):
if item_session.request.url_info.path == '/mordor':
return Actions.FINISH
return Actions.NORMAL
@hook(PluginFunctions.handle_response)
def handle_response(self, item_session: ItemSession):
url_info = item_session.request.url_info
print('handle_response', url_info)
assert isinstance(item_session.response, HTTPResponse)
response = cast(HTTPResponse, item_session.response)
if url_info.path == '/':
assert response.body.size
assert response.status_code == 200
elif url_info.path == '/post/':
assert response.status_code == 200
self.injected_url_found = True
return Actions.FINISH
elif url_info.path == '/some_page/':
self.got_redirected_page = True
return Actions.NORMAL
@hook(PluginFunctions.handle_error)
def handle_error(self, item_session: ItemSession, error: BaseException):
print('handle_response', item_session.request.url, error)
return Actions.NORMAL
@event(PluginFunctions.get_urls)
def get_urls(self, item_session: ItemSession):
filename = item_session.response.body.name
url_info = item_session.request.url_info
print('get_urls', filename)
assert filename
assert os.path.isfile(filename)
assert url_info.url
if url_info.path == '/':
item_session.add_child_url(
'http://localhost:' + str(url_info.port) + '/post/',
inline=True,
post_data='text=hello',
replace=True
)
item_session.add_child_url('..malformed')
@hook(PluginFunctions.wait_time)
def wait_time(self, seconds: float, item_session: ItemSession, error: Optional[Exception] = None):
assert seconds >= 0
return 0
@event(PluginFunctions.finishing_statistics)
def finish_statistics(self, app_session: AppSession, statistics: Statistics):
print('finish_statistics', statistics.start_time)
assert statistics.start_time
assert statistics.stop_time
print('queue counter', self.counter)
assert self.counter == 0
@hook(PluginFunctions.exit_status)
def exit_status(self, app_session: AppSession, exit_code: int):
assert exit_code == 4
assert self.injected_url_found
assert self.got_redirected_page
print('exit_status', exit_code)
return 42
class_skeleton:
class Plugin(WpullPlugin):
def __init__(self):
pass
def activate(self):
pass
@hook(PluginFunctions.resolve_dns)
def resolve_dns(self, host: str):
pass
@hook(PluginFunctions.accept_url)
def accept_url(self, item_session: ItemSession, verdict: bool, reasons: dict):
pass
@event(PluginFunctions.queued_url)
def queued_url(self, url_info: URLInfo):
pass
@event(PluginFunctions.dequeued_url)
def dequeued_url(self, url_info: URLInfo, record_info: URLRecord):
pass
@hook(PluginFunctions.handle_pre_response)
def handle_pre_response(self, item_session: ItemSession):
pass
@hook(PluginFunctions.handle_response)
def handle_response(self, item_session: ItemSession):
pass
@hook(PluginFunctions.handle_error)
def handle_error(self, item_session: ItemSession, error: BaseException):
pass
@event(PluginFunctions.get_urls)
def get_urls(self, item_session: ItemSession):
pass
@hook(PluginFunctions.wait_time)
def wait_time(self, seconds: float, item_session: ItemSession, error: Optional[Exception] = None):
pass
@event(PluginFunctions.finishing_statistics)
def finish_statistics(self, app_session: AppSession, statistics: Statistics):
pass
@hook(PluginFunctions.exit_status)
def exit_status(self, app_session: AppSession, exit_code: int):
pass
| 25 | 0 | 9 | 1 | 8 | 0 | 2 | 0 | 1 | 14 | 6 | 0 | 13 | 3 | 13 | 16 | 143 | 26 | 117 | 35 | 92 | 0 | 89 | 24 | 75 | 7 | 2 | 1 | 24 |

6,535 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/writer.py | wpull.writer.IgnoreFileWriter
human_written_code:
class IgnoreFileWriter(BaseFileWriter):
'''File writer that ignores files that already exist.'''
@property
def session_class(self):
return IgnoreFileWriterSession
class_skeleton:
class IgnoreFileWriter(BaseFileWriter):
'''File writer that ignores files that already exist.'''
@property
def session_class(self):
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 0.25 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 25 | 5 | 0 | 4 | 3 | 1 | 1 | 3 | 2 | 1 | 1 | 5 | 0 | 1 |

6,536 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/writer.py | wpull.writer.IgnoreFileWriterSession
human_written_code:
class IgnoreFileWriterSession(BaseFileWriterSession):
def process_request(self, request):
if not self._filename or not os.path.exists(self._filename):
return super().process_request(request)
class_skeleton:
class IgnoreFileWriterSession(BaseFileWriterSession):
def process_request(self, request):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 43 | 4 | 0 | 4 | 2 | 2 | 0 | 4 | 2 | 2 | 2 | 5 | 1 | 2 |

6,537 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/writer.py | wpull.writer.MuxBody
human_written_code:
class MuxBody(Body):
'''Writes data into a second file.'''
def __init__(self, stream: BinaryIO, **kwargs):
super().__init__(**kwargs)
self._stream = stream
def write(self, data: bytes) -> int:
self._stream.write(data)
return super().__getattr__('write')(data)
def writelines(self, lines):
for line in lines:
self._stream.write(line)
return super().__getattr__('writelines')(lines)
def flush(self):
self._stream.flush()
return super().__getattr__('flush')()
def close(self):
self._stream.close()
return super().__getattr__('close')()
class_skeleton:
class MuxBody(Body):
'''Writes data into a second file.'''
def __init__(self, stream: BinaryIO, **kwargs):
pass
def write(self, data: bytes) -> int:
pass
def writelines(self, lines):
pass
def flush(self):
pass
def close(self):
pass
| 6 | 1 | 3 | 0 | 3 | 0 | 1 | 0.06 | 1 | 4 | 0 | 0 | 5 | 1 | 5 | 11 | 22 | 4 | 17 | 8 | 11 | 1 | 17 | 8 | 11 | 2 | 2 | 1 | 6 |

6,538 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/writer.py | wpull.writer.NullWriter
human_written_code:
class NullWriter(BaseWriter):
'''File writer that doesn't write files.'''
def session(self) -> NullWriterSession:
return NullWriterSession()
class_skeleton:
class NullWriter(BaseWriter):
'''File writer that doesn't write files.'''
def session(self) -> NullWriterSession:
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.33 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 22 | 4 | 0 | 3 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 4 | 0 | 1 |

6,539 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/writer.py | wpull.writer.NullWriterSession
human_written_code:
class NullWriterSession(BaseWriterSession):
def process_request(self, request):
return request
def process_response(self, response):
return response
def discard_document(self, response):
pass
def save_document(self, response):
pass
def extra_resource_path(self, suffix):
pass
class_skeleton:
class NullWriterSession(BaseWriterSession):
def process_request(self, request):
pass
def process_response(self, response):
pass
def discard_document(self, response):
pass
def save_document(self, response):
pass
def extra_resource_path(self, suffix):
pass
| 6 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 5 | 0 | 5 | 30 | 15 | 4 | 11 | 6 | 5 | 0 | 11 | 6 | 5 | 1 | 4 | 0 | 5 |

6,540 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/writer.py | wpull.writer.BaseFileWriterSession
human_written_code:
class BaseFileWriterSession(BaseWriterSession):
'''Base class for File Writer Sessions.'''
def __init__(self, path_namer: PathNamer,
file_continuing: bool,
headers_included: bool,
local_timestamping: bool,
adjust_extension: bool,
content_disposition: bool,
trust_server_names: bool):
self._path_namer = path_namer
self._file_continuing = file_continuing
self._headers_included = headers_included
self._local_timestamping = local_timestamping
self._adjust_extension = adjust_extension
self._content_disposition = content_disposition
self._trust_server_names = trust_server_names
self._filename = None
self._file_continue_requested = False
@classmethod
def open_file(cls, filename: str, response: BaseResponse, mode='wb+'):
'''Open a file object on to the Response Body.
Args:
filename: The path where the file is to be saved
response: Response
mode: The file mode
This function will create the directories if they do not exist.
'''
_logger.debug('Saving file to {0}, mode={1}.',
filename, mode)
dir_path = os.path.dirname(filename)
if dir_path and not os.path.exists(dir_path):
os.makedirs(dir_path)
response.body = Body(open(filename, mode))
@classmethod
def set_timestamp(cls, filename: str, response: HTTPResponse):
'''Set the Last-Modified timestamp onto the given file.
Args:
filename: The path of the file
response: Response
'''
last_modified = response.fields.get('Last-Modified')
if not last_modified:
return
try:
last_modified = email.utils.parsedate(last_modified)
except ValueError:
_logger.exception('Failed to parse date.')
return
last_modified = time.mktime(last_modified)
os.utime(filename, (time.time(), last_modified))
@classmethod
def save_headers(cls, filename: str, response: HTTPResponse):
'''Prepend the HTTP response header to the file.
Args:
filename: The path of the file
response: Response
'''
new_filename = filename + '-new'
with open(new_filename, 'wb') as new_file:
new_file.write(response.header())
with wpull.util.reset_file_offset(response.body):
response.body.seek(0)
shutil.copyfileobj(response.body, new_file)
os.remove(filename)
os.rename(new_filename, filename)
def process_request(self, request: BaseRequest):
if not self._filename:
self._filename = self._compute_filename(request)
if self._file_continuing and self._filename:
self._process_file_continue_request(request)
return request
def _compute_filename(self, request: BaseRequest):
'''Get the appropriate filename from the request.'''
path = self._path_namer.get_filename(request.url_info)
if os.path.isdir(path):
path += '.f'
else:
dir_name, name = os.path.split(path)
path = os.path.join(anti_clobber_dir_path(dir_name), name)
return path
def _process_file_continue_request(self, request: BaseRequest):
'''Modify the request to resume downloading file.'''
if os.path.exists(self._filename):
size = os.path.getsize(self._filename)
request.set_continue(size)
self._file_continue_requested = True
_logger.debug('Continue file from {0}.', size)
else:
_logger.debug('No file to continue.')
def process_response(self, response: BaseResponse):
if not self._filename:
return
if response.request.url_info.scheme == 'ftp':
response = cast(FTPResponse, response)
if self._file_continue_requested:
self._process_file_continue_ftp_response(response)
else:
self.open_file(self._filename, response)
else:
response = cast(HTTPResponse, response)
code = response.status_code
if self._file_continue_requested:
self._process_file_continue_response(response)
elif 200 <= code <= 299 or 400 <= code:
if self._trust_server_names:
self._rename_with_last_response(response)
if self._content_disposition:
self._rename_with_content_disposition(response)
if self._adjust_extension:
self._append_filename_extension(response)
self.open_file(self._filename, response)
def _process_file_continue_response(self, response: HTTPResponse):
'''Process a partial content response.'''
code = response.status_code
if code == http.client.PARTIAL_CONTENT:
self.open_file(self._filename, response, mode='ab+')
else:
self._raise_cannot_continue_error()
def _process_file_continue_ftp_response(self, response: FTPResponse):
'''Process a restarted content response.'''
if response.request.restart_value and response.restart_value:
self.open_file(self._filename, response, mode='ab+')
else:
self._raise_cannot_continue_error()
def _raise_cannot_continue_error(self):
'''Raise an error when server cannot continue a file.'''
# XXX: I cannot find where wget refuses to resume a file
# when the server does not support range requests. Wget has
# enums that appear to define this case, it is checked throughout
# the code, but the HTTP function doesn't even use them.
# FIXME: unit test is needed for this case
raise IOError(
_('Server not able to continue file download: {filename}.')
.format(filename=self._filename))
def _append_filename_extension(self, response: BaseResponse):
'''Append an HTML/CSS file suffix as needed.'''
if not self._filename:
return
if response.request.url_info.scheme not in ('http', 'https'):
return
if not re.search(r'\.[hH][tT][mM][lL]?$', self._filename) and \
HTMLReader.is_response(response):
self._filename += '.html'
elif not re.search(r'\.[cC][sS][sS]$', self._filename) and \
CSSReader.is_response(response):
self._filename += '.css'
def _rename_with_content_disposition(self, response: HTTPResponse):
'''Rename using the Content-Disposition header.'''
if not self._filename:
return
if response.request.url_info.scheme not in ('http', 'https'):
return
header_value = response.fields.get('Content-Disposition')
if not header_value:
return
filename = parse_content_disposition(header_value)
if filename:
dir_path = os.path.dirname(self._filename)
new_filename = self._path_namer.safe_filename(filename)
self._filename = os.path.join(dir_path, new_filename)
def _rename_with_last_response(self, response):
if not self._filename:
return
if response.request.url_info.scheme not in ('http', 'https'):
return
self._filename = self._compute_filename(response.request)
def save_document(self, response: BaseResponse):
if self._filename and os.path.exists(self._filename):
if self._headers_included:
self.save_headers(self._filename, response)
if self._local_timestamping and \
response.request.url_info.scheme in ('http', 'https'):
self.set_timestamp(self._filename, cast(HTTPResponse, response))
return self._filename
def discard_document(self, response: BaseResponse):
if self._filename and os.path.exists(self._filename):
os.remove(self._filename)
def extra_resource_path(self, suffix: str) -> str:
if self._filename:
return self._filename + suffix
class_skeleton:
class BaseFileWriterSession(BaseWriterSession):
'''Base class for File Writer Sessions.'''
def __init__(self, path_namer: PathNamer,
file_continuing: bool,
headers_included: bool,
local_timestamping: bool,
adjust_extension: bool,
content_disposition: bool,
trust_server_names: bool):
pass
@classmethod
def open_file(cls, filename: str, response: BaseResponse, mode='wb+'):
'''Open a file object on to the Response Body.
Args:
filename: The path where the file is to be saved
response: Response
mode: The file mode
This function will create the directories if they do not exist.
'''
pass
@classmethod
def set_timestamp(cls, filename: str, response: HTTPResponse):
'''Set the Last-Modified timestamp onto the given file.
Args:
filename: The path of the file
response: Response
'''
pass
@classmethod
def save_headers(cls, filename: str, response: HTTPResponse):
'''Prepend the HTTP response header to the file.
Args:
filename: The path of the file
response: Response
'''
pass
def process_request(self, request: BaseRequest):
pass
def _compute_filename(self, request: BaseRequest):
'''Get the appropriate filename from the request.'''
pass
def _process_file_continue_request(self, request: BaseRequest):
'''Modify the request to resume downloading file.'''
pass
def process_response(self, response: BaseResponse):
pass
def _process_file_continue_response(self, response: HTTPResponse):
'''Process a partial content response.'''
pass
def _process_file_continue_ftp_response(self, response: FTPResponse):
'''Process a restarted content response.'''
pass
def _raise_cannot_continue_error(self):
'''Raise an error when server cannot continue a file.'''
pass
def _append_filename_extension(self, response: BaseResponse):
'''Append an HTML/CSS file suffix as needed.'''
pass
def _rename_with_content_disposition(self, response: HTTPResponse):
'''Rename using the Content-Disposition header.'''
pass
def _rename_with_last_response(self, response):
pass
def save_document(self, response: BaseResponse):
pass
def discard_document(self, response: BaseResponse):
pass
def extra_resource_path(self, suffix: str) -> str:
pass
| 21 | 11 | 12 | 2 | 9 | 2 | 3 | 0.2 | 1 | 9 | 6 | 4 | 14 | 9 | 17 | 42 | 231 | 51 | 150 | 49 | 123 | 30 | 127 | 39 | 109 | 9 | 4 | 3 | 49 |

6,541 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/writer.py | wpull.writer.AntiClobberFileWriterSession
human_written_code:
class AntiClobberFileWriterSession(BaseFileWriterSession):
def _compute_filename(self, request: BaseRequest):
original_filename = self._path_namer.get_filename(request.url_info)
dir_name, filename = os.path.split(original_filename)
original_filename = os.path.join(
anti_clobber_dir_path(dir_name), filename
)
candidate_filename = original_filename
for suffix in itertools.count():
if suffix:
candidate_filename = '{0}.{1}'.format(original_filename,
suffix)
if not os.path.exists(candidate_filename):
return candidate_filename
class_skeleton:
class AntiClobberFileWriterSession(BaseFileWriterSession):
def _compute_filename(self, request: BaseRequest):
pass
| 2 | 0 | 15 | 2 | 13 | 0 | 4 | 0 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 43 | 16 | 2 | 14 | 6 | 12 | 0 | 11 | 6 | 9 | 4 | 5 | 2 | 4 |
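The `_compute_filename` override above implements anti-clobber naming: the first free name among `path`, `path.1`, `path.2`, … wins. The same loop restated standalone (a sketch; the `anti_clobber_dir_path` helper and path namer from the original are left out):

```python
import itertools
import os

def first_free_name(path: str) -> str:
    # Mirrors the suffix loop in AntiClobberFileWriterSession._compute_filename:
    # try the bare path first, then append .1, .2, ... until no file exists.
    for suffix in itertools.count():
        candidate = '{0}.{1}'.format(path, suffix) if suffix else path
        if not os.path.exists(candidate):
            return candidate

print(first_free_name('downloads/index.html'))
```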

6,542 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/writer.py | wpull.writer.AntiClobberFileWriter
human_written_code:
class AntiClobberFileWriter(BaseFileWriter):
'''File writer that downloads to a new filename if the original exists.'''
@property
def session_class(self):
return AntiClobberFileWriterSession
class_skeleton:
class AntiClobberFileWriter(BaseFileWriter):
'''File writer that downloads to a new filename if the original exists.'''
@property
def session_class(self):
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 0.25 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 25 | 5 | 0 | 4 | 3 | 1 | 1 | 3 | 2 | 1 | 1 | 5 | 0 | 1 |

6,543 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/warc/recorder_test.py | wpull.warc.recorder_test.TestWARC
human_written_code:
class TestWARC(unittest.TestCase, TempDirMixin):
def setUp(self):
self.set_up_temp_dir()
def tearDown(self):
self.tear_down_temp_dir()
def validate_warc(self, filename, ignore_minor_error=False):
proc = subprocess.Popen(
[sys.executable, '-m', 'warcat', 'verify', filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout_data, stderr_data = proc.communicate()
output = stderr_data + stdout_data
output = output.decode('utf8', 'replace')
if not proc.returncode:
return
if not ignore_minor_error:
raise Exception('Validation failed {}'.format(output))
else:
if re.search(r'(VerifyProblem:.+ True\))|(.+Error:)', output):
raise Exception('Validation failed\n{}'.format(output))
def test_warc_recorder(self):
file_prefix = 'asdf'
warc_filename = 'asdf.warc'
cdx_filename = 'asdf.cdx'
warc_recorder = WARCRecorder(
file_prefix,
params=WARCRecorderParams(
compress=False,
extra_fields=[('Extra-field', 'my_extra_field')],
cdx=True,
),
)
request = HTTPRequest('http://example.com/')
request.prepare_for_send()
request.address = ('0.0.0.0', 80)
request.prepare_for_send()
response = HTTPResponse(200, 'OK')
response.body = Body()
with wpull.util.reset_file_offset(response.body):
response.body.write(b'KITTEH DOGE')
session = warc_recorder.new_http_recorder_session()
session.begin_request(request)
session.request_data(request.to_bytes())
session.end_request(request)
session.begin_response(response)
session.response_data(response.to_bytes())
session.response_data(response.body.content())
session.end_response(response)
session.close()
_logger.info('FINISHED')
warc_recorder.close()
with open(warc_filename, 'rb') as in_file:
warc_file_content = in_file.read()
with open(cdx_filename, 'rb') as in_file:
cdx_file_content = in_file.read()
self.assertTrue(warc_file_content.startswith(b'WARC/1.0'))
self.assertIn(b'WARC-Type: warcinfo\r\n', warc_file_content)
self.assertIn(b'Content-Type: application/warc-fields',
warc_file_content)
self.assertIn(b'WARC-Date: ', warc_file_content)
self.assertIn(b'WARC-Record-ID: <urn:uuid:', warc_file_content)
self.assertIn(b'WARC-Block-Digest: sha1:', warc_file_content)
self.assertIn(b'WARC-Payload-Digest: sha1:', warc_file_content)
self.assertIn(b'WARC-Type: request\r\n', warc_file_content)
self.assertIn(b'WARC-Target-URI: http://', warc_file_content)
self.assertIn(b'Content-Type: application/http;msgtype=request',
warc_file_content)
self.assertIn(b'WARC-Type: response', warc_file_content)
self.assertIn(b'WARC-Concurrent-To: <urn:uuid:', warc_file_content)
self.assertIn(b'Content-Type: application/http;msgtype=response',
warc_file_content)
self.assertIn(
'Wpull/{0}'.format(wpull.version.__version__).encode('utf-8'),
warc_file_content
)
self.assertIn(
'Python/{0}'.format(
wpull.util.python_version()).encode('utf-8'),
warc_file_content
)
self.assertIn(b'Extra-Field: my_extra_field', warc_file_content)
self.assertIn(b'GET / HTTP', warc_file_content)
self.assertIn(b'KITTEH DOGE', warc_file_content)
self.assertIn(b'FINISHED', warc_file_content)
self.assertIn(b'WARC-Target-URI: urn:X-wpull:log', warc_file_content)
self.assertIn(b'Content-Length:', warc_file_content)
self.assertNotIn(b'Content-Length: 0', warc_file_content)
cdx_lines = cdx_file_content.split(b'\n')
cdx_labels = cdx_lines[0].strip().split(b' ')
cdx_fields = cdx_lines[1].split(b' ')
print(cdx_lines)
self.assertEqual(3, len(cdx_lines))
self.assertEqual(10, len(cdx_labels))
self.assertEqual(9, len(cdx_fields))
self.assertTrue(cdx_lines[0].startswith(b' CDX'))
self.assertEqual(b'http://example.com/', cdx_fields[0])
self.assertEqual(b'-', cdx_fields[2])
self.assertEqual(b'200', cdx_fields[3])
self.assertNotEqual(b'-', cdx_fields[4])
self.assertNotEqual(b'0', cdx_fields[5])
self.assertNotEqual(b'0', cdx_fields[6])
self.assertEqual(
os.path.basename(warc_filename), cdx_fields[7].decode('ascii'))
length = int(cdx_fields[5])
offset = int(cdx_fields[6])
with open(warc_filename, 'rb') as in_file:
in_file.seek(offset)
data = in_file.read(length)
assert len(data) == length
self.assertEqual(b'WARC/1.0', data[:8])
self.assertIn(b'KITTEH DOGE', data)
self.validate_warc(warc_filename)
def test_warc_recorder_ftp(self):
file_prefix = 'asdf'
warc_filename = 'asdf.warc'
warc_recorder = WARCRecorder(
file_prefix,
params=WARCRecorderParams(compress=False)
)
request = FTPRequest('ftp://example.com/example.txt')
request.address = ('0.0.0.0', 80)
response = FTPResponse()
response.reply = FTPReply(200, 'OK')
response.body = Body()
response.data_address = ('0.0.0.0', 12345)
with wpull.util.reset_file_offset(response.body):
response.body.write(b'KITTEH DOGE')
session = warc_recorder.new_ftp_recorder_session()
session.begin_control(request)
session.control_send_data(b'GIMMEH example.txt')
session.control_receive_data(b'200 OK, no need to yell.')
session.begin_transfer(response)
session.transfer_receive_data(b'KITTEH DOGE')
session.end_transfer(response)
session.end_control(response)
session.close()
warc_recorder.close()
with open(warc_filename, 'rb') as in_file:
warc_file_content = in_file.read()
self.assertTrue(warc_file_content.startswith(b'WARC/1.0'))
self.assertIn(b'WARC-Type: warcinfo\r\n', warc_file_content)
self.assertIn(b'Content-Type: application/warc-fields',
warc_file_content)
self.assertIn(b'WARC-Date: ', warc_file_content)
self.assertIn(b'WARC-Record-ID: <urn:uuid:', warc_file_content)
self.assertIn(b'WARC-Block-Digest: sha1:', warc_file_content)
self.assertNotIn(b'WARC-Payload-Digest: sha1:', warc_file_content)
self.assertIn(b'WARC-Type: resource\r\n', warc_file_content)
self.assertIn(b'WARC-Target-URI: ftp://', warc_file_content)
self.assertIn(b'Content-Type: application/octet-stream',
warc_file_content)
self.assertIn(b'WARC-Type: metadata', warc_file_content)
self.assertIn(b'WARC-Concurrent-To: <urn:uuid:', warc_file_content)
self.assertIn(b'Content-Type: text/x-ftp-control-conversation',
warc_file_content)
self.assertIn(
'Wpull/{0}'.format(wpull.version.__version__).encode('utf-8'),
warc_file_content
)
self.assertIn(
'Python/{0}'.format(
wpull.util.python_version()).encode('utf-8'),
warc_file_content
)
self.assertIn(b'KITTEH DOGE', warc_file_content)
self.assertIn(b'* Opening control connection to', warc_file_content)
self.assertIn(b'* Kept control connection to', warc_file_content)
self.assertIn(b'* Opened data connection to ', warc_file_content)
self.assertIn(b'* Closed data connection to ', warc_file_content)
self.assertIn(b'> GIMMEH example.txt', warc_file_content)
self.assertIn(b'< 200 OK, no need to yell.', warc_file_content)
# Ignore Concurrent Record ID not seen yet
self.validate_warc(warc_filename, ignore_minor_error=True)
with open(warc_filename, 'r+b') as in_file:
# Intentionally modify the contents
in_file.seek(355)
in_file.write(b'f')
with self.assertRaises(Exception):
# Sanity check that it actually raises error on bad digest
self.validate_warc(warc_filename, ignore_minor_error=True)
def test_warc_recorder_max_size(self):
file_prefix = 'asdf'
cdx_filename = 'asdf.cdx'
warc_recorder = WARCRecorder(
file_prefix,
params=WARCRecorderParams(
compress=False,
extra_fields=[('Extra-field', 'my_extra_field')],
cdx=True, max_size=1,
)
)
request = HTTPRequest('http://example.com/1')
request.address = ('0.0.0.0', 80)
response = HTTPResponse(200, 'OK')
response.body = Body()
with wpull.util.reset_file_offset(response.body):
response.body.write(b'KITTEH DOGE')
session = warc_recorder.new_http_recorder_session()
session.begin_request(request)
session.request_data(request.to_bytes())
session.end_request(request)
session.begin_response(response)
session.response_data(response.to_bytes())
session.response_data(response.body.content())
session.end_response(response)
session.close()
request = HTTPRequest('http://example.com/2')
request.address = ('0.0.0.0', 80)
response = HTTPResponse(200, 'OK')
response.body = Body()
with wpull.util.reset_file_offset(response.body):
response.body.write(b'DOGE KITTEH')
session = warc_recorder.new_http_recorder_session()
session.begin_request(request)
session.request_data(request.to_bytes())
session.end_request(request)
session.begin_response(response)
session.response_data(response.to_bytes())
session.response_data(response.body.content())
session.end_response(response)
session.close()
_logger.info('FINISHED')
warc_recorder.close()
with open('asdf-00000.warc', 'rb') as in_file:
warc_file_content = in_file.read()
self.assertTrue(warc_file_content.startswith(b'WARC/1.0'))
self.assertIn(b'WARC-Type: warcinfo', warc_file_content)
self.assertIn(b'KITTEH DOGE', warc_file_content)
with open('asdf-00001.warc', 'rb') as in_file:
warc_file_content = in_file.read()
self.assertTrue(warc_file_content.startswith(b'WARC/1.0'))
self.assertIn(b'WARC-Type: warcinfo', warc_file_content)
self.assertIn(b'DOGE KITTEH', warc_file_content)
with open(cdx_filename, 'rb') as in_file:
cdx_file_content = in_file.read()
cdx_lines = cdx_file_content.split(b'\n')
cdx_labels = cdx_lines[0].strip().split(b' ')
print(cdx_lines)
self.assertEqual(4, len(cdx_lines))
self.assertEqual(10, len(cdx_labels))
self.assertIn(b'http://example.com/1', cdx_file_content)
self.assertIn(b'http://example.com/2', cdx_file_content)
with open('asdf-meta.warc', 'rb') as in_file:
meta_file_content = in_file.read()
self.assertIn(b'FINISHED', meta_file_content)
self.validate_warc('asdf-00000.warc')
self.validate_warc('asdf-00001.warc')
self.validate_warc('asdf-meta.warc')
def test_warc_recorder_rollback(self):
warc_filename = 'asdf.warc'
warc_prefix = 'asdf'
with open(warc_filename, 'wb') as warc_file:
warc_file.write(b'a' * 10)
warc_recorder = WARCRecorder(
warc_prefix,
params=WARCRecorderParams(
compress=False,
)
)
request = HTTPRequest('http://example.com/')
request.address = ('0.0.0.0', 80)
response = HTTPResponse(200, 'OK')
response.body = Body()
with wpull.util.reset_file_offset(response.body):
response.body.write(b'KITTEH DOGE')
session = warc_recorder.new_http_recorder_session()
session.begin_request(request)
session.request_data(request.to_bytes())
class BadRecord(WARCRecord):
def __init__(self, original_record):
super().__init__()
self.block_file = original_record.block_file
self.fields = original_record.fields
def __iter__(self):
for dummy in range(1000):
yield b"where's my elephant?"
raise OSError('Oops')
session._request_record = BadRecord(session._request_record)
original_offset = os.path.getsize(warc_filename)
with self.assertRaises((OSError, IOError)):
session.end_request(request)
new_offset = os.path.getsize(warc_filename)
self.assertEqual(new_offset, original_offset)
self.assertFalse(os.path.exists(warc_filename + '-wpullinc'))
_logger.debug('original offset {0}'.format(original_offset))
def test_warc_recorder_journal(self):
warc_filename = 'asdf.warc'
warc_prefix = 'asdf'
warc_recorder = WARCRecorder(
warc_prefix,
params=WARCRecorderParams(
compress=False,
)
)
request = HTTPRequest('http://example.com/')
request.address = ('0.0.0.0', 80)
response = HTTPResponse(200, 'OK')
response.body = Body()
with wpull.util.reset_file_offset(response.body):
response.body.write(b'KITTEH DOGE')
session = warc_recorder.new_http_recorder_session()
session.begin_request(request)
session.request_data(request.to_bytes())
test_instance = self
class MockRecord(WARCRecord):
def __init__(self, original_record):
super().__init__()
self.block_file = original_record.block_file
self.fields = original_record.fields
def __iter__(self):
print(list(os.walk('.')))
test_instance.assertTrue(
os.path.exists(warc_filename + '-wpullinc')
)
for dummy in range(1000):
yield b"where's my elephant?"
session._request_record = MockRecord(session._request_record)
session.end_request(request)
self.assertFalse(os.path.exists(warc_filename + '-wpullinc'))
def test_warc_recorder_journal_raise_error(self):
warc_filename = 'asdf.warc'
warc_prefix = 'asdf'
with open(warc_filename + '-wpullinc', 'w'):
pass
with self.assertRaises(OSError):
WARCRecorder(
warc_prefix,
params=WARCRecorderParams(
compress=False,
)
)
def test_cdx_dedup(self):
url_table = URLTable()
warc_recorder = WARCRecorder(
'asdf',
params=WARCRecorderParams(
compress=False, cdx=True, url_table=url_table
)
)
url_table.add_visits([
(
'http://example.com/fennec',
'<urn:uuid:8a534d31-bd06-4056-8a0f-bdc5fd611036>',
'B62D734VFEKIDLFAB7TTSCSZF64BKAYJ'
)
])
request = HTTPRequest('http://example.com/fennec')
request.address = ('0.0.0.0', 80)
response = HTTPResponse(200, 'OK')
response.body = Body()
revisit_response_header_size = len(response.to_bytes())
with wpull.util.reset_file_offset(response.body):
response.body.write(b'kitbit')
session = warc_recorder.new_http_recorder_session()
session.begin_request(request)
session.request_data(request.to_bytes())
session.end_request(request)
session.begin_response(response)
session.response_data(response.to_bytes())
session.response_data(response.body.content())
session.end_response(response)
session.close()
request = HTTPRequest('http://example.com/horse')
request.address = ('0.0.0.0', 80)
response = HTTPResponse(200, 'OKaaaaaaaaaaaaaaaaaaaaaaaaaa')
response.body = Body()
with wpull.util.reset_file_offset(response.body):
response.body.write(b'kitbit')
session = warc_recorder.new_http_recorder_session()
session.begin_request(request)
session.request_data(request.to_bytes())
session.end_request(request)
session.begin_response(response)
session.response_data(response.to_bytes())
session.response_data(response.body.content())
session.end_response(response)
session.close()
_logger.info('FINISHED')
warc_recorder.close()
with open('asdf.warc', 'rb') as in_file:
warc_file_content = in_file.read()
with open('asdf.cdx', 'rb') as in_file:
cdx_file_content = in_file.read()
self.assertTrue(warc_file_content.startswith(b'WARC/1.0'))
self.assertIn(b'WARC-Type: revisit\r\n', warc_file_content)
self.assertIn(
b'WARC-Refers-To: '
b'<urn:uuid:8a534d31-bd06-4056-8a0f-bdc5fd611036>\r\n',
warc_file_content
)
self.assertIn(b'WARC-Truncated: length\r\n', warc_file_content)
self.assertIn(
b'WARC-Profile: http://netpreserve.org/warc/1.0/revisit/'
b'identical-payload-digest\r\n',
warc_file_content
)
self.assertIn(
b'Content-Length: ' +
str(revisit_response_header_size).encode('ascii') + b'\r\n',
warc_file_content
)
self.assertIn(
b'WARC-Target-URI: http://example.com/fennec\r\n',
warc_file_content
)
self.assertIn(
b'WARC-Target-URI: http://example.com/horse\r\n', warc_file_content
)
self.assertEqual(
1,
warc_file_content.count(b'kitbit')
)
self.assertIn(b'http://example.com/horse ', cdx_file_content)
def test_warc_move(self):
file_prefix = 'asdf'
warc_filename = 'asdf.warc'
cdx_filename = 'asdf.cdx'
os.mkdir('./blah/')
warc_recorder = WARCRecorder(
file_prefix,
params=WARCRecorderParams(
compress=False,
cdx=True,
move_to='./blah/'
),
)
warc_recorder.close()
self.assertTrue(os.path.exists('./blah/' + warc_filename))
self.assertTrue(os.path.exists('./blah/' + cdx_filename))
def test_warc_move_max_size(self):
file_prefix = 'asdf'
cdx_filename = 'asdf.cdx'
os.mkdir('./blah/')
warc_recorder = WARCRecorder(
file_prefix,
params=WARCRecorderParams(
compress=False,
cdx=True,
move_to='./blah/',
max_size=1,
),
)
request = HTTPRequest('http://example.com/1')
request.address = ('0.0.0.0', 80)
response = HTTPResponse(200, 'OK')
response.body = Body()
with wpull.util.reset_file_offset(response.body):
response.body.write(b'BLAH')
session = warc_recorder.new_http_recorder_session()
session.begin_request(request)
session.request_data(request.to_bytes())
session.end_request(request)
session.begin_response(response)
session.response_data(response.to_bytes())
session.response_data(response.body.content())
session.end_response(response)
session.close()
warc_recorder.close()
self.assertTrue(os.path.exists('./blah/asdf-00000.warc'))
self.assertTrue(os.path.exists('./blah/asdf-00001.warc'))
self.assertTrue(os.path.exists('./blah/asdf-meta.warc'))
self.assertTrue(os.path.exists('./blah/' + cdx_filename))
def test_warc_max_size_and_append(self):
file_prefix = 'asdf'
with open('asdf-00000.warc', 'w'):
pass
with open('asdf-00001.warc', 'w'):
pass
warc_recorder = WARCRecorder(
file_prefix,
params=WARCRecorderParams(
compress=False,
max_size=1,
appending=True
),
)
request = HTTPRequest('http://example.com/1')
request.address = ('0.0.0.0', 80)
response = HTTPResponse(200, 'OK')
response.body = Body()
with wpull.util.reset_file_offset(response.body):
response.body.write(b'BLAH')
session = warc_recorder.new_http_recorder_session()
session.begin_request(request)
session.request_data(request.to_bytes())
session.end_request(request)
session.begin_response(response)
session.response_data(response.to_bytes())
session.response_data(response.body.content())
session.end_response(response)
session.close()
warc_recorder.close()
self.assertTrue(os.path.exists('asdf-00000.warc'))
self.assertTrue(os.path.exists('asdf-00001.warc'))
self.assertTrue(os.path.exists('asdf-00002.warc'))
self.assertTrue(os.path.exists('asdf-00003.warc'))
self.assertTrue(os.path.exists('asdf-meta.warc'))
self.assertEqual(0, os.path.getsize('asdf-00000.warc'))
self.assertEqual(0, os.path.getsize('asdf-00001.warc'))
self.assertNotEqual(0, os.path.getsize('asdf-00002.warc'))
self.assertNotEqual(0, os.path.getsize('asdf-00003.warc'))
self.assertNotEqual(0, os.path.getsize('asdf-meta.warc'))
|
class TestWARC(unittest.TestCase, TempDirMixin):
def setUp(self):
pass
def tearDown(self):
pass
def validate_warc(self, filename, ignore_minor_error=False):
pass
def test_warc_recorder(self):
pass
def test_warc_recorder_ftp(self):
pass
def test_warc_recorder_max_size(self):
pass
def test_warc_recorder_rollback(self):
pass
class BadRecord(WARCRecord):
def __init__(self, original_record):
pass
def __iter__(self):
pass
def test_warc_recorder_journal(self):
pass
class MockRecord(WARCRecord):
def __init__(self, original_record):
pass
def __iter__(self):
pass
def test_warc_recorder_journal_raise_error(self):
pass
def test_cdx_dedup(self):
pass
def test_warc_move(self):
pass
def test_warc_move_max_size(self):
pass
def test_warc_max_size_and_append(self):
pass
| 20 | 0 | 37 | 7 | 30 | 0 | 1 | 0.01 | 2 | 9 | 4 | 0 | 13 | 0 | 13 | 88 | 625 | 122 | 500 | 107 | 480 | 3 | 387 | 102 | 367 | 4 | 2 | 2 | 22 |
6,544 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/urlrewrite.py
|
wpull.urlrewrite.URLRewriter
|
class URLRewriter(object):
'''Clean up URLs.'''
def __init__(self, hash_fragment: bool=False, session_id: bool=False):
self._hash_fragment_enabled = hash_fragment
self._session_id_enabled = session_id
def rewrite(self, url_info: URLInfo) -> URLInfo:
'''Rewrite the given URL.'''
if url_info.scheme not in ('http', 'https'):
return url_info
if self._session_id_enabled:
url = '{scheme}://{authority}{path}?{query}#{fragment}'.format(
scheme=url_info.scheme,
authority=url_info.authority,
path=strip_path_session_id(url_info.path),
query=strip_query_session_id(url_info.query),
fragment=url_info.fragment,
)
url_info = parse_url_or_log(url) or url_info
if self._hash_fragment_enabled and url_info.fragment.startswith('!'):
if url_info.query:
url = '{}&_escaped_fragment_={}'.format(url_info.url,
url_info.fragment[1:])
else:
url = '{}?_escaped_fragment_={}'.format(url_info.url,
url_info.fragment[1:])
url_info = parse_url_or_log(url) or url_info
return url_info
|
class URLRewriter(object):
'''Clean up URLs.'''
def __init__(self, hash_fragment: bool=False, session_id: bool=False):
pass
def rewrite(self, url_info: URLInfo) -> URLInfo:
'''Rewrite the given URL.'''
pass
| 3 | 2 | 15 | 2 | 12 | 1 | 3 | 0.12 | 1 | 2 | 1 | 0 | 2 | 2 | 2 | 2 | 33 | 6 | 25 | 6 | 22 | 3 | 16 | 6 | 13 | 5 | 1 | 2 | 6 |
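A minimal usage sketch for the URLRewriter record above. The URLRewriter import path follows this record's file path; the URLInfo import path is an assumption based on the companion test record that follows.
from wpull.urlrewrite import URLRewriter
from wpull.url import URLInfo  # assumed module path for URLInfo

rewriter = URLRewriter(hash_fragment=True, session_id=True)
# A '#!' fragment is rewritten into the _escaped_fragment_ query form.
info = rewriter.rewrite(URLInfo.parse('http://example.com/#!page'))
print(info.url)  # expected: http://example.com/?_escaped_fragment_=page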
6,545 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/urlrewrite_test.py
|
wpull.urlrewrite_test.TestURLRewrite
|
class TestURLRewrite(unittest.TestCase):
def test_rewriter(self):
rewriter = URLRewriter(hash_fragment=True, session_id=True)
self.assertEquals(
'http://example.com/',
rewriter.rewrite(URLInfo.parse('http://example.com/')).url
)
self.assertEquals(
'http://example.com/',
rewriter.rewrite(URLInfo.parse('http://example.com/#hashtag!')).url
)
self.assertEquals(
'https://groups.google.com/forum/?_escaped_fragment_=forum/python-tulip',
rewriter.rewrite(URLInfo.parse('https://groups.google.com/forum/#!forum/python-tulip')).url
)
self.assertEquals(
'https://groups.google.com/forum/?stupid_hash_fragments&_escaped_fragment_=forum/python-tulip',
rewriter.rewrite(URLInfo.parse(
'https://groups.google.com/forum/?stupid_hash_fragments#!forum/python-tulip'
)).url
)
self.assertEquals(
'https://groups.google.com/forum/?stupid_hash_fragments=farts&_escaped_fragment_=forum/python-tulip',
rewriter.rewrite(URLInfo.parse(
'https://groups.google.com/forum/?stupid_hash_fragments=farts#!forum/python-tulip'
)).url
)
self.assertEquals(
'http://example.com/',
rewriter.rewrite(URLInfo.parse(
'http://example.com/?sid=0123456789abcdefghijklemopqrstuv'
)).url
)
self.assertEquals(
'http://example.com/?horse=dog&',
rewriter.rewrite(URLInfo.parse(
'http://example.com/?horse=dog&sid=0123456789abcdefghijklemopqrstuv'
)).url
)
def test_strip_session_id_from_url_path(self):
self.assertEqual(
'/asdf',
strip_path_session_id("/asdf"),
)
self.assertEqual(
'/asdf/asdf.aspx',
strip_path_session_id("/asdf/asdf.aspx"),
)
self.assertEqual(
strip_path_session_id("/(S(4hqa0555fwsecu455xqckv45))/mileg.aspx"),
'/mileg.aspx',
'Check ASP_SESSIONID2'
)
self.assertEqual(
strip_path_session_id("/(4hqa0555fwsecu455xqckv45)/mileg.aspx"),
'/mileg.aspx',
'Check ASP_SESSIONID2 (again)'
)
self.assertEqual(
strip_path_session_id("/(a(4hqa0555fwsecu455xqckv45)S(4hqa0555fwsecu455xqckv45)f(4hqa0555fwsecu455xqckv45))/mileg.aspx?page=sessionschedules"),
'/mileg.aspx?page=sessionschedules',
'Check ASP_SESSIONID3'
)
self.assertEqual(
strip_path_session_id("/photos/36050182@N05/"),
'/photos/36050182@N05/',
"'@' in path"
)
def test_strip_session_id_from_url_query(self):
str32id = "0123456789abcdefghijklemopqrstuv"
url = "jsessionid=" + str32id
self.assertEqual(
strip_query_session_id(url),
''
)
url = "jsessionid=" + str32id + '0'
self.assertEqual(
strip_query_session_id(url),
'jsessionid=0123456789abcdefghijklemopqrstuv0',
"Test that we don't strip if not 32 chars only."
)
url = "jsessionid=" + str32id + "&x=y"
self.assertEqual(
strip_query_session_id(url),
'x=y',
"Test what happens when followed by another key/value pair."
)
url = "one=two&jsessionid=" + str32id + "&x=y"
self.assertEqual(
strip_query_session_id(url),
'one=two&x=y',
"Test what happens when followed by another key/value pair and"
"prefixed by a key/value pair."
)
url = "one=two&jsessionid=" + str32id
self.assertEqual(
strip_query_session_id(url),
'one=two&',
"Test what happens when prefixed by a key/value pair."
)
url = "aspsessionidABCDEFGH=" + "ABCDEFGHIJKLMNOPQRSTUVWX" + "&x=y"
self.assertEqual(
strip_query_session_id(url),
'x=y',
"Test aspsession."
)
url = "phpsessid=" + str32id + "&x=y"
self.assertEqual(
strip_query_session_id(url),
'x=y',
"Test archive phpsession."
)
url = "one=two&phpsessid=" + str32id + "&x=y"
self.assertEqual(
strip_query_session_id(url),
'one=two&x=y',
"With prefix too."
)
url = "one=two&phpsessid=" + str32id
self.assertEqual(
strip_query_session_id(url),
'one=two&',
"With only prefix"
)
url = "sid=9682993c8daa2c5497996114facdc805" + "&x=y";
self.assertEqual(
strip_query_session_id(url),
'x=y',
"Test sid."
)
url = "sid=9682993c8daa2c5497996114facdc805" + "&" + "jsessionid=" + str32id
self.assertEqual(
strip_query_session_id(url),
'',
"Igor test."
)
url = "CFID=1169580&CFTOKEN=48630702&dtstamp=22%2F08%2F2006%7C06%3A58%3A11"
self.assertEqual(
strip_query_session_id(url),
'dtstamp=22%2F08%2F2006%7C06%3A58%3A11'
)
url = "CFID=12412453&CFTOKEN=15501799&dt=19_08_2006_22_39_28"
self.assertEqual(
strip_query_session_id(url),
'dt=19_08_2006_22_39_28'
)
url = "CFID=14475712&CFTOKEN=2D89F5AF-3048-2957-DA4EE4B6B13661AB&r=468710288378&m=forgotten"
self.assertEqual(
strip_query_session_id(url),
'r=468710288378&m=forgotten'
)
url = "CFID=16603925&CFTOKEN=2AE13EEE-3048-85B0-56CEDAAB0ACA44B8"
self.assertEqual(
strip_query_session_id(url),
''
)
url = "CFID=4308017&CFTOKEN=63914124&requestID=200608200458360%2E39414378"
self.assertEqual(
strip_query_session_id(url),
'requestID=200608200458360%2E39414378'
)
|
class TestURLRewrite(unittest.TestCase):
def test_rewriter(self):
pass
def test_strip_session_id_from_url_path(self):
pass
def test_strip_session_id_from_url_query(self):
pass
| 4 | 0 | 60 | 7 | 53 | 1 | 1 | 0.02 | 1 | 2 | 2 | 0 | 3 | 0 | 3 | 75 | 185 | 24 | 161 | 7 | 157 | 4 | 51 | 7 | 47 | 1 | 2 | 0 | 3 |
6,546 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/util.py
|
wpull.util.ASCIIStreamWriter
|
class ASCIIStreamWriter(codecs.StreamWriter):
'''A Stream Writer that encodes everything to ASCII.
By default, the replacement character is a Python backslash sequence.
'''
DEFAULT_ERROR = 'backslashreplace'
def __init__(self, stream, errors=DEFAULT_ERROR):
codecs.StreamWriter.__init__(self, stream, errors)
def encode(self, instance, errors=DEFAULT_ERROR):
return instance.encode('ascii', errors)
def decode(self, instance, errors=DEFAULT_ERROR):
return instance.encode('ascii', errors)
def write(self, instance):
if hasattr(instance, 'encode'):
instance = instance.encode('ascii', self.errors)
if hasattr(instance, 'decode'):
instance = instance.decode('ascii', self.errors)
self.stream.write(instance)
def writelines(self, list_instance):
for item in list_instance:
self.write(item)
|
class ASCIIStreamWriter(codecs.StreamWriter):
'''A Stream Writer that encodes everything to ASCII.
By default, the replacement character is a Python backslash sequence.
'''
def __init__(self, stream, errors=DEFAULT_ERROR):
pass
def encode(self, instance, errors=DEFAULT_ERROR):
pass
def decode(self, instance, errors=DEFAULT_ERROR):
pass
def write(self, instance):
pass
def writelines(self, list_instance):
pass
| 6 | 1 | 3 | 0 | 3 | 0 | 2 | 0.18 | 1 | 0 | 0 | 0 | 5 | 0 | 5 | 16 | 28 | 8 | 17 | 8 | 11 | 3 | 17 | 8 | 11 | 3 | 2 | 1 | 8 |
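A short sketch of ASCIIStreamWriter from the record above. Note that write() encodes to ASCII and then decodes back to str, so the wrapped stream should be a text stream; non-ASCII characters come out as backslash escapes under the default 'backslashreplace' error handler.
import io
from wpull.util import ASCIIStreamWriter

buffer = io.StringIO()
writer = ASCIIStreamWriter(buffer)
writer.write('fennec \u2713')
print(buffer.getvalue())  # 'fennec \\u2713' -- the escape sequence, not the glyph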
6,547 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/util.py
|
wpull.util.GzipPickleStream
|
class GzipPickleStream(PickleStream):
'''gzip compressed pickle stream.'''
def __init__(self, filename=None, file=None, mode='rb', **kwargs):
if file:
self._gzip_file = gzip.GzipFile(fileobj=file, mode=mode)
else:
self._gzip_file = gzip.GzipFile(filename, mode=mode)
super().__init__(file=self._gzip_file, mode=mode, **kwargs)
def close(self):
self._gzip_file.close()
super().close()
|
class GzipPickleStream(PickleStream):
'''gzip compressed pickle stream.'''
def __init__(self, filename=None, file=None, mode='rb', **kwargs):
pass
def close(self):
pass
| 3 | 1 | 5 | 1 | 5 | 0 | 2 | 0.1 | 1 | 2 | 0 | 0 | 2 | 1 | 2 | 7 | 13 | 2 | 10 | 4 | 7 | 1 | 9 | 4 | 6 | 2 | 2 | 1 | 3 |
6,548 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/util.py
|
wpull.util.PickleStream
|
class PickleStream(object):
'''Pickle stream helper.'''
def __init__(self, filename=None, file=None, mode='rb',
protocol=pickle.DEFAULT_PROTOCOL):
if file:
self._file = file
else:
self._file = open(filename, mode)
self._protocol = protocol
def dump(self, obj):
'''Pickle an object.'''
pickle.dump(obj, self._file, protocol=self._protocol)
def load(self):
'''Unpickle an object.'''
return pickle.load(self._file)
def iter_load(self):
'''Unpickle objects.'''
while True:
try:
yield pickle.load(self._file)
except EOFError:
break
def close(self):
'''Close stream.'''
self._file.close()
|
class PickleStream(object):
'''Pickle stream helper.'''
def __init__(self, filename=None, file=None, mode='rb',
protocol=pickle.DEFAULT_PROTOCOL):
pass
def dump(self, obj):
'''Pickle an object.'''
pass
def load(self):
'''Unpickle an object.'''
pass
def iter_load(self):
'''Unpickle objects.'''
pass
def close(self):
'''Close stream.'''
pass
| 6 | 5 | 5 | 0 | 4 | 1 | 2 | 0.25 | 1 | 1 | 0 | 1 | 5 | 2 | 5 | 5 | 30 | 5 | 20 | 9 | 13 | 5 | 18 | 8 | 12 | 3 | 1 | 2 | 8 |
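A round-trip sketch for the two pickle stream helpers above, mirroring the usage in the TestUtil record that follows:
import os
import tempfile
from wpull.util import GzipPickleStream

path = os.path.join(tempfile.mkdtemp(), 'numbers.pickle.gz')
stream = GzipPickleStream(path, mode='wb')
for num in range(3):
    stream.dump(num)  # pickle each object into the gzip-compressed file
stream.close()
stream = GzipPickleStream(path, mode='rb')
print(list(stream.iter_load()))  # [0, 1, 2]
stream.close()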
6,549 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/util_test.py
|
wpull.util_test.TestUtil
|
class TestUtil(unittest.TestCase):
def test_datetime_str(self):
self.assertEqual(20, len(datetime_str()))
def test_parse_iso8601_str(self):
self.assertEqual(10, parse_iso8601_str('1970-01-01T00:00:10Z'))
def test_python_version(self):
version_string = python_version()
nums = tuple([int(n) for n in version_string.split('.')])
self.assertEqual(3, len(nums))
self.assertEqual(nums, sys.version_info[0:3])
def test_filter_pem(self):
unclean = (b'Kitten\n'
b'-----BEGIN CERTIFICATE-----\n'
b'ABCDEFG\n'
b'-----END CERTIFICATE-----\n'
b'Puppy\n'
b'-----BEGIN CERTIFICATE-----\n'
b'QWERTY\n'
b'-----END CERTIFICATE-----\n'
b'Kit\n')
clean = {
(
b'-----BEGIN CERTIFICATE-----\n'
b'ABCDEFG\n'
b'-----END CERTIFICATE-----\n'
),
(
b'-----BEGIN CERTIFICATE-----\n'
b'QWERTY\n'
b'-----END CERTIFICATE-----\n'
)
}
self.assertEqual(clean, filter_pem(unclean))
def test_is_ascii(self):
self.assertTrue(is_ascii('abc'))
self.assertFalse(is_ascii('😤'))
def test_close_on_error(self):
class MyObject(object):
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def oops(self):
with close_on_error(self.close):
raise ValueError()
my_object = MyObject()
self.assertRaises(ValueError, my_object.oops)
self.assertTrue(my_object.closed)
def test_get_exception_message(self):
self.assertEqual('oops', get_exception_message(ValueError('oops')))
try:
raise ValueError('oops')
except ValueError as error:
self.assertEqual('oops', get_exception_message(error))
self.assertEqual('ValueError', get_exception_message(ValueError()))
try:
raise ValueError
except ValueError as error:
self.assertEqual('ValueError', get_exception_message(error))
try:
raise ValueError()
except ValueError as error:
self.assertEqual('ValueError', get_exception_message(error))
self.assertEqual(
'NoNameservers', get_exception_message(NoNameservers())
)
try:
raise NoNameservers
except NoNameservers as error:
self.assertEqual(
'NoNameservers', get_exception_message(error)
)
try:
raise NoNameservers()
except NoNameservers as error:
self.assertEqual(
'NoNameservers', get_exception_message(error)
)
def test_pickle_stream_filename(self):
with tempfile.TemporaryDirectory() as temp_dir:
filename = os.path.join(temp_dir, 'blah.pickle')
stream = GzipPickleStream(filename, mode='wb')
for num in range(10):
stream.dump(num)
stream = GzipPickleStream(filename, mode='rb')
for num, obj in enumerate(stream.iter_load()):
self.assertEqual(num, obj)
def test_pickle_stream_file_obj(self):
with tempfile.TemporaryDirectory() as temp_dir:
filename = os.path.join(temp_dir, 'blah.pickle')
file = open(filename, mode='wb+')
stream = GzipPickleStream(file=file, mode='wb')
for num in range(10):
stream.dump(num)
stream = GzipPickleStream(file=file, mode='rb')
for num, obj in enumerate(stream.iter_load()):
self.assertEqual(num, obj)
|
class TestUtil(unittest.TestCase):
def test_datetime_str(self):
pass
def test_parse_iso8601_str(self):
pass
def test_python_version(self):
pass
def test_filter_pem(self):
pass
def test_is_ascii(self):
pass
def test_close_on_error(self):
pass
class MyObject(object):
def __init__(self):
pass
def close(self):
pass
def oops(self):
pass
def test_get_exception_message(self):
pass
def test_pickle_stream_filename(self):
pass
def test_pickle_stream_file_obj(self):
pass
| 14 | 0 | 10 | 2 | 9 | 0 | 2 | 0 | 1 | 8 | 2 | 0 | 9 | 0 | 9 | 81 | 123 | 26 | 97 | 32 | 83 | 0 | 72 | 29 | 58 | 6 | 2 | 2 | 21 |
6,550 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/version_test.py
|
wpull.version_test.TestVersion
|
class TestVersion(unittest.TestCase):
def test_valid_version_str(self):
StrictVersion(wpull.version.__version__)
def test_version_string_builder(self):
self.assertEquals(
(0, 0, 0, 'final', 0),
get_version_tuple('0.0')
)
self.assertEquals(
(0, 1, 0, 'final', 0),
get_version_tuple('0.1')
)
self.assertEquals(
(0, 1, 1, 'final', 0),
get_version_tuple('0.1.1')
)
self.assertEquals(
(0, 1, 1, 'alpha', 0),
get_version_tuple('0.1.1a0')
)
self.assertEquals(
(0, 1, 0, 'beta', 0),
get_version_tuple('0.1b0')
)
self.assertEquals(
(0, 1, 0, 'candidate', 3),
get_version_tuple('0.1c3')
)
self.assertEquals(
(1, 0, 0, 'final', 0),
get_version_tuple('1.0')
)
self.assertEquals(
(100000, 0, 0, 'final', 0),
get_version_tuple('100000.0')
)
|
class TestVersion(unittest.TestCase):
def test_valid_version_str(self):
pass
def test_version_string_builder(self):
pass
| 3 | 0 | 18 | 0 | 18 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 74 | 37 | 1 | 36 | 3 | 33 | 0 | 12 | 3 | 9 | 1 | 2 | 0 | 2 |
6,551 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/waiter.py
|
wpull.waiter.LinearWaiter
|
class LinearWaiter(Waiter):
'''A linear back-off waiter.
Args:
wait: The normal delay time
random_wait: If True, randomly perturb the delay time within a factor
of 0.5 and 1.5
max_wait: The maximum delay time
This waiter will increment by values of 1 second.
'''
def __init__(self, wait=0.0, random_wait=False, max_wait=10.0):
self._wait = wait
self._current = wait
self._random = random_wait
self._max_wait = max_wait
def get(self):
if self._random:
return self._current * random.uniform(0.5, 1.5)
else:
return self._current
def increment(self):
self._current = min(self._max_wait, self._current + 1)
def reset(self):
self._current = self._wait
|
class LinearWaiter(Waiter):
'''A linear back-off waiter.
Args:
wait: The normal delay time
random_wait: If True, randomly perturb the delay time within a factor
of 0.5 and 1.5
max_wait: The maximum delay time
This waiter will increment by values of 1 second.
'''
def __init__(self, wait=0.0, random_wait=False, max_wait=10.0):
pass
def get(self):
pass
def increment(self):
pass
def reset(self):
pass
| 5 | 1 | 4 | 0 | 4 | 0 | 1 | 0.53 | 1 | 0 | 0 | 0 | 4 | 4 | 4 | 27 | 28 | 5 | 15 | 9 | 10 | 8 | 14 | 9 | 9 | 2 | 4 | 1 | 5 |
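A sketch of the linear back-off documented above; with random_wait left False the delays are deterministic:
from wpull.waiter import LinearWaiter

waiter = LinearWaiter(wait=1.0, max_wait=5.0)
for dummy in range(8):
    print(waiter.get())   # 1.0, 2.0, ... then capped at 5.0
    waiter.increment()    # back off one more second after a failure
waiter.reset()            # restore the initial 1.0 after a success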
6,552 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/waiter_test.py
|
wpull.waiter_test.TestWaiter
|
class TestWaiter(unittest.TestCase):
def test_linear_waiter(self):
waiter = LinearWaiter()
self.assertEqual(0.0, waiter.get())
for dummy in range(5):
waiter.increment()
self.assertEqual(5.0, waiter.get())
for dummy in range(50):
waiter.increment()
self.assertEqual(10.0, waiter.get())
|
class TestWaiter(unittest.TestCase):
def test_linear_waiter(self):
pass
| 2 | 0 | 13 | 4 | 9 | 0 | 3 | 0 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 73 | 14 | 4 | 10 | 4 | 8 | 0 | 10 | 4 | 8 | 3 | 2 | 1 | 3 |
6,553 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/warc/format.py
|
wpull.warc.format.WARCRecord
|
class WARCRecord(object):
'''A record in a WARC file.
Attributes:
fields: An instance of :class:`.namevalue.NameValueRecord`.
block_file: A file object. May be None.
'''
VERSION = 'WARC/1.0'
WARC_TYPE = 'WARC-Type'
CONTENT_TYPE = 'Content-Type'
WARC_DATE = 'WARC-Date'
WARC_RECORD_ID = 'WARC-Record-ID'
WARCINFO = 'warcinfo'
WARC_FIELDS = 'application/warc-fields'
REQUEST = 'request'
RESPONSE = 'response'
REVISIT = 'revisit'
TYPE_REQUEST = 'application/http;msgtype=request'
TYPE_RESPONSE = 'application/http;msgtype=response'
SAME_PAYLOAD_DIGEST_URI = \
'http://netpreserve.org/warc/1.0/revisit/identical-payload-digest'
NAME_OVERRIDES = frozenset([
'WARC-Date',
'WARC-Type',
'WARC-Record-ID',
'WARC-Concurrent-To',
'WARC-Refers-To',
'Content-Length',
'Content-Type',
'WARC-Target-URI',
'WARC-Block-Digest',
'WARC-IP-Address',
'WARC-Filename',
'WARC-Warcinfo-ID',
'WARC-Payload-Digest',
'WARC-Truncated',
'WARC-Filename',
'WARC-Profile',
'WARC-Identified-Payload-Type',
'WARC-Segment-Origin-ID',
'WARC-Segment-Number',
'WARC-Segment-Total-Length',
])
'''Field name case normalization overrides because hanzo's warc-tools do
not adequately conform to specifications.'''
def __init__(self):
self.fields = NameValueRecord(normalize_overrides=self.NAME_OVERRIDES)
self.block_file = None
def set_common_fields(self, warc_type: str, content_type: str):
'''Set the required fields for the record.'''
self.fields[self.WARC_TYPE] = warc_type
self.fields[self.CONTENT_TYPE] = content_type
self.fields[self.WARC_DATE] = wpull.util.datetime_str()
self.fields[self.WARC_RECORD_ID] = '<{0}>'.format(uuid.uuid4().urn)
def set_content_length(self):
'''Find and set the content length.
.. seealso:: :meth:`compute_checksum`.
'''
if not self.block_file:
self.fields['Content-Length'] = '0'
return
with wpull.util.reset_file_offset(self.block_file):
wpull.util.seek_file_end(self.block_file)
self.fields['Content-Length'] = str(self.block_file.tell())
def compute_checksum(self, payload_offset: Optional[int]=None):
'''Compute and add the checksum data to the record fields.
This function also sets the content length.
'''
if not self.block_file:
self.fields['Content-Length'] = '0'
return
block_hasher = hashlib.sha1()
payload_hasher = hashlib.sha1()
with wpull.util.reset_file_offset(self.block_file):
if payload_offset is not None:
data = self.block_file.read(payload_offset)
block_hasher.update(data)
while True:
data = self.block_file.read(4096)
if data == b'':
break
block_hasher.update(data)
payload_hasher.update(data)
content_length = self.block_file.tell()
content_hash = block_hasher.digest()
self.fields['WARC-Block-Digest'] = 'sha1:{0}'.format(
base64.b32encode(content_hash).decode()
)
if payload_offset is not None:
payload_hash = payload_hasher.digest()
self.fields['WARC-Payload-Digest'] = 'sha1:{0}'.format(
base64.b32encode(payload_hash).decode()
)
self.fields['Content-Length'] = str(content_length)
def __iter__(self):
'''Iterate the record as bytes.'''
yield self.VERSION.encode()
yield b'\r\n'
yield bytes(self.fields)
yield b'\r\n'
with wpull.util.reset_file_offset(self.block_file):
while True:
data = self.block_file.read(4096)
if data == b'':
break
yield data
yield b'\r\n\r\n'
def __bytes__(self):
'''Return the record as bytes.'''
return b''.join(iter(self))
def get_http_header(self) -> Response:
'''Return the HTTP header.
It only attempts to read the first 4 KiB of the payload.
Returns:
Response, None: Returns an instance of
:class:`.http.request.Response` or None.
'''
with wpull.util.reset_file_offset(self.block_file):
data = self.block_file.read(4096)
match = re.match(br'(.*?\r?\n\r?\n)', data)
if not match:
return
status_line, dummy, field_str = match.group(1).partition(b'\n')
try:
version, code, reason = Response.parse_status_line(status_line)
except ValueError:
return
response = Response(status_code=code, reason=reason, version=version)
try:
response.fields.parse(field_str, strict=False)
except ValueError:
return
return response
|
class WARCRecord(object):
'''A record in a WARC file.
Attributes:
fields: An instance of :class:`.namevalue.NameValueRecord`.
block_file: A file object. May be None.
'''
def __init__(self):
pass
def set_common_fields(self, warc_type: str, content_type: str):
'''Set the required fields for the record.'''
pass
def set_content_length(self):
'''Find and set the content length.
.. seealso:: :meth:`compute_checksum`.
'''
pass
def compute_checksum(self, payload_offset: Optional[int]=None):
'''Compute and add the checksum data to the record fields.
This function also sets the content length.
'''
pass
def __iter__(self):
'''Iterate the record as bytes.'''
pass
def __bytes__(self):
'''Return the record as bytes.'''
pass
def get_http_header(self) -> Response:
'''Return the HTTP header.
It only attempts to read the first 4 KiB of the payload.
Returns:
Response, None: Returns an instance of
:class:`.http.request.Response` or None.
'''
pass
| 8 | 7 | 16 | 3 | 10 | 2 | 3 | 0.2 | 1 | 6 | 2 | 2 | 7 | 2 | 7 | 7 | 162 | 30 | 110 | 36 | 102 | 22 | 84 | 36 | 76 | 6 | 1 | 3 | 18 |
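A minimal construction sketch for the WARCRecord class above; an in-memory BytesIO stands in for the block file:
import io
from wpull.warc.format import WARCRecord

record = WARCRecord()
record.set_common_fields(WARCRecord.WARCINFO, WARCRecord.WARC_FIELDS)
record.fields['WARC-Filename'] = 'example.warc'
record.block_file = io.BytesIO(b'software: example\r\n')
record.compute_checksum()  # sets the block digest and Content-Length
data = bytes(record)       # full serialized record
print(data[:8])            # b'WARC/1.0'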
6,554 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/warc/format_test.py
|
wpull.warc.format_test.TestWARC
|
class TestWARC(unittest.TestCase):
def test_read_cdx(self):
data = io.BytesIO(b' CDX a A b\nhi hello foxes?\n')
for record in read_cdx(data, encoding='ascii'):
self.assertEqual(record['a'], 'hi')
self.assertEqual(record['A'], 'hello')
self.assertEqual(record['b'], 'foxes?')
|
class TestWARC(unittest.TestCase):
def test_read_cdx(self):
pass
| 2 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 73 | 7 | 0 | 7 | 4 | 5 | 0 | 7 | 4 | 5 | 2 | 2 | 1 | 2 |
6,555 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/warc/recorder.py
|
wpull.warc.recorder.BaseWARCRecorderSession
|
class BaseWARCRecorderSession(object):
'''Base WARC recorder session.'''
def __init__(self, recorder, temp_dir=None, url_table=None):
self._recorder = recorder
self._temp_dir = temp_dir
self._url_table = url_table
def _new_temp_file(self, hint='warcrecsess'):
'''Return new temp file.'''
return wpull.body.new_temp_file(
directory=self._temp_dir, hint=hint
)
def close(self):
self._recorder.flush_session()
|
class BaseWARCRecorderSession(object):
'''Base WARC recorder session.'''
def __init__(self, recorder, temp_dir=None, url_table=None):
pass
def _new_temp_file(self, hint='warcrecsess'):
'''Return new temp file.'''
pass
def close(self):
pass
| 4 | 2 | 4 | 0 | 3 | 0 | 1 | 0.18 | 1 | 0 | 0 | 2 | 3 | 3 | 3 | 3 | 15 | 2 | 11 | 7 | 7 | 2 | 9 | 7 | 5 | 1 | 1 | 0 | 3 |
6,556 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/warc/recorder.py
|
wpull.warc.recorder.FTPWARCRecorderSession
|
class FTPWARCRecorderSession(BaseWARCRecorderSession):
'''FTP WARC Recorder Session.'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._request = None
self._control_record = None
self._response_record = None
def close(self, error=None):
super().close()
if self._control_record and self._control_record.block_file:
self._control_record.block_file.close()
if self._response_record and self._response_record.block_file:
self._response_record.block_file.close()
def begin_control(self, request: FTPRequest, connection_reused: bool=False):
self._request = request
self._control_record = record = WARCRecord()
record.set_common_fields('metadata', 'text/x-ftp-control-conversation')
record.fields['WARC-Target-URI'] = request.url_info.url
record.fields['WARC-IP-Address'] = request.address[0]
record.block_file = self._new_temp_file('warcctrl')
hostname, port = self._request_hostname_port()
if connection_reused:
connection_string = 'Reusing control connection to {hostname}:{port}'
else:
connection_string = 'Opening control connection to {hostname}:{port}'
self._write_control_event(
connection_string.format(hostname=hostname, port=port)
)
def end_control(self, response: FTPResponse, connection_closed=False):
hostname, port = self._request_hostname_port()
if connection_closed:
connection_string = 'Closed control connection to {hostname}:{port}'
else:
connection_string = 'Kept control connection to {hostname}:{port}'
self._write_control_event(
connection_string.format(hostname=hostname, port=port)
)
self._control_record.block_file.seek(0)
self._recorder.set_length_and_maybe_checksums(self._control_record)
self._recorder.write_record(self._control_record)
def control_send_data(self, data):
text = textwrap.indent(
data.decode('utf-8', errors='surrogateescape'),
'> ', predicate=lambda line: True
)
self._control_record.block_file.write(
text.encode('utf-8', errors='surrogateescape')
)
if not data.endswith(b'\n'):
self._control_record.block_file.write(b'\n')
def control_receive_data(self, data):
text = textwrap.indent(
data.decode('utf-8', errors='surrogateescape'),
'< ', predicate=lambda line: True
)
self._control_record.block_file.write(
text.encode('utf-8', errors='surrogateescape')
)
if not data.endswith(b'\n'):
self._control_record.block_file.write(b'\n')
def _write_control_event(self, text):
text = textwrap.indent(text, '* ', predicate=lambda line: True)
self._control_record.block_file.write(
text.encode('utf-8', errors='surrogateescape')
)
if not text.endswith('\n'):
self._control_record.block_file.write(b'\n')
def _request_hostname_port(self):
hostname = self._request.address[0]
if ':' in hostname:
hostname = '[{}]'.format(hostname)
port = self._request.address[1]
return hostname, port
def begin_transfer(self, response: FTPResponse):
hostname, port = response.data_address
self._write_control_event(
'Opened data connection to {hostname}:{port}'
.format(hostname=hostname, port=port)
)
self._response_record = record = WARCRecord()
record.set_common_fields('resource', 'application/octet-stream')
record.fields['WARC-Target-URI'] = self._request.url_info.url
record.fields['WARC-IP-Address'] = self._request.address[0]
record.fields['WARC-Concurrent-To'] = self._control_record.fields[
WARCRecord.WARC_RECORD_ID]
record.block_file = self._new_temp_file('warcresp')
def transfer_receive_data(self, data: bytes):
self._response_record.block_file.write(data)
def end_transfer(self, response: FTPResponse):
hostname, port = response.data_address
self._write_control_event(
'Closed data connection to {hostname}:{port}'
.format(hostname=hostname, port=port)
)
self._response_record.block_file.seek(0)
self._recorder.set_length_and_maybe_checksums(self._response_record)
self._recorder.write_record(self._response_record)
|
class FTPWARCRecorderSession(BaseWARCRecorderSession):
'''FTP WARC Recorder Session.'''
def __init__(self, *args, **kwargs):
pass
def close(self, error=None):
pass
def begin_control(self, request: FTPRequest, connection_reused: bool=False):
pass
def end_control(self, response: FTPResponse, connection_closed=False):
pass
def control_send_data(self, data):
pass
def control_receive_data(self, data):
pass
def _write_control_event(self, text):
pass
def _request_hostname_port(self):
pass
def begin_transfer(self, response: FTPResponse):
pass
def transfer_receive_data(self, data: bytes):
pass
def end_transfer(self, response: FTPResponse):
pass
| 12 | 1 | 10 | 2 | 9 | 0 | 2 | 0.01 | 1 | 4 | 1 | 0 | 11 | 3 | 11 | 14 | 125 | 28 | 96 | 27 | 84 | 1 | 71 | 25 | 59 | 3 | 2 | 1 | 19 |
6,557 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/warc/recorder.py
|
wpull.warc.recorder.HTTPWARCRecorderSession
|
class HTTPWARCRecorderSession(BaseWARCRecorderSession):
'''HTTP WARC Recorder Session.'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._request = None
self._request_record = None
self._response_record = None
self._response_temp_file = self._new_temp_file(hint='warcsesrsp')
def close(self):
super().close()
if self._response_temp_file:
self._response_temp_file.close()
if self._request_record and self._request_record.block_file:
self._request_record.block_file.close()
if self._response_record and self._response_record.block_file:
self._response_record.block_file.close()
def begin_request(self, request: HTTPRequest):
assert re.match(
r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[a-f0-9:.]+)$',
request.address[0]), \
'IP address needed, got {}'.format(request.address[0])
self._request = request
self._request_record = record = WARCRecord()
record.set_common_fields(WARCRecord.REQUEST, WARCRecord.TYPE_REQUEST)
record.fields['WARC-Target-URI'] = request.url_info.url
record.fields['WARC-IP-Address'] = request.address[0]
record.block_file = self._new_temp_file(hint='warcsesreq')
def request_data(self, data: bytes):
self._request_record.block_file.write(data)
def end_request(self, request: HTTPRequest):
payload_offset = len(request.to_bytes())
self._request_record.block_file.seek(0)
self._recorder.set_length_and_maybe_checksums(
self._request_record, payload_offset=payload_offset
)
self._recorder.write_record(self._request_record)
def begin_response(self, response: HTTPResponse):
assert re.match(
r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[a-f0-9:.]+)$',
self._request.address[0]), \
'IP address needed, got {}'.format(self._request.address[0])
self._response_record = record = WARCRecord()
record.set_common_fields(WARCRecord.RESPONSE, WARCRecord.TYPE_RESPONSE)
record.fields['WARC-Target-URI'] = self._request.url_info.url
record.fields['WARC-IP-Address'] = self._request.address[0]
record.fields['WARC-Concurrent-To'] = self._request_record.fields[
WARCRecord.WARC_RECORD_ID]
record.block_file = self._response_temp_file
def response_data(self, data: bytes):
self._response_temp_file.write(data)
def end_response(self, response: HTTPResponse):
payload_offset = len(response.to_bytes())
self._response_record.block_file.seek(0)
self._recorder.set_length_and_maybe_checksums(
self._response_record,
payload_offset=payload_offset
)
if self._url_table is not None:
self._record_revisit(payload_offset)
self._recorder.write_record(self._response_record)
def _record_revisit(self, payload_offset: int):
'''Record the revisit if possible.'''
fields = self._response_record.fields
ref_record_id = self._url_table.get_revisit_id(
fields['WARC-Target-URI'],
fields.get('WARC-Payload-Digest', '').upper().replace('SHA1:', '')
)
if ref_record_id:
try:
self._response_record.block_file.truncate(payload_offset)
except TypeError:
self._response_record.block_file.seek(0)
data = self._response_record.block_file.read(payload_offset)
self._response_record.block_file.truncate()
self._response_record.block_file.seek(0)
self._response_record.block_file.write(data)
self._recorder.set_length_and_maybe_checksums(
self._response_record
)
fields[WARCRecord.WARC_TYPE] = WARCRecord.REVISIT
fields['WARC-Refers-To'] = ref_record_id
fields['WARC-Profile'] = WARCRecord.SAME_PAYLOAD_DIGEST_URI
fields['WARC-Truncated'] = 'length'
|
class HTTPWARCRecorderSession(BaseWARCRecorderSession):
'''HTTP WARC Recorder Session.'''
def __init__(self, *args, **kwargs):
pass
def close(self):
pass
def begin_request(self, request: HTTPRequest):
pass
def request_data(self, data: bytes):
pass
def end_request(self, request: HTTPRequest):
pass
def begin_response(self, response: HTTPResponse):
pass
def response_data(self, data: bytes):
pass
def end_response(self, response: HTTPResponse):
pass
def _record_revisit(self, payload_offset: int):
'''Record the revisit if possible.'''
pass
| 10 | 2 | 11 | 2 | 9 | 0 | 2 | 0.02 | 1 | 5 | 1 | 0 | 9 | 4 | 9 | 12 | 106 | 23 | 81 | 21 | 71 | 2 | 64 | 19 | 54 | 4 | 2 | 2 | 15 |
6,558 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/warc/recorder.py
|
wpull.warc.recorder.WARCRecorder
|
class WARCRecorder(object):
'''Record to WARC file.
Args:
filename (str): The filename (without the extension).
params (:class:`WARCRecorderParams`): Parameters.
'''
CDX_DELIMINATOR = ' '
'''Default CDX delimiter.'''
DEFAULT_SOFTWARE_STRING = 'Wpull/{0} Python/{1}'.format(
wpull.version.__version__, wpull.util.python_version()
)
'''Default software string.'''
def __init__(self, filename, params=None):
self._prefix_filename = filename
self._params = params or WARCRecorderParams()
self._warcinfo_record = None
self._sequence_num = 0
self._log_temp_file = None
self._log_handler = None
self._warc_filename = None
self._cdx_filename = None
self._check_journals_and_maybe_raise()
if params.log:
self._setup_log()
self._start_new_warc_file()
if self._params.cdx:
self._start_new_cdx_file()
def _check_journals_and_maybe_raise(self):
'''Check if any journal files exist and raise an error.'''
files = list(glob.glob(self._prefix_filename + '*-wpullinc'))
if files:
raise OSError('WARC file {} is incomplete.'.format(files[0]))
def _start_new_warc_file(self, meta=False):
'''Create and set as current WARC file.'''
if self._params.max_size and not meta and self._params.appending:
while True:
self._warc_filename = self._generate_warc_filename()
if os.path.exists(self._warc_filename):
_logger.debug('Skip {0}', self._warc_filename)
self._sequence_num += 1
else:
break
else:
self._warc_filename = self._generate_warc_filename(meta=meta)
_logger.debug('WARC file at {0}', self._warc_filename)
if not self._params.appending:
wpull.util.truncate_file(self._warc_filename)
self._warcinfo_record = WARCRecord()
self._populate_warcinfo(self._params.extra_fields)
self.write_record(self._warcinfo_record)
def _generate_warc_filename(self, meta=False):
'''Return a suitable WARC filename.'''
if self._params.max_size is None:
sequence_name = ''
elif meta:
sequence_name = '-meta'
else:
sequence_name = '-{0:05d}'.format(self._sequence_num)
if self._params.compress:
extension = 'warc.gz'
else:
extension = 'warc'
return '{0}{1}.{2}'.format(
self._prefix_filename, sequence_name, extension
)
def _start_new_cdx_file(self):
'''Create and set current CDX file.'''
self._cdx_filename = '{0}.cdx'.format(self._prefix_filename)
if not self._params.appending:
wpull.util.truncate_file(self._cdx_filename)
self._write_cdx_header()
elif not os.path.exists(self._cdx_filename):
self._write_cdx_header()
def _populate_warcinfo(self, extra_fields=None):
'''Add the metadata to the Warcinfo record.'''
self._warcinfo_record.set_common_fields(
WARCRecord.WARCINFO, WARCRecord.WARC_FIELDS)
info_fields = NameValueRecord(wrap_width=1024)
info_fields['Software'] = self._params.software_string \
or self.DEFAULT_SOFTWARE_STRING
info_fields['format'] = 'WARC File Format 1.0'
info_fields['conformsTo'] = \
'http://bibnum.bnf.fr/WARC/WARC_ISO_28500_version1_latestdraft.pdf'
if extra_fields:
for name, value in extra_fields:
info_fields.add(name, value)
self._warcinfo_record.block_file = io.BytesIO(
bytes(info_fields) + b'\r\n')
self._warcinfo_record.compute_checksum()
def _setup_log(self):
'''Set up the logging file.'''
logger = logging.getLogger()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self._log_temp_file = NamedTemporaryFile(
prefix='tmp-wpull-warc-',
dir=self._params.temp_dir,
suffix='.log.gz',
delete=False,
)
self._log_temp_file.close() # For Windows
self._log_handler = handler = logging.StreamHandler(
io.TextIOWrapper(
gzip.GzipFile(
filename=self._log_temp_file.name, mode='wb'
),
encoding='utf-8'
)
)
logger.setLevel(logging.DEBUG)
logger.debug('Wpull needs the root logger level set to DEBUG.')
handler.setFormatter(formatter)
logger.addHandler(handler)
handler.setLevel(logging.INFO)
def listen_to_http_client(self, client: HTTPClient):
client.event_dispatcher.add_listener(HTTPClient.ClientEvent.new_session,
self._http_session_callback)
def _http_session_callback(self, http_session: HTTPSession):
recorder_session = self.new_http_recorder_session()
http_session.event_dispatcher.add_listener(
HTTPSession.Event.begin_request, recorder_session.begin_request)
http_session.event_dispatcher.add_listener(
HTTPSession.Event.request_data, recorder_session.request_data)
http_session.event_dispatcher.add_listener(
HTTPSession.Event.end_request, recorder_session.end_request)
http_session.event_dispatcher.add_listener(
HTTPSession.Event.begin_response, recorder_session.begin_response)
http_session.event_dispatcher.add_listener(
HTTPSession.Event.response_data, recorder_session.response_data)
http_session.event_dispatcher.add_listener(
HTTPSession.Event.end_response, recorder_session.end_response)
http_session.event_dispatcher.add_listener(
HTTPSession.SessionEvent.end_session,
lambda error: recorder_session.close()
)
def new_http_recorder_session(self) -> 'HTTPWARCRecorderSession':
return HTTPWARCRecorderSession(
self, temp_dir=self._params.temp_dir,
url_table=self._params.url_table
)
def listen_to_ftp_client(self, client: FTPClient):
client.event_dispatcher.add_listener(FTPClient.ClientEvent.new_session,
self._ftp_session_callback)
def _ftp_session_callback(self, ftp_session: FTPSession):
recorder_session = self.new_ftp_recorder_session()
ftp_session.event_dispatcher.add_listener(
FTPSession.Event.begin_control, recorder_session.begin_control)
ftp_session.event_dispatcher.add_listener(
FTPSession.Event.control_receive_data,
recorder_session.control_receive_data)
ftp_session.event_dispatcher.add_listener(
FTPSession.Event.control_send_data,
recorder_session.control_send_data)
ftp_session.event_dispatcher.add_listener(
FTPSession.Event.end_control, recorder_session.end_control)
ftp_session.event_dispatcher.add_listener(
FTPSession.Event.begin_transfer, recorder_session.begin_transfer)
ftp_session.event_dispatcher.add_listener(
FTPSession.Event.transfer_receive_data,
recorder_session.transfer_receive_data)
ftp_session.event_dispatcher.add_listener(
FTPSession.Event.end_transfer, recorder_session.end_transfer)
ftp_session.event_dispatcher.add_listener(
FTPSession.SessionEvent.end_session,
lambda error: recorder_session.close()
)
def new_ftp_recorder_session(self) -> 'FTPWARCRecorderSession':
return FTPWARCRecorderSession(
self, temp_dir=self._params.temp_dir,
url_table=self._params.url_table
)
def flush_session(self):
if self._params.max_size is not None \
and os.path.getsize(self._warc_filename) > self._params.max_size:
self._sequence_num += 1
if self._params.move_to is not None:
self._move_file_to_dest_dir(self._warc_filename)
_logger.debug('Starting new warc file due to max size.')
self._start_new_warc_file()
def _move_file_to_dest_dir(self, filename):
'''Move the file to the ``move_to`` directory.'''
assert self._params.move_to
if os.path.isdir(self._params.move_to):
_logger.debug('Moved {} to {}.', self._warc_filename,
self._params.move_to)
shutil.move(filename, self._params.move_to)
else:
_logger.error('{} is not a directory; not moving {}.',
self._params.move_to, filename)
def set_length_and_maybe_checksums(self, record, payload_offset=None):
'''Set the content length and possibly the checksums.'''
if self._params.digests:
record.compute_checksum(payload_offset)
else:
record.set_content_length()
def write_record(self, record):
'''Append the record to the WARC file.'''
# FIXME: probably not a good idea to modify arguments passed to us
# TODO: add extra gzip headers that wget uses
record.fields['WARC-Warcinfo-ID'] = self._warcinfo_record.fields[
WARCRecord.WARC_RECORD_ID]
_logger.debug('Writing WARC record {0}.',
record.fields['WARC-Type'])
if self._params.compress:
open_func = gzip.GzipFile
else:
open_func = open
# Use getsize to get actual file size. Avoid tell() because it may
# not be the raw file position.
if os.path.exists(self._warc_filename):
before_offset = os.path.getsize(self._warc_filename)
else:
before_offset = 0
journal_filename = self._warc_filename + '-wpullinc'
with open(journal_filename, 'w') as file:
file.write('wpull-journal-version:1\n')
file.write('offset:{}\n'.format(before_offset))
try:
with open_func(self._warc_filename, mode='ab') as out_file:
for data in record:
out_file.write(data)
except (OSError, IOError) as error:
_logger.info(
_('Rolling back file {filename} to length {length}.'),
filename=self._warc_filename, length=before_offset
)
with open(self._warc_filename, mode='wb') as out_file:
out_file.truncate(before_offset)
raise error
finally:
os.remove(journal_filename)
after_offset = os.path.getsize(self._warc_filename)
if self._cdx_filename:
raw_file_offset = before_offset
raw_file_record_size = after_offset - before_offset
self._write_cdx_field(
record, raw_file_record_size, raw_file_offset
)
def close(self):
'''Close the WARC file and clean up any logging handlers.'''
if self._log_temp_file:
self._log_handler.flush()
logger = logging.getLogger()
logger.removeHandler(self._log_handler)
self._log_handler.stream.close()
log_record = WARCRecord()
log_record.block_file = gzip.GzipFile(
filename=self._log_temp_file.name
)
log_record.set_common_fields('resource', 'text/plain')
log_record.fields['WARC-Target-URI'] = \
'urn:X-wpull:log'
if self._params.max_size is not None:
if self._params.move_to is not None:
self._move_file_to_dest_dir(self._warc_filename)
self._start_new_warc_file(meta=True)
self.set_length_and_maybe_checksums(log_record)
self.write_record(log_record)
log_record.block_file.close()
try:
os.remove(self._log_temp_file.name)
except OSError:
_logger.exception('Could not close log temp file.')
self._log_temp_file = None
self._log_handler.close()
self._log_handler = None
if self._params.move_to is not None:
self._move_file_to_dest_dir(self._warc_filename)
if self._cdx_filename and self._params.move_to is not None:
self._move_file_to_dest_dir(self._cdx_filename)
def _write_cdx_header(self):
'''Write the CDX header.
It writes the fields:
1. a: original URL
2. b: UNIX timestamp
3. m: MIME Type from the HTTP Content-type
4. s: response code
5. k: new style checksum
6. S: raw file record size
7. V: offset in raw file
8. g: filename of raw file
9. u: record ID
'''
with open(self._cdx_filename, mode='a', encoding='utf-8') as out_file:
out_file.write(self.CDX_DELIMINATOR)
out_file.write(self.CDX_DELIMINATOR.join((
'CDX',
'a', 'b', 'm', 's',
'k', 'S', 'V', 'g',
'u'
)))
out_file.write('\n')
def _write_cdx_field(self, record, raw_file_record_size, raw_file_offset):
'''Write the CDX field if needed.'''
if record.fields[WARCRecord.WARC_TYPE] != WARCRecord.RESPONSE \
or not re.match(r'application/http; *msgtype *= *response',
record.fields[WARCRecord.CONTENT_TYPE]):
return
url = record.fields['WARC-Target-URI']
_logger.debug('Writing CDX record {0}.', url)
http_header = record.get_http_header()
if http_header:
mime_type = self.parse_mimetype(
http_header.fields.get('Content-Type', '')
) or '-'
response_code = str(http_header.status_code)
else:
mime_type = '-'
response_code = '-'
timestamp = str(int(
wpull.util.parse_iso8601_str(record.fields[WARCRecord.WARC_DATE])
))
checksum = record.fields.get('WARC-Payload-Digest', '')
if checksum.startswith('sha1:'):
checksum = checksum.replace('sha1:', '', 1)
else:
checksum = '-'
raw_file_record_size_str = str(raw_file_record_size)
raw_file_offset_str = str(raw_file_offset)
filename = os.path.basename(self._warc_filename)
record_id = record.fields[WARCRecord.WARC_RECORD_ID]
fields_strs = (
url,
timestamp,
mime_type,
response_code,
checksum,
raw_file_record_size_str,
raw_file_offset_str,
filename,
record_id
)
with open(self._cdx_filename, mode='a', encoding='utf-8') as out_file:
out_file.write(self.CDX_DELIMINATOR.join(fields_strs))
out_file.write('\n')
@classmethod
def parse_mimetype(cls, value):
'''Return the MIME type from a Content-Type string.
Returns:
str, None: A string in the form ``type/subtype`` or None.
'''
match = re.match(r'([a-zA-Z0-9-]+/[a-zA-Z0-9-]+)', value)
if match:
return match.group(1)
|
class WARCRecorder(object):
'''Record to WARC file.
Args:
filename (str): The filename (without the extension).
params (:class:`WARCRecorderParams`): Parameters.
'''
def __init__(self, filename, params=None):
pass
def _check_journals_and_maybe_raise(self):
'''Check if any journal files exist and raise an error.'''
pass
def _start_new_warc_file(self, meta=False):
'''Create and set as current WARC file.'''
pass
def _generate_warc_filename(self, meta=False):
'''Return a suitable WARC filename.'''
pass
def _start_new_cdx_file(self):
'''Create and set current CDX file.'''
pass
def _populate_warcinfo(self, extra_fields=None):
'''Add the metadata to the Warcinfo record.'''
pass
def _setup_log(self):
'''Set up the logging file.'''
pass
def listen_to_http_client(self, client: HTTPClient):
pass
def _http_session_callback(self, http_session: HTTPSession):
pass
def new_http_recorder_session(self) -> 'HTTPWARCRecorderSession':
pass
def listen_to_ftp_client(self, client: FTPClient):
pass
def _ftp_session_callback(self, ftp_session: FTPSession):
pass
def new_ftp_recorder_session(self) -> 'FTPWARCRecorderSession':
pass
def flush_session(self):
pass
def _move_file_to_dest_dir(self, filename):
'''Move the file to the ``move_to`` directory.'''
pass
def set_length_and_maybe_checksums(self, record, payload_offset=None):
'''Set the content length and possibly the checksums.'''
pass
def write_record(self, record):
'''Append the record to the WARC file.'''
pass
def close(self):
'''Close the WARC file and clean up any logging handlers.'''
pass
def _write_cdx_header(self):
'''Write the CDX header.
It writes the fields:
1. a: original URL
2. b: UNIX timestamp
3. m: MIME Type from the HTTP Content-type
4. s: response code
5. k: new style checksum
6. S: raw file record size
7. V: offset in raw file
8. g: filename of raw file
9. u: record ID
'''
pass
def _write_cdx_field(self, record, raw_file_record_size, raw_file_offset):
'''Write the CDX field if needed.'''
pass
@classmethod
def parse_mimetype(cls, value):
'''Return the MIME type from a Content-Type string.
Returns:
str, None: A string in the form ``type/subtype`` or None.
'''
pass
| 23 | 14 | 19 | 3 | 14 | 2 | 3 | 0.13 | 1 | 14 | 6 | 0 | 20 | 8 | 21 | 21 | 427 | 83 | 306 | 69 | 283 | 39 | 207 | 62 | 185 | 7 | 1 | 3 | 54 |
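The recorder's parse_mimetype class method at the end of the record above is usable standalone; a quick sketch of its behavior:
from wpull.warc.recorder import WARCRecorder

print(WARCRecorder.parse_mimetype('text/html; charset=utf-8'))  # 'text/html'
print(WARCRecorder.parse_mimetype('not a mimetype'))            # None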
6,559 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/writer.py
|
wpull.writer.OverwriteFileWriter
|
class OverwriteFileWriter(BaseFileWriter):
'''File writer that overwrites files.'''
@property
def session_class(self):
return OverwriteFileWriterSession
|
class OverwriteFileWriter(BaseFileWriter):
'''File writer that overwrites files.'''
@property
def session_class(self):
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 0.25 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 25 | 5 | 0 | 4 | 3 | 1 | 1 | 3 | 2 | 1 | 1 | 5 | 0 | 1 |
6,560 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/writer.py
|
wpull.writer.OverwriteFileWriterSession
|
class OverwriteFileWriterSession(BaseFileWriterSession):
pass
|
class OverwriteFileWriterSession(BaseFileWriterSession):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
6,561 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/util_test.py
|
wpull.util_test.TestUtil.test_close_on_error.MyObject
|
class MyObject(object):
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def oops(self):
with close_on_error(self.close):
raise ValueError()
|
class MyObject(object):
def __init__(self):
pass
def close(self):
pass
def oops(self):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 3 | 1 | 3 | 3 | 10 | 2 | 8 | 5 | 4 | 0 | 8 | 5 | 4 | 1 | 1 | 1 | 3 |
6,562 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/writer.py
|
wpull.writer.SingleDocumentWriterSession
|
class SingleDocumentWriterSession(BaseWriterSession):
'''Write all data into stream.'''
def __init__(self, stream: BinaryIO, headers_included: bool):
self._stream = stream
self._headers_included = headers_included
def process_request(self, request):
return request
def process_response(self, response: BaseResponse):
if self._headers_included and isinstance(response, SerializableMixin):
self._stream.write(response.to_bytes())
if not self._stream.readable():
response.body = MuxBody(self._stream)
else:
response.body = Body(self._stream)
return response
def discard_document(self, response):
response.body.flush()
def save_document(self, response):
response.body.flush()
def extra_resource_path(self, suffix):
pass
|
class SingleDocumentWriterSession(BaseWriterSession):
'''Write all data into stream.'''
def __init__(self, stream: BinaryIO, headers_included: bool):
pass
def process_request(self, request):
pass
def process_response(self, response: BaseResponse):
pass
def discard_document(self, response):
pass
def save_document(self, response):
pass
def extra_resource_path(self, suffix):
pass
| 7 | 1 | 4 | 0 | 3 | 0 | 1 | 0.05 | 1 | 6 | 4 | 0 | 6 | 2 | 6 | 31 | 28 | 7 | 20 | 9 | 13 | 1 | 19 | 9 | 12 | 3 | 4 | 1 | 8 |
6,563 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/application/app.py
|
wpull.application.app.Application.Event
|
class Event(enum.Enum):
pipeline_begin = 'pipeline_begin'
pipeline_end = 'pipeline_end'
|
class Event(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 4 | 0 | 0 |
6,564 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/application/plugin_test.py
|
wpull.application.plugin_test.MockPlugin
|
class MockPlugin(WpullPlugin):
@hook('hook_thing')
def my_hook_callback(self):
pass
@event('event_thing')
def my_event_callback(self, data):
pass
def unrelated_function(self):
pass
|
class MockPlugin(WpullPlugin):
@hook('hook_thing')
def my_hook_callback(self):
pass
@event('event_thing')
def my_event_callback(self, data):
pass
def unrelated_function(self):
pass
| 6 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 0 | 3 | 6 | 11 | 2 | 9 | 6 | 3 | 0 | 7 | 4 | 3 | 1 | 2 | 0 | 3 |
6,565 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/application/plugin_test.py
|
wpull.application.plugin_test.TestPlugin
|
class TestPlugin(unittest.TestCase):
def test_plugin_function_discovery(self):
plugin = MockPlugin()
funcs = list(plugin.get_plugin_functions())
self.assertEqual(2, len(funcs))
self.assertIn(
PluginClientFunctionInfo(
plugin.my_event_callback, 'event_thing',
PluginFunctionCategory.event),
funcs)
self.assertIn(
PluginClientFunctionInfo(
plugin.my_hook_callback, 'hook_thing',
PluginFunctionCategory.hook),
funcs)
def test_plugin_interface_registry(self):
registry = InterfaceRegistry()
@event_interface('test_event', registry)
def event_callback(data):
pass
self.assertEqual(1, len(registry))
self.assertIn('test_event', registry)
|
class TestPlugin(unittest.TestCase):
def test_plugin_function_discovery(self):
pass
def test_plugin_interface_registry(self):
pass
@event_interface('test_event', registry)
def event_callback(data):
pass
| 5 | 0 | 9 | 1 | 8 | 0 | 1 | 0 | 1 | 4 | 3 | 0 | 2 | 0 | 2 | 74 | 26 | 4 | 22 | 8 | 17 | 0 | 13 | 7 | 9 | 1 | 2 | 0 | 3 |
6,566 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/application/plugins/server_response.plugin.py
|
wpull.application.plugins.server_response.plugin.PrintServerResponsePlugin
|
class PrintServerResponsePlugin(WpullPlugin):
def should_activate(self):
return self.app_session.args.server_response
@event(PluginFunctions.handle_pre_response)
def print_response(self, response: BaseResponse):
print(str(response))
|
class PrintServerResponsePlugin(WpullPlugin):
def should_activate(self):
pass
@event(PluginFunctions.handle_pre_response)
def print_response(self, response: BaseResponse):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 2 | 0 | 2 | 5 | 7 | 1 | 6 | 4 | 2 | 0 | 5 | 3 | 2 | 1 | 2 | 0 | 2 |
6,567 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/application/tasks/shutdown.py
|
wpull.application.tasks.shutdown.AppStopTask
|
class AppStopTask(ItemTask[AppSession], HookableMixin):
def __init__(self):
super().__init__()
self.hook_dispatcher.register(PluginFunctions.exit_status)
@asyncio.coroutine
def process(self, session: AppSession):
statistics = session.factory['Statistics']
app = session.factory['Application']
self._update_exit_code_from_stats(statistics, app)
try:
new_exit_code = self.hook_dispatcher.call(
PluginFunctions.exit_status, session, app.exit_code)
app.exit_code = new_exit_code
except HookDisconnected:
pass
@classmethod
def _update_exit_code_from_stats(cls, statistics: Statistics,
app: Application):
'''Set the current exit code based on the Statistics.'''
for error_type in statistics.errors:
exit_code = app.ERROR_CODE_MAP.get(error_type)
if exit_code:
app.update_exit_code(exit_code)
@staticmethod
@hook_interface(PluginFunctions.exit_status)
def plugin_exit_status(app_session: AppSession, exit_code: int) -> int:
'''Return the program exit status code.
Exit codes are values from :class:`errors.ExitStatus`.
Args:
exit_code: The exit code Wpull wants to return.
Returns:
int: The exit code that Wpull will return.
'''
return exit_code
|
class AppStopTask(ItemTask[AppSession], HookableMixin):
def __init__(self):
pass
@asyncio.coroutine
def process(self, session: AppSession):
pass
@classmethod
def _update_exit_code_from_stats(cls, statistics: Statistics,
app: Application):
'''Set the current exit code based on the Statistics.'''
pass
@staticmethod
@hook_interface(PluginFunctions.exit_status)
def plugin_exit_status(app_session: AppSession, exit_code: int) -> int:
'''Return the program exit status code.
Exit codes are values from :class:`errors.ExitStatus`.
Args:
exit_code: The exit code Wpull wants to return.
Returns:
int: The exit code that Wpull will return.
'''
pass
| 9 | 2 | 8 | 1 | 5 | 2 | 2 | 0.32 | 2 | 7 | 5 | 0 | 2 | 0 | 4 | 29 | 40 | 7 | 25 | 14 | 15 | 8 | 20 | 10 | 15 | 3 | 4 | 2 | 7 |
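A hedged sketch of a plugin implementing the exit_status hook declared above, reusing the decorator pattern from the MockPlugin record earlier; the import paths are assumptions inferred from this dataset's file paths.
from wpull.application.plugin import WpullPlugin, PluginFunctions, hook  # assumed paths

class ExitStatusPlugin(WpullPlugin):
    @hook(PluginFunctions.exit_status)
    def exit_status(self, app_session, exit_code):
        # Return the code unchanged; replace this to override what Wpull reports.
        return exit_code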
6,568 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/application/tasks/stats.py
|
wpull.application.tasks.stats.StatsStopTask
|
class StatsStopTask(ItemTask[AppSession], HookableMixin):
def __init__(self):
super().__init__()
self.event_dispatcher.register(PluginFunctions.finishing_statistics)
@asyncio.coroutine
def process(self, session: AppSession):
statistics = session.factory['Statistics']
statistics.stop()
# TODO: human_format_speed arg
self._print_stats(statistics)
self.event_dispatcher.notify(
PluginFunctions.finishing_statistics, session, statistics)
@classmethod
def _print_stats(cls, stats: Statistics, human_format_speed: bool = True):
'''Log the final statistics to the user.'''
time_length = datetime.timedelta(
seconds=int(stats.stop_time - stats.start_time)
)
file_size = wpull.string.format_size(stats.size)
if stats.bandwidth_meter.num_samples:
speed = stats.bandwidth_meter.speed()
if human_format_speed:
speed_size_str = wpull.string.format_size(speed)
else:
speed_size_str = '{:.1f} b'.format(speed * 8)
else:
speed_size_str = _('-- B')
_logger.info(_('FINISHED.'))
_logger.info(__(
_(
'Duration: {preformatted_timedelta}. '
'Speed: {preformatted_speed_size}/s.'
),
preformatted_timedelta=time_length,
preformatted_speed_size=speed_size_str,
))
_logger.info(__(
gettext.ngettext(
'Downloaded: {num_files} file, {preformatted_file_size}.',
'Downloaded: {num_files} files, {preformatted_file_size}.',
stats.files
),
num_files=stats.files,
preformatted_file_size=file_size
))
if stats.is_quota_exceeded:
_logger.info(_('Download quota exceeded.'))
@staticmethod
@event_interface(PluginFunctions.finishing_statistics)
def plugin_finishing_statistics(app_session: AppSession, statistics: Statistics):
'''Callback containing final statistics.
Args:
start_time (float): timestamp when the engine started
end_time (float): timestamp when the engine stopped
num_urls (int): number of URLs downloaded
bytes_downloaded (int): size of files downloaded in bytes
'''
|
class StatsStopTask(ItemTask[AppSession], HookableMixin):
def __init__(self):
pass
@asyncio.coroutine
def process(self, session: AppSession):
pass
@classmethod
def _print_stats(cls, stats: Statistics, human_format_speed: bool = True):
'''Log the final statistics to the user.'''
pass
@staticmethod
@event_interface(PluginFunctions.finishing_statistics)
def plugin_finishing_statistics(app_session: AppSession, statistics: Statistics):
'''Callback containing final statistics.
Args:
start_time (float): timestamp when the engine started
end_time (float): timestamp when the engine stopped
num_urls (int): number of URLs downloaded
bytes_downloaded (int): size of files downloaded in bytes
'''
pass
| 9 | 2 | 15 | 2 | 11 | 2 | 2 | 0.19 | 2 | 7 | 3 | 0 | 2 | 0 | 4 | 29 | 66 | 10 | 47 | 13 | 38 | 9 | 24 | 10 | 19 | 4 | 4 | 2 | 7 |
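The summary lines logged by _print_stats combine a timedelta built from start/stop timestamps with a human-readable transfer rate. A self-contained sketch of that assembly; format_size below is a simplified stand-in for wpull.string.format_size, and the numbers are made up:

import datetime

def format_size(num_bytes):
    # Minimal binary-prefix formatter, assumed behavior only.
    for unit in ('B', 'KiB', 'MiB', 'GiB'):
        if num_bytes < 1024:
            return '{:.1f} {}'.format(num_bytes, unit)
        num_bytes /= 1024
    return '{:.1f} TiB'.format(num_bytes)

start_time, stop_time = 1000.0, 1075.5
duration = datetime.timedelta(seconds=int(stop_time - start_time))
speed = 2_500_000 / (stop_time - start_time)  # bytes per second
print('Duration: {}. Speed: {}/s.'.format(duration, format_size(speed)))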
6,569 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/database/wrap.py
|
wpull.database.wrap.URLTableHookWrapper
|
class URLTableHookWrapper(BaseURLTable, HookableMixin):
'''URL table wrapper with scripting hooks.
Args:
url_table: URL table.
Attributes:
url_table: URL table.
'''
def __init__(self, url_table):
super().__init__()
self.url_table = url_table
self._queue_counter = 0
self.event_dispatcher.register(PluginFunctions.queued_url)
self.event_dispatcher.register(PluginFunctions.dequeued_url)
def queue_count(self):
'''Return the number of URLs queued in this session.'''
return self._queue_counter
def count(self):
return self.url_table.count()
def get_one(self, url):
return self.url_table.get_one(url)
def get_all(self):
return self.url_table.get_all()
def add_many(self, urls):
added_urls = tuple(self.url_table.add_many(urls))
for url in added_urls:
url_info = parse_url_or_log(url)
if url_info:
self._queue_counter += 1
self.event_dispatcher.notify(
PluginFunctions.queued_url, url_info)
return added_urls
def check_out(self, filter_status, filter_level=None):
url_record = self.url_table.check_out(filter_status, filter_level)
self._queue_counter -= 1
self.event_dispatcher.notify(
PluginFunctions.dequeued_url, url_record.url_info, url_record)
return url_record
def check_in(self, url, new_status, increment_try_count=True,
url_result=None):
if new_status == Status.error:
self._queue_counter += 1
url_info = parse_url_or_log(url)
if url_info:
self.event_dispatcher.notify(
PluginFunctions.queued_url, url_info)
return self.url_table.check_in(url, new_status, increment_try_count=increment_try_count, url_result=url_result)
def update_one(self, *args, **kwargs):
return self.url_table.update_one(*args, **kwargs)
def release(self):
return self.url_table.release()
def remove_many(self, urls):
return self.url_table.remove_many(urls)
def close(self):
return self.url_table.close()
def add_visits(self, visits):
return self.url_table.add_visits(visits)
def get_revisit_id(self, url, payload_digest):
return self.url_table.get_revisit_id(url, payload_digest)
def get_hostnames(self):
return self.url_table.get_hostnames()
@staticmethod
@event_interface(PluginFunctions.queued_url)
def queued_url(url_info: URLInfo):
'''Callback fired after a URL was put into the queue.
'''
@staticmethod
@event_interface(PluginFunctions.dequeued_url)
def dequeued_url(url_info: URLInfo, record_info: URLRecord):
'''Callback fired after a URL was retrieved from the queue.
'''
def get_root_url_todo_count(self):
return self.url_table.get_root_url_todo_count()
def convert_check_out(self):
return self.url_table.convert_check_out()
def convert_check_in(self, file_id: int, status: Status):
self.url_table.convert_check_in(file_id, status)
|
class URLTableHookWrapper(BaseURLTable, HookableMixin):
'''URL table wrapper with scripting hooks.
Args:
url_table: URL table.
Attributes:
url_table: URL table.
'''
def __init__(self, url_table):
pass
def queue_count(self):
'''Return the number of URLs queued in this session.'''
pass
def count(self):
pass
def get_one(self, url):
pass
def get_all(self):
pass
def add_many(self, urls):
pass
def check_out(self, filter_status, filter_level=None):
pass
def check_in(self, url, new_status, increment_try_count=True,
url_result=None):
pass
def update_one(self, *args, **kwargs):
pass
def release(self):
pass
def remove_many(self, urls):
pass
def close(self):
pass
def add_visits(self, visits):
pass
def get_revisit_id(self, url, payload_digest):
pass
def get_hostnames(self):
pass
@staticmethod
@event_interface(PluginFunctions.queued_url)
def queued_url(url_info: URLInfo):
'''Callback fired after a URL was put into the queue.
'''
pass
@staticmethod
@event_interface(PluginFunctions.dequeued_url)
def dequeued_url(url_info: URLInfo, record_info: URLRecord):
'''Callback fired after a URL was retrieved from the queue.
'''
pass
def get_root_url_todo_count(self):
pass
def convert_check_out(self):
pass
def convert_check_in(self, file_id: int, status: Status):
pass
| 25 | 4 | 3 | 0 | 3 | 0 | 1 | 0.18 | 2 | 7 | 4 | 0 | 18 | 2 | 20 | 61 | 102 | 29 | 62 | 31 | 36 | 11 | 57 | 28 | 36 | 3 | 4 | 2 | 24 |
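URLTableHookWrapper forwards every call to the wrapped table and only adds queue counting plus event notification around add_many/check_out. A minimal delegation wrapper in the same spirit; InMemoryTable is a toy stand-in, not a wpull class:

class InMemoryTable:
    def __init__(self):
        self._urls = []
    def add_many(self, urls):
        self._urls.extend(urls)
        return urls
    def check_out(self):
        return self._urls.pop(0)

class CountingWrapper:
    def __init__(self, table):
        self._table = table
        self._queue_counter = 0
    def add_many(self, urls):
        added = self._table.add_many(urls)
        self._queue_counter += len(added)  # count what actually got queued
        return added
    def check_out(self):
        self._queue_counter -= 1
        return self._table.check_out()
    def __getattr__(self, name):
        # Everything not overridden falls through to the wrapped table.
        return getattr(self._table, name)

wrapper = CountingWrapper(InMemoryTable())
wrapper.add_many(['http://example.com/'])
print(wrapper.check_out(), wrapper._queue_counter)  # http://example.com/ 0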
6,570 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/network/dns.py
|
wpull.network.dns.Resolver
|
class Resolver(HookableMixin):
'''Asynchronous resolver with cache and timeout.
Args:
family: IPv4 or IPv6 preference.
timeout: A time in seconds used for timing-out requests. If not
specified, this class relies on the underlying libraries.
bind_address: An IP address to bind DNS requests if possible.
cache: Cache to store results of any query.
rotate: If result is cached rotates the results, otherwise, shuffle
the results.
'''
def __init__(
self,
family: IPFamilyPreference = IPFamilyPreference.any,
timeout: Optional[float] = None,
bind_address: Optional[str] = None,
cache: Optional[FIFOCache] = None,
rotate: bool = False):
super().__init__()
assert family in IPFamilyPreference, \
'Unknown family {}.'.format(family)
self._family = family
self._timeout = timeout
self._bind_address = bind_address
self._cache = cache
self._rotate = rotate
self._dns_resolver = dns.resolver.Resolver()
self.dns_python_enabled = True
if timeout:
self._dns_resolver.timeout = timeout
self.hook_dispatcher.register(PluginFunctions.resolve_dns)
self.event_dispatcher.register(PluginFunctions.resolve_dns_result)
@classmethod
def new_cache(cls) -> FIFOCache:
'''Return a default cache'''
return FIFOCache(max_items=100, time_to_live=3600)
@asyncio.coroutine
def resolve(self, host: str) -> ResolveResult:
'''Resolve hostname.
Args:
host: Hostname.
Returns:
Resolved IP addresses.
Raises:
DNSNotFound if the hostname could not be resolved or
NetworkError if there was an error connecting to DNS servers.
Coroutine.
'''
_logger.debug(__('Lookup address {0}.', host))
try:
host = self.hook_dispatcher.call(PluginFunctions.resolve_dns, host
) or host
except HookDisconnected:
pass
cache_key = (host, self._family)
if self._cache and cache_key in self._cache:
resolve_result = self._cache[cache_key]
_logger.debug(__('Return by cache {0}.', resolve_result))
if self._rotate:
resolve_result.rotate()
return resolve_result
address_infos = []
dns_infos = []
if not self.dns_python_enabled:
families = ()
elif self._family == IPFamilyPreference.any:
families = (socket.AF_INET, socket.AF_INET6)
elif self._family == IPFamilyPreference.ipv4_only:
families = (socket.AF_INET, )
else:
families = (socket.AF_INET6, )
for family in families:
datetime_now = datetime.datetime.utcnow()
try:
answer = yield from self._query_dns(host, family)
except DNSNotFound:
continue
else:
dns_infos.append(DNSInfo(datetime_now, answer.response.answer))
address_infos.extend(self._convert_dns_answer(answer))
if not address_infos:
# Maybe the address is defined in hosts file or mDNS
if self._family == IPFamilyPreference.any:
family = socket.AF_UNSPEC
elif self._family == IPFamilyPreference.ipv4_only:
family = socket.AF_INET
else:
family = socket.AF_INET6
results = yield from self._getaddrinfo(host, family)
address_infos.extend(self._convert_addrinfo(results))
_logger.debug(__('Resolved addresses: {0}.', address_infos))
resolve_result = ResolveResult(address_infos, dns_infos)
if self._cache:
self._cache[cache_key] = resolve_result
self.event_dispatcher.notify(
PluginFunctions.resolve_dns_result, host, resolve_result)
if self._rotate:
resolve_result.shuffle()
return resolve_result
@asyncio.coroutine
def _query_dns(self, host: str, family: int = socket.AF_INET) \
-> dns.resolver.Answer:
'''Query DNS using Python.
Coroutine.
'''
record_type = {socket.AF_INET: 'A', socket.AF_INET6: 'AAAA'}[family]
event_loop = asyncio.get_event_loop()
query = functools.partial(
self._dns_resolver.query, host, record_type,
source=self._bind_address)
try:
answer = yield from event_loop.run_in_executor(None, query)
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as error:
# dnspython doesn't raise an instance with a message, so use the
# class name instead.
raise DNSNotFound(
'DNS resolution failed: {error}'
.format(error=wpull.util.get_exception_message(error))
) from error
except dns.exception.DNSException as error:
raise NetworkError(
'DNS resolution error: {error}'
.format(error=wpull.util.get_exception_message(error))
) from error
else:
return answer
@asyncio.coroutine
def _getaddrinfo(self, host: str, family: int = socket.AF_UNSPEC) \
-> List[tuple]:
'''Query DNS using system resolver.
Coroutine.
'''
event_loop = asyncio.get_event_loop()
query = event_loop.getaddrinfo(host, 0, family=family,
proto=socket.IPPROTO_TCP)
if self._timeout:
query = asyncio.wait_for(query, self._timeout)
try:
results = yield from query
except socket.error as error:
if error.errno in (
socket.EAI_FAIL,
socket.EAI_NODATA,
socket.EAI_NONAME):
raise DNSNotFound(
'DNS resolution failed: {error}'.format(error=error)
) from error
else:
raise NetworkError(
'DNS resolution error: {error}'.format(error=error)
) from error
except asyncio.TimeoutError as error:
raise NetworkError('DNS resolve timed out.') from error
else:
return results
@classmethod
def _convert_dns_answer(cls, answer: dns.resolver.Answer) \
-> Iterable[AddressInfo]:
'''Convert the DNS answer to address info.'''
assert answer.rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA)
if answer.rdtype == dns.rdatatype.A:
family = socket.AF_INET
else:
family = socket.AF_INET6
for record in answer:
ip_address = record.to_text()
if family == socket.AF_INET6:
flow_info, control_id = cls._get_ipv6_info(ip_address)
else:
flow_info = control_id = None
yield AddressInfo(ip_address, family, flow_info, control_id)
@classmethod
def _convert_addrinfo(cls, results: List[tuple]) -> Iterable[AddressInfo]:
'''Convert the result list to address info.'''
for result in results:
family = result[0]
address = result[4]
ip_address = address[0]
if family == socket.AF_INET6:
flow_info = address[2]
control_id = address[3]
else:
flow_info = None
control_id = None
yield AddressInfo(ip_address, family, flow_info, control_id)
@classmethod
def _get_ipv6_info(cls, ip_address: str) -> tuple:
'''Extract the flow info and control id.'''
results = socket.getaddrinfo(
ip_address, 0, proto=socket.IPPROTO_TCP,
flags=socket.AI_NUMERICHOST)
flow_info = results[0][4][2]
control_id = results[0][4][3]
return flow_info, control_id
@staticmethod
@hook_interface(PluginFunctions.resolve_dns)
def resolve_dns(host: str) -> str:
'''Resolve the hostname to an IP address.
Args:
host: The hostname.
This callback is to override the DNS lookup.
It is useful when the server is no longer available to the public.
Typically, large infrastructures will change the DNS settings to
make clients no longer hit the front-ends, but rather go towards
a static HTTP server with a "We've been acqui-hired!" page. In these
cases, the original servers may still be online.
Returns:
str, None: ``None`` to use the original behavior or a string
containing an IP address or an alternate hostname.
'''
return host
@staticmethod
@event_interface(PluginFunctions.resolve_dns_result)
def resolve_dns_result(host: str, result: ResolveResult):
'''Callback when a DNS resolution has been made.'''
|
class Resolver(HookableMixin):
'''Asynchronous resolver with cache and timeout.
Args:
family: IPv4 or IPv6 preference.
timeout: A time in seconds used for timing-out requests. If not
specified, this class relies on the underlying libraries.
bind_address: An IP address to bind DNS requests if possible.
cache: Cache to store results of any query.
rotate: If result is cached rotates the results, otherwise, shuffle
the results.
'''
def __init__(
self,
family: IPFamilyPreference = IPFamilyPreference.any,
timeout: Optional[float] = None,
bind_address: Optional[str] = None,
cache: Optional[FIFOCache] = None,
rotate: bool = False):
pass
@classmethod
def new_cache(cls) -> FIFOCache:
'''Return a default cache'''
pass
@asyncio.coroutine
def resolve(self, host: str) -> ResolveResult:
'''Resolve hostname.
Args:
host: Hostname.
Returns:
Resolved IP addresses.
Raises:
DNSNotFound if the hostname could not be resolved or
NetworkError if there was an error connecting to DNS servers.
Coroutine.
'''
pass
@asyncio.coroutine
def _query_dns(self, host: str, family: int = socket.AF_INET) \
-> dns.resolver.Answer:
'''Query DNS using Python.
Coroutine.
'''
pass
@asyncio.coroutine
def _getaddrinfo(self, host: str, family: int = socket.AF_UNSPEC) \
-> List[tuple]:
'''Query DNS using system resolver.
Coroutine.
'''
pass
@classmethod
def _convert_dns_answer(cls, answer: dns.resolver.Answer) \
-> Iterable[AddressInfo]:
'''Convert the DNS answer to address info.'''
pass
@classmethod
def _convert_addrinfo(cls, results: List[tuple]) -> Iterable[AddressInfo]:
'''Convert the result list to address info.'''
pass
@classmethod
def _get_ipv6_info(cls, ip_address: str) -> tuple:
'''Extract the flow info and control id.'''
pass
@staticmethod
@hook_interface(PluginFunctions.resolve_dns)
def resolve_dns(host: str) -> str:
'''Resolve the hostname to an IP address.
Args:
host: The hostname.
This callback is to override the DNS lookup.
It is useful when the server is no longer available to the public.
Typically, large infrastructures will change the DNS settings to
make clients no longer hit the front-ends, but rather go towards
a static HTTP server with a "We've been acqui-hired!" page. In these
cases, the original servers may still be online.
Returns:
str, None: ``None`` to use the original behavior or a string
containing an IP address or an alternate hostname.
'''
pass
@staticmethod
@event_interface(PluginFunctions.resolve_dns_result)
def resolve_dns_result(host: str, result: ResolveResult):
'''Callback when a DNS resolution has been made.'''
pass
| 22 | 10 | 24 | 5 | 16 | 4 | 4 | 0.28 | 1 | 17 | 8 | 1 | 4 | 7 | 10 | 12 | 270 | 56 | 167 | 67 | 136 | 47 | 118 | 47 | 107 | 14 | 2 | 2 | 35 |
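On a cache hit Resolver rotates the cached address list; on a fresh result it shuffles. A quick sketch of the difference using plain stdlib containers; ResolveResult's real rotate()/shuffle() live in wpull and may differ in detail:

import collections
import random

addresses = collections.deque(['10.0.0.1', '10.0.0.2', '10.0.0.3'])

def rotate(addrs):
    addrs.rotate(-1)       # deterministic round-robin across cache hits

def shuffle(addrs):
    random.shuffle(addrs)  # uniform pick when the result is fresh

rotate(addresses)
print(list(addresses))     # ['10.0.0.2', '10.0.0.3', '10.0.0.1']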
6,571 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/pipeline/progress.py
|
wpull.pipeline.progress.ProtocolProgress.State
|
class State(enum.Enum):
idle = 'idle'
sending_request = 'sending_request'
sending_body = 'sending_body'
receiving_response = 'receiving_response'
receiving_body = 'receiving_body'
|
class State(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 6 | 0 | 6 | 6 | 5 | 0 | 6 | 6 | 5 | 0 | 4 | 0 | 0 |
6,572 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/processor/rule.py
|
wpull.processor.rule.FetchRule
|
class FetchRule(HookableMixin):
'''Decide on what URLs should be fetched.'''
def __init__(self, url_filter: DemuxURLFilter = None,
robots_txt_checker: RobotsTxtChecker = None,
http_login: Optional[Tuple[str, str]] = None,
ftp_login: Optional[Tuple[str, str]] = None,
duration_timeout: Optional[int] = None):
super().__init__()
self._url_filter = url_filter
self._robots_txt_checker = robots_txt_checker
self.http_login = http_login
self.ftp_login = ftp_login
self.duration_timeout = duration_timeout
self.hook_dispatcher.register(PluginFunctions.accept_url)
@asyncio.coroutine
def consult_robots_txt(self, request: HTTPRequest) -> bool:
'''Consult by fetching robots.txt as needed.
Args:
request: The request to be made
to get the file.
Returns:
True if can fetch
Coroutine
'''
if not self._robots_txt_checker:
return True
result = yield from self._robots_txt_checker.can_fetch(request)
return result
def consult_helix_fossil(self) -> bool:
'''Consult the helix fossil.
Returns:
True if can fetch
'''
return random.random() < 0.75
def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool = False) \
-> Tuple[bool, str, dict]:
'''Consult the URL filter.
Args:
url_record: The URL record.
is_redirect: Whether the request is a redirect and it is
desired that it spans hosts.
Returns
tuple:
1. bool: The verdict
2. str: A short reason string: nofilters, filters, redirect
3. dict: The result from :func:`DemuxURLFilter.test_info`
'''
if not self._url_filter:
return True, 'nofilters', None
test_info = self._url_filter.test_info(url_info, url_record)
verdict = test_info['verdict']
if verdict:
reason = 'filters'
elif is_redirect and self.is_only_span_hosts_failed(test_info):
verdict = True
reason = 'redirect'
else:
reason = 'filters'
return verdict, reason, test_info
@classmethod
def is_only_span_hosts_failed(cls, test_info: dict) -> bool:
'''Return whether only the SpanHostsFilter failed.'''
return (
len(test_info['failed']) == 1 and
'SpanHostsFilter' in test_info['map'] and
not test_info['map']['SpanHostsFilter']
)
def consult_hook(self, item_session: ItemSession, verdict: bool,
reason: str, test_info: dict):
'''Consult the scripting hook.
Returns:
tuple: (bool, str)
'''
try:
reasons = {
'filters': test_info['map'],
'reason': reason,
}
verdict = self.hook_dispatcher.call(
PluginFunctions.accept_url, item_session, verdict, reasons,
)
reason = 'callback_hook'
except HookDisconnected:
pass
return verdict, reason
@staticmethod
@hook_interface(PluginFunctions.accept_url)
def plugin_accept_url(item_session: ItemSession, verdict: bool, reasons: dict) -> bool:
'''Return whether to download this URL.
Args:
item_session: Current URL item.
verdict: A bool indicating whether Wpull wants to download
the URL.
reasons: A dict containing information for the verdict:
* ``filters`` (dict): A mapping (str to bool) from filter name
to whether the filter passed or not.
* ``reason`` (str): A short reason string. Current values are:
``filters``, ``robots``, ``redirect``.
Returns:
If ``True``, the URL should be downloaded. Otherwise, the URL
is skipped.
'''
return verdict
@asyncio.coroutine
def check_initial_web_request(self, item_session: ItemSession, request: HTTPRequest) -> Tuple[bool, str]:
'''Check robots.txt, URL filters, and scripting hook.
Returns:
tuple: (bool, str)
Coroutine.
'''
verdict, reason, test_info = self.consult_filters(
item_session.request.url_info, item_session.url_record)
if verdict and self._robots_txt_checker:
can_fetch = yield from self.consult_robots_txt(request)
if not can_fetch:
verdict = False
reason = 'robotstxt'
verdict, reason = self.consult_hook(
item_session, verdict, reason, test_info
)
return verdict, reason
def check_subsequent_web_request(self, item_session: ItemSession,
is_redirect: bool = False) -> Tuple[bool, str]:
'''Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
'''
verdict, reason, test_info = self.consult_filters(
item_session.request.url_info,
item_session.url_record, is_redirect=is_redirect)
# TODO: provide an option to change this
if item_session.is_virtual:
verdict = True
verdict, reason = self.consult_hook(item_session, verdict,
reason, test_info)
return verdict, reason
def check_generic_request(self, item_session: ItemSession) -> Tuple[bool, str]:
'''Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
'''
verdict, reason, test_info = self.consult_filters(
item_session.request.url_info,
item_session.url_record)
verdict, reason = self.consult_hook(item_session, verdict,
reason, test_info)
return verdict, reason
check_ftp_request = check_generic_request
|
class FetchRule(HookableMixin):
'''Decide on what URLs should be fetched.'''
def __init__(self, url_filter: DemuxURLFilter = None,
robots_txt_checker: RobotsTxtChecker = None,
http_login: Optional[Tuple[str, str]] = None,
ftp_login: Optional[Tuple[str, str]] = None,
duration_timeout: Optional[int] = None):
pass
@asyncio.coroutine
def consult_robots_txt(self, request: HTTPRequest) -> bool:
'''Consult by fetching robots.txt as needed.
Args:
request: The request to be made
to get the file.
Returns:
True if can fetch
Coroutine
'''
pass
def consult_helix_fossil(self) -> bool:
'''Consult the helix fossil.
Returns:
True if can fetch
'''
pass
def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool = False) \
-> Tuple[bool, str, dict]:
'''Consult the URL filter.
Args:
url_record: The URL record.
is_redirect: Whether the request is a redirect and it is
desired that it spans hosts.
Returns
tuple:
1. bool: The verdict
2. str: A short reason string: nofilters, filters, redirect
3. dict: The result from :func:`DemuxURLFilter.test_info`
'''
pass
@classmethod
def is_only_span_hosts_failed(cls, test_info: dict) -> bool:
'''Return whether only the SpanHostsFilter failed.'''
pass
def consult_hook(self, item_session: ItemSession, verdict: bool,
reason: str, test_info: dict):
'''Consult the scripting hook.
Returns:
tuple: (bool, str)
'''
pass
@staticmethod
@hook_interface(PluginFunctions.accept_url)
def plugin_accept_url(item_session: ItemSession, verdict: bool, reasons: dict) -> bool:
'''Return whether to download this URL.
Args:
item_session: Current URL item.
verdict: A bool indicating whether Wpull wants to download
the URL.
reasons: A dict containing information for the verdict:
* ``filters`` (dict): A mapping (str to bool) from filter name
to whether the filter passed or not.
* ``reason`` (str): A short reason string. Current values are:
``filters``, ``robots``, ``redirect``.
Returns:
If ``True``, the URL should be downloaded. Otherwise, the URL
is skipped.
'''
pass
@asyncio.coroutine
def check_initial_web_request(self, item_session: ItemSession, request: HTTPRequest) -> Tuple[bool, str]:
'''Check robots.txt, URL filters, and scripting hook.
Returns:
tuple: (bool, str)
Coroutine.
'''
pass
def check_subsequent_web_request(self, item_session: ItemSession,
is_redirect: bool = False) -> Tuple[bool, str]:
'''Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
'''
pass
def check_generic_request(self, item_session: ItemSession) -> Tuple[bool, str]:
'''Check URL filters and scripting hook.
Returns:
tuple: (bool, str)
'''
pass
| 16 | 10 | 17 | 3 | 8 | 6 | 2 | 0.63 | 1 | 12 | 7 | 0 | 8 | 5 | 10 | 12 | 190 | 43 | 90 | 37 | 67 | 57 | 59 | 26 | 48 | 4 | 2 | 2 | 18 |
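The verdict logic in consult_filters plus is_only_span_hosts_failed, reduced to plain data: accept when all filters pass, and allow one special case where only the span-hosts check failed on a redirect. A sketch under that reading of the code; the filter names mirror the record above:

def consult_filters(test_map, is_redirect=False):
    failed = [name for name, passed in test_map.items() if not passed]
    if not failed:
        return True, 'filters'
    if is_redirect and failed == ['SpanHostsFilter']:
        # Redirects may span hosts even when SpanHostsFilter says no.
        return True, 'redirect'
    return False, 'filters'

print(consult_filters({'SchemeFilter': True, 'SpanHostsFilter': False},
                      is_redirect=True))   # (True, 'redirect')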
6,573 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/urlfilter_test.py
|
wpull.urlfilter_test.TestURLFilter
|
class TestURLFilter(unittest.TestCase):
def test_scheme_filter(self):
record = URLRecord()
url_filter = SchemeFilter()
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.net'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('https://example.net'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('ftp://example.net'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('mailto:user@example.com'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse("javascript:alert('hello!')"),
record
))
def test_https_filter(self):
record = URLRecord()
url_filter = HTTPSOnlyFilter()
self.assertFalse(url_filter.test(
URLInfo.parse('http://example.net'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('https://example.net'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('mailto:user@example.com'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse("javascript:alert('hello!')"),
record
))
def test_follow_ftp_filter(self):
record = URLRecord()
url_filter = FollowFTPFilter()
self.assertTrue(url_filter.test(
URLInfo.parse('http://wolf.farts/1'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('https://wolf.farts/1'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('mailto:wolf@wolf.farts'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('ftp://wolf.farts/'),
record
))
record.parent_url = 'http://wolf.farts'
self.assertTrue(url_filter.test(
URLInfo.parse('http://wolf.farts/1'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('https://wolf.farts/1'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('mailto:wolf@wolf.farts'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('ftp://wolf.farts/'),
record
))
url_filter = FollowFTPFilter(follow=True)
self.assertTrue(url_filter.test(
URLInfo.parse('http://wolf.farts/1'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('https://wolf.farts/1'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('mailto:wolf@wolf.farts'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('ftp://wolf.farts/'),
record
))
record.parent_url = 'ftp://wolf.farts'
self.assertTrue(url_filter.test(
URLInfo.parse('http://wolf.farts/1'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('https://wolf.farts/1'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('mailto:wolf@wolf.farts'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('ftp://wolf.farts/'),
record
))
url_filter = FollowFTPFilter(follow=True)
self.assertTrue(url_filter.test(
URLInfo.parse('http://wolf.farts/1'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('https://wolf.farts/1'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('mailto:wolf@wolf.farts'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('ftp://wolf.farts/'),
record
))
def test_wget_domain_filter(self):
url_filter = BackwardDomainFilter(
accepted=['g.example.com', 'cdn.example.com', 'cdn.test'])
self.assertTrue(
url_filter.test(URLInfo.parse('g.example.com'), None))
self.assertTrue(
url_filter.test(URLInfo.parse('blog.example.com'), None))
self.assertTrue(
url_filter.test(URLInfo.parse('cdn.example.com'), None))
self.assertTrue(
url_filter.test(URLInfo.parse('server1.cdn.test'), None))
self.assertFalse(
url_filter.test(URLInfo.parse('example.com'), None))
self.assertFalse(
url_filter.test(URLInfo.parse("javascript:alert('hello!')"), None))
url_filter = BackwardDomainFilter(
accepted=['g.example.com', 'cdn.example.com', 'cdn.test'],
rejected=['blog.example.com']
)
self.assertTrue(
url_filter.test(URLInfo.parse('g.example.com'), None))
self.assertFalse(
url_filter.test(URLInfo.parse('blog.example.com'), None))
self.assertTrue(
url_filter.test(URLInfo.parse('cdn.example.com'), None))
self.assertTrue(
url_filter.test(URLInfo.parse('server1.cdn.test'), None))
self.assertFalse(
url_filter.test(URLInfo.parse('example.com'), None))
def test_hostname_filter(self):
url_filter = HostnameFilter(
accepted=['g.example.com', 'cdn.example.com', 'cdn.test'])
self.assertTrue(
url_filter.test(URLInfo.parse('g.example.com'), None))
self.assertFalse(
url_filter.test(URLInfo.parse('blog.example.com'), None))
self.assertTrue(
url_filter.test(URLInfo.parse('cdn.example.com'), None))
self.assertFalse(
url_filter.test(URLInfo.parse('server1.cdn.test'), None))
self.assertFalse(
url_filter.test(URLInfo.parse('example.com'), None))
self.assertFalse(
url_filter.test(URLInfo.parse("javascript:alert('hello!')"), None))
url_filter = HostnameFilter(
accepted=['g.example.com', 'cdn.example.com', 'cdn.test'],
rejected=['blog.example.com']
)
self.assertTrue(
url_filter.test(URLInfo.parse('g.example.com'), None))
self.assertFalse(
url_filter.test(URLInfo.parse('blog.example.com'), None))
self.assertTrue(
url_filter.test(URLInfo.parse('cdn.example.com'), None))
self.assertFalse(
url_filter.test(URLInfo.parse('server1.cdn.test'), None))
self.assertFalse(
url_filter.test(URLInfo.parse('example.com'), None))
def test_recursive_filter_off(self):
record = URLRecord()
record.level = 0
url_filter = RecursiveFilter()
self.assertTrue(url_filter.test(None, record))
record.level = 1
self.assertFalse(url_filter.test(None, record))
def test_recursive_filter_on(self):
record = URLRecord()
record.level = 0
url_filter = RecursiveFilter(enabled=True)
self.assertTrue(url_filter.test(None, record))
record.level = 1
self.assertTrue(url_filter.test(None, record))
def test_recursive_filter_requisites(self):
record = URLRecord()
record.level = 0
record.inline_level = 1
url_filter = RecursiveFilter(page_requisites=True)
self.assertTrue(url_filter.test(None, record))
def test_level_filter(self):
record = URLRecord()
record.level = 4
url_filter = LevelFilter(0)
self.assertTrue(url_filter.test(None, record))
url_filter = LevelFilter(5)
record.level = 5
self.assertTrue(url_filter.test(None, record))
record.level = 6
self.assertFalse(url_filter.test(None, record))
url_filter = LevelFilter(5)
record.inline_level = 1
record.level = 5
self.assertTrue(url_filter.test(None, record))
record.level = 6
self.assertTrue(url_filter.test(None, record))
record.level = 7
self.assertTrue(url_filter.test(None, record))
record.level = 8
self.assertFalse(url_filter.test(None, record))
url_filter = LevelFilter(0)
record.inline_level = 1
self.assertTrue(url_filter.test(None, record))
record.inline_level = 2
self.assertTrue(url_filter.test(None, record))
record.inline_level = 3
self.assertTrue(url_filter.test(None, record))
record.inline_level = 4
self.assertTrue(url_filter.test(None, record))
record.inline_level = 5
self.assertTrue(url_filter.test(None, record))
record.inline_level = 6
self.assertFalse(url_filter.test(None, record))
record.level = 1
url_filter = LevelFilter(0, inline_max_depth=0)
record.inline_level = 1000
self.assertTrue(url_filter.test(None, record))
url_filter = LevelFilter(5, inline_max_depth=1)
record.inline_level = 1
self.assertTrue(url_filter.test(None, record))
record.inline_level = 2
self.assertFalse(url_filter.test(None, record))
def test_tries_filter(self):
record = URLRecord()
record.try_count = 4
url_filter = TriesFilter(0)
self.assertTrue(url_filter.test(None, record))
url_filter = TriesFilter(5)
record.try_count = 4
self.assertTrue(url_filter.test(None, record))
record.try_count = 5
self.assertFalse(url_filter.test(None, record))
def test_parent_filter(self):
record = URLRecord()
url_filter = ParentFilter()
record.root_url = 'http://example.com/blog/topic2/'
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.com/blog/topic2/'),
record
))
record.root_url = 'http://example.com/blog/topic1/'
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.com/blog/topic1/blah.html'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('https://example.com/blog/topic1/blah2.html'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('http://example.com/blog/'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('https://example.com/blog/'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('http://somewhere.com/'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('https://somewhere.com/'),
record
))
record.inline_level = 1
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.com/styles.css'),
record
))
def test_span_hosts_filter(self):
record = URLRecord()
record.url = 'http://example.com'
url_filter = SpanHostsFilter([
URLInfo.parse('http://example.com/blog/').hostname,
],
enabled=False
)
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.com/blog/topic1/blah.html'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('http://hotdog.example/blog/topic1/blah.html'),
record
))
url_filter = SpanHostsFilter([
URLInfo.parse('http://example.com/blog/').hostname,
],
enabled=True
)
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.com/blog/topic1/blah.html'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('http://hotdog.example/blog/topic1/blah.html'),
record
))
url_filter = SpanHostsFilter([
URLInfo.parse('http://example.com/blog/').hostname,
],
page_requisites=True
)
record = URLRecord()
record.url = 'http://1.example.com/'
record.inline_level = 1
self.assertTrue(url_filter.test(
URLInfo.parse('http://1.example.com/'),
record
))
url_filter = SpanHostsFilter([
URLInfo.parse('http://example.com/blog/').hostname,
],
linked_pages=True,
)
record = URLRecord()
record.url = 'http://1.example.com/'
record.parent_url = 'http://example.com/blog/'
self.assertTrue(url_filter.test(
URLInfo.parse('http://1.example.com/'),
record
))
record = URLRecord()
record.url = 'http://1.example.com/blah.html'
record.parent_url = 'http://1.example.com/'
self.assertFalse(url_filter.test(
URLInfo.parse('http://1.example.com/blah.html'),
record
))
def test_regex_filter(self):
record = URLRecord()
record.url = 'http://example.com/blog/'
url_filter = RegexFilter()
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.net'),
record
))
url_filter = RegexFilter(accepted=r'blo[a-z]/$')
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.net/blob/'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('http://example.net/blob/123'),
record
))
url_filter = RegexFilter(rejected=r'\.gif$')
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.net/blob/'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('http://example.net/blob/123.gif'),
record
))
def test_directory_filter(self):
record = URLRecord()
record.url = 'http://example.com/blog/'
url_filter = DirectoryFilter()
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.com'),
record
))
url_filter = DirectoryFilter(accepted=['/blog'])
self.assertFalse(url_filter.test(
URLInfo.parse('http://example.com'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.com/blog/'),
record
))
url_filter = DirectoryFilter(rejected=['/cgi-bin/'])
self.assertTrue(url_filter.test(
URLInfo.parse('http://example.com/blog/'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('http://example.com/cgi-bin'),
record
))
def test_backward_filename_filter(self):
url_filter = BackwardFilenameFilter(
accepted=['html', 'image.*.png'],
rejected=['bmp', 'jp[eg]', 'image.123.png']
)
record = URLRecord()
record.url = 'http://example.com/'
self.assertTrue(url_filter.test(
URLInfo.parse('http://example/index.html'),
record
))
self.assertTrue(url_filter.test(
URLInfo.parse('http://example/myimage.1003.png'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('http://example/myimage.123.png'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('http://example/blah.png'),
record
))
self.assertFalse(url_filter.test(
URLInfo.parse('http://example/image.1003.png.bmp'),
record
))
|
class TestURLFilter(unittest.TestCase):
def test_scheme_filter(self):
pass
def test_https_filter(self):
pass
def test_follow_ftp_filter(self):
pass
def test_wget_domain_filter(self):
pass
def test_hostname_filter(self):
pass
def test_recursive_filter_off(self):
pass
def test_recursive_filter_on(self):
pass
def test_recursive_filter_requisites(self):
pass
def test_level_filter(self):
pass
def test_tries_filter(self):
pass
def test_parent_filter(self):
pass
def test_span_hosts_filter(self):
pass
def test_regex_filter(self):
pass
def test_directory_filter(self):
pass
def test_backward_filename_filter(self):
pass
| 16 | 0 | 33 | 3 | 29 | 0 | 1 | 0 | 1 | 15 | 15 | 0 | 15 | 0 | 15 | 87 | 503 | 66 | 437 | 44 | 421 | 0 | 211 | 44 | 195 | 1 | 2 | 0 | 15 |
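test_level_filter above encodes a subtle rule: page requisites (inline_level > 0) are allowed a few levels beyond the recursion limit, up to an inline depth cap. A sketch that reproduces the assertions; the "+2 slack" constant and the default inline depth of 5 are inferred from the test, not taken from wpull's LevelFilter source:

def level_filter_test(max_level, level, inline_level=0, inline_max_depth=5):
    if inline_level:
        if inline_max_depth and inline_level > inline_max_depth:
            return False
        # Requisites get a little extra depth past the limit.
        return not max_level or level <= max_level + 2
    return not max_level or level <= max_level

print(level_filter_test(5, 7, inline_level=1))  # True, matches the test
print(level_filter_test(5, 8, inline_level=1))  # False, matches the test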
6,574 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/writer.py
|
wpull.writer.SingleDocumentWriter
|
class SingleDocumentWriter(BaseWriter):
'''Writer that writes all the data into a single file.'''
def __init__(self, stream: BinaryIO, headers_included: bool=False):
self._stream = stream
self._headers_included = headers_included
def session(self) -> SingleDocumentWriterSession:
return SingleDocumentWriterSession(self._stream, self._headers_included)
|
class SingleDocumentWriter(BaseWriter):
'''Writer that writes all the data into a single file.'''
def __init__(self, stream: BinaryIO, headers_included: bool=False):
pass
def session(self) -> SingleDocumentWriterSession:
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 1 | 0.17 | 1 | 3 | 1 | 0 | 2 | 2 | 2 | 23 | 8 | 1 | 6 | 5 | 3 | 1 | 6 | 5 | 3 | 1 | 4 | 0 | 2 |
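SingleDocumentWriter illustrates the writer/session split: the writer holds shared state (one output stream) and hands out per-request sessions. A stripped-down version of that split; the save_document method below is a guess at the minimal session contract, not wpull's full BaseWriterSession API:

import io

class SingleStreamWriter:
    def __init__(self, stream):
        self._stream = stream
    def session(self):
        # Each request gets its own session over the shared stream.
        return SingleStreamSession(self._stream)

class SingleStreamSession:
    def __init__(self, stream):
        self._stream = stream
    def save_document(self, data):
        self._stream.write(data)

buf = io.BytesIO()
writer = SingleStreamWriter(buf)
writer.session().save_document(b'<html></html>')
print(buf.getvalue())  # b'<html></html>'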
6,575 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/processor/rule.py
|
wpull.processor.rule.ProcessingRule
|
class ProcessingRule(HookableMixin):
'''Document processing rules.
Args:
fetch_rule: The FetchRule instance.
document_scraper: The document
scraper.
'''
def __init__(self, fetch_rule: FetchRule,
document_scraper: DemuxDocumentScraper = None,
sitemaps: bool = False,
url_rewriter: URLRewriter = None):
super().__init__()
self._fetch_rule = fetch_rule
self._document_scraper = document_scraper
self._sitemaps = sitemaps
self._url_rewriter = url_rewriter
self.event_dispatcher.register(PluginFunctions.get_urls)
parse_url = staticmethod(wpull.url.parse_url_or_log)
def add_extra_urls(self, item_session: ItemSession):
'''Add additional URLs such as robots.txt, favicon.ico.'''
if item_session.url_record.level == 0 and self._sitemaps:
extra_url_infos = (
self.parse_url(
'{0}://{1}/robots.txt'.format(
item_session.url_record.url_info.scheme,
item_session.url_record.url_info.hostname_with_port)
),
self.parse_url(
'{0}://{1}/sitemap.xml'.format(
item_session.url_record.url_info.scheme,
item_session.url_record.url_info.hostname_with_port)
)
)
for url_info in extra_url_infos:
item_session.add_child_url(url_info.url)
def scrape_document(self, item_session: ItemSession):
'''Process document for links.'''
self.event_dispatcher.notify(
PluginFunctions.get_urls, item_session
)
if not self._document_scraper:
return
demux_info = self._document_scraper.scrape_info(
item_session.request, item_session.response,
item_session.url_record.link_type
)
num_inline_urls = 0
num_linked_urls = 0
for scraper, scrape_result in demux_info.items():
new_inline, new_linked = self._process_scrape_info(
scraper, scrape_result, item_session
)
num_inline_urls += new_inline
num_linked_urls += new_linked
_logger.debug('Candidate URLs: inline={0} linked={1}',
num_inline_urls, num_linked_urls
)
@staticmethod
@event_interface(PluginFunctions.get_urls)
def plugin_get_urls(item_session: ItemSession):
'''Add additional URLs to be added to the URL Table.
When this event is dispatched, the caller should add any URLs needed
using :meth:`.ItemSession.add_child_url`.
'''
def _process_scrape_info(self, scraper: BaseScraper,
scrape_result: ScrapeResult,
item_session: ItemSession):
'''Collect the URLs from the scrape info dict.'''
if not scrape_result:
return 0, 0
num_inline = 0
num_linked = 0
for link_context in scrape_result.link_contexts:
url_info = self.parse_url(link_context.link)
if not url_info:
continue
url_info = self.rewrite_url(url_info)
child_url_record = item_session.child_url_record(
url_info.url, inline=link_context.inline
)
if not self._fetch_rule.consult_filters(item_session.request.url_info, child_url_record)[0]:
continue
if link_context.inline:
num_inline += 1
else:
num_linked += 1
item_session.add_child_url(url_info.url, inline=link_context.inline,
link_type=link_context.link_type)
return num_inline, num_linked
def rewrite_url(self, url_info: URLInfo) -> URLInfo:
'''Return a rewritten URL such as escaped fragment.'''
if self._url_rewriter:
return self._url_rewriter.rewrite(url_info)
else:
return url_info
|
class ProcessingRule(HookableMixin):
'''Document processing rules.
Args:
fetch_rule: The FetchRule instance.
document_scraper: The document
scraper.
'''
def __init__(self, fetch_rule: FetchRule,
document_scraper: DemuxDocumentScraper = None,
sitemaps: bool = False,
url_rewriter: URLRewriter = None):
pass
def add_extra_urls(self, item_session: ItemSession):
'''Add additional URLs such as robots.txt, favicon.ico.'''
pass
def scrape_document(self, item_session: ItemSession):
'''Process document for links.'''
pass
@staticmethod
@event_interface(PluginFunctions.get_urls)
def plugin_get_urls(item_session: ItemSession):
'''Add additional URLs to be added to the URL Table.
When this event is dispatched, the caller should add any URLs needed
using :meth:`.ItemSession.add_child_url`.
'''
pass
def _process_scrape_info(self, scraper: BaseScraper,
scrape_result: ScrapeResult,
item_session: ItemSession):
'''Collect the URLs from the scrape info dict.'''
pass
def rewrite_url(self, url_info: URLInfo) -> URLInfo:
'''Return a rewritten URL such as escaped fragment.'''
pass
| 9 | 6 | 17 | 3 | 13 | 1 | 3 | 0.17 | 1 | 10 | 8 | 0 | 5 | 4 | 6 | 8 | 120 | 25 | 81 | 30 | 67 | 14 | 49 | 24 | 42 | 6 | 2 | 2 | 16 |
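add_extra_urls composes robots.txt and sitemap.xml URLs from the scheme and host-with-port of a level-0 item. The same composition in isolation; urllib.parse.urlsplit stands in for wpull's URLInfo here:

from urllib.parse import urlsplit

def extra_urls(url):
    parts = urlsplit(url)
    root = '{0}://{1}'.format(parts.scheme, parts.netloc)
    return [root + '/robots.txt', root + '/sitemap.xml']

print(extra_urls('http://example.com:8080/blog/post.html'))
# ['http://example.com:8080/robots.txt', 'http://example.com:8080/sitemap.xml']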
6,576 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/protocol/abstract/client.py
|
wpull.protocol.abstract.client.BaseClient.ClientEvent
|
class ClientEvent(enum.Enum):
new_session = 'new_session'
|
class ClientEvent(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 4 | 0 | 0 |
6,577 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/protocol/abstract/client.py
|
wpull.protocol.abstract.client.BaseSession.SessionEvent
|
class SessionEvent(enum.Enum):
begin_session = 'begin_session'
end_session = 'end_session'
|
class SessionEvent(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 4 | 0 | 0 |
6,578 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/protocol/ftp/client.py
|
wpull.protocol.ftp.client.Session.Event
|
class Event(enum.Enum):
begin_control = 'begin_control'
control_send_data = 'control_send_data'
control_receive_data = 'control_receive_data'
end_control = 'end_control'
begin_transfer = 'begin_transfer'
transfer_send_data = 'transfer_send_data'
transfer_receive_data = 'transfer_receive_data'
end_transfer = 'end_transfer'
|
class Event(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 9 | 0 | 9 | 9 | 8 | 0 | 9 | 9 | 8 | 0 | 4 | 0 | 0 |
6,579 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/protocol/http/client.py
|
wpull.protocol.http.client.Session.Event
|
class Event(enum.Enum):
begin_request = 'begin_request'
request_data = 'request_data'
end_request = 'end_request'
begin_response = 'begin_response'
response_data = 'response_data'
end_response = 'end_response'
|
class Event(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 7 | 0 | 7 | 7 | 6 | 0 | 7 | 7 | 6 | 0 | 4 | 0 | 0 |
6,580 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/proxy/server.py
|
wpull.proxy.server.HTTPProxyServer.Event
|
class Event(enum.Enum):
begin_session = 'begin_session'
end_session = 'end_session'
|
class Event(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 4 | 0 | 0 |
6,581 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/proxy/server.py
|
wpull.proxy.server.HTTPProxySession.Event
|
class Event(enum.Enum):
client_request = 'client_request'
server_begin_response = 'server_begin_response'
server_end_response = 'server_end_response'
server_response_error = 'server_response_error'
|
class Event(enum.Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 5 | 0 | 5 | 5 | 4 | 0 | 5 | 5 | 4 | 0 | 4 | 0 | 0 |
6,582 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/warc/recorder_test.py
|
wpull.warc.recorder_test.TestWARC.test_warc_recorder_rollback.BadRecord
|
class BadRecord(WARCRecord):
def __init__(self, original_record):
super().__init__()
self.block_file = original_record.block_file
self.fields = original_record.fields
def __iter__(self):
for dummy in range(1000):
yield b"where's my elephant?"
raise OSError('Oops')
|
class BadRecord(WARCRecord):
def __init__(self, original_record):
pass
def __iter__(self):
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 9 | 10 | 1 | 9 | 6 | 6 | 0 | 9 | 6 | 6 | 2 | 2 | 1 | 3 |
6,583 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/warc/recorder_test.py
|
wpull.warc.recorder_test.TestWARC.test_warc_recorder_journal.MockRecord
|
class MockRecord(WARCRecord):
def __init__(self, original_record):
super().__init__()
self.block_file = original_record.block_file
self.fields = original_record.fields
def __iter__(self):
print(list(os.walk('.')))
test_instance.assertTrue(
os.path.exists(warc_filename + '-wpullinc')
)
for dummy in range(1000):
yield b"where's my elephant?"
|
class MockRecord(WARCRecord):
def __init__(self, original_record):
pass
def __iter__(self):
pass
| 3 | 0 | 6 | 1 | 6 | 0 | 2 | 0 | 1 | 4 | 1 | 0 | 2 | 2 | 2 | 9 | 14 | 2 | 12 | 6 | 9 | 0 | 10 | 6 | 7 | 2 | 2 | 1 | 3 |
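BadRecord and MockRecord above are test doubles whose byte iterator fails partway through, used to exercise the WARC recorder's journal and rollback paths. The trick as a standalone pattern:

def failing_payload(chunks=3):
    # Yield a few chunks, then blow up mid-stream like BadRecord does.
    for i in range(chunks):
        yield b'chunk %d\n' % i
    raise OSError('Oops')

received = []
try:
    for chunk in failing_payload():
        received.append(chunk)
except OSError as error:
    print('consumed', len(received), 'chunks before:', error)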
6,584 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/testing/integration/sample_user_scripts/stopper.plugin.py
|
stopper.plugin.MyPlugin
|
class MyPlugin(WpullPlugin):
@hook(PluginFunctions.handle_response)
def stop(self):
print('stop')
return Actions.STOP
|
class MyPlugin(WpullPlugin):
@hook(PluginFunctions.handle_response)
def stop(self):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 4 | 5 | 0 | 5 | 3 | 2 | 0 | 4 | 2 | 2 | 1 | 2 | 0 | 1 |
6,585 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/writer.py
|
wpull.writer.TimestampingFileWriterSession
|
class TimestampingFileWriterSession(BaseFileWriterSession):
def process_request(self, request: BaseRequest):
request = super().process_request(request)
orig_file = '{0}.orig'.format(self._filename)
if os.path.exists(orig_file):
modified_time = os.path.getmtime(orig_file)
elif os.path.exists(self._filename):
modified_time = os.path.getmtime(self._filename)
else:
modified_time = None
_logger.debug('Checking for last modified={0}.', modified_time)
if modified_time:
date_str = email.utils.formatdate(modified_time)
request.fields['If-Modified-Since'] = date_str
return request
|
class TimestampingFileWriterSession(BaseFileWriterSession):
def process_request(self, request: BaseRequest):
pass
| 2 | 0 | 20 | 6 | 14 | 0 | 4 | 0 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 43 | 21 | 6 | 15 | 5 | 13 | 0 | 13 | 5 | 11 | 4 | 5 | 1 | 4 |
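The timestamping trick above in isolation: take the local file's mtime and send it as an HTTP date so the server can answer 304 Not Modified. A runnable fragment using only the stdlib calls the record itself uses:

import email.utils
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'old copy')
    path = tmp.name

modified_time = os.path.getmtime(path)
# Same header construction as process_request above.
print('If-Modified-Since:', email.utils.formatdate(modified_time))
os.unlink(path)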
6,586 |
ArchiveTeam/wpull
|
ArchiveTeam_wpull/wpull/writer.py
|
wpull.writer.TimestampingFileWriter
|
class TimestampingFileWriter(BaseFileWriter):
'''File writer that only downloads newer files from the server.'''
@property
def session_class(self) -> BaseFileWriterSession:
return TimestampingFileWriterSession
|
class TimestampingFileWriter(BaseFileWriter):
'''File writer that only downloads newer files from the server.'''
@property
def session_class(self) -> BaseFileWriterSession:
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 0.25 | 1 | 2 | 2 | 0 | 1 | 0 | 1 | 25 | 5 | 0 | 4 | 3 | 1 | 1 | 3 | 2 | 1 | 1 | 5 | 0 | 1 |
6,587 |
ArchiveTeam/wpull
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/ArchiveTeam_wpull/wpull/processor/rule.py
|
wpull.processor.rule.ResultRule
|
class ResultRule(HookableMixin):
'''Decide on the results of a fetch.
Args:
ssl_verification: If True, don't ignore certificate errors.
retry_connrefused: If True, don't consider a connection refused
error to be a permanent error.
retry_dns_error: If True, don't consider a DNS resolution error
to be a permanent error.
waiter: The Waiter.
statistics: The Statistics.
'''
def __init__(self, ssl_verification: bool = False,
retry_connrefused: bool = False,
retry_dns_error: bool = False,
waiter: Optional[Waiter] = None,
statistics: Optional[Statistics] = None):
super().__init__()
self._ssl_verification = ssl_verification
self.retry_connrefused = retry_connrefused
self.retry_dns_error = retry_dns_error
self._waiter = waiter
self._statistics = statistics
self.hook_dispatcher.register(PluginFunctions.wait_time)
self.hook_dispatcher.register(PluginFunctions.handle_response)
self.hook_dispatcher.register(PluginFunctions.handle_pre_response)
self.hook_dispatcher.register(PluginFunctions.handle_error)
def handle_pre_response(self, item_session: ItemSession) -> Actions:
'''Process a response that is starting.'''
action = self.consult_pre_response_hook(item_session)
if action == Actions.RETRY:
item_session.set_status(Status.skipped)
elif action == Actions.FINISH:
item_session.set_status(Status.done)
elif action == Actions.STOP:
raise HookStop('Script requested immediate stop.')
return action
def handle_document(self, item_session: ItemSession, filename: str) -> Actions:
'''Process a successful document response.
Returns:
A value from :class:`.hook.Actions`.
'''
self._waiter.reset()
action = self.handle_response(item_session)
if action == Actions.NORMAL:
self._statistics.increment(item_session.response.body.size())
item_session.set_status(Status.done, filename=filename)
return action
def handle_no_document(self, item_session: ItemSession) -> Actions:
'''Callback for successful responses containing no useful document.
Returns:
A value from :class:`.hook.Actions`.
'''
self._waiter.reset()
action = self.handle_response(item_session)
if action == Actions.NORMAL:
item_session.set_status(Status.skipped)
return action
def handle_intermediate_response(self, item_session: ItemSession) -> Actions:
'''Callback for successful intermediate responses.
Returns:
A value from :class:`.hook.Actions`.
'''
self._waiter.reset()
action = self.handle_response(item_session)
return action
def handle_document_error(self, item_session: ItemSession) -> Actions:
'''Callback for when the document only describes a server error.
Returns:
A value from :class:`.hook.Actions`.
'''
self._waiter.increment()
self._statistics.errors[ServerError] += 1
action = self.handle_response(item_session)
if action == Actions.NORMAL:
item_session.set_status(Status.error)
return action
def handle_response(self, item_session: ItemSession) -> Actions:
'''Generic handler for a response.
Returns:
A value from :class:`.hook.Actions`.
'''
action = self.consult_response_hook(item_session)
if action == Actions.RETRY:
item_session.set_status(Status.error)
elif action == Actions.FINISH:
item_session.set_status(Status.done)
elif action == Actions.STOP:
raise HookStop('Script requested immediate stop.')
return action
def handle_error(self, item_session: ItemSession, error: BaseException) -> Actions:
'''Process an error.
Returns:
A value from :class:`.hook.Actions`.
'''
if not self._ssl_verification and \
isinstance(error, SSLVerificationError):
# Change it into a different error since the user doesn't care
# about verifying certificates
self._statistics.increment_error(ProtocolError())
else:
self._statistics.increment_error(error)
self._waiter.increment()
action = self.consult_error_hook(item_session, error)
if action == Actions.RETRY:
item_session.set_status(Status.error)
elif action == Actions.FINISH:
item_session.set_status(Status.done)
elif action == Actions.STOP:
raise HookStop('Script requested immediate stop.')
elif self._ssl_verification and isinstance(error, SSLVerificationError):
raise
elif isinstance(error, ConnectionRefused) and \
not self.retry_connrefused:
item_session.set_status(Status.skipped)
elif isinstance(error, DNSNotFound) and \
not self.retry_dns_error:
item_session.set_status(Status.skipped)
else:
item_session.set_status(Status.error)
return action
def get_wait_time(self, item_session: ItemSession, error=None):
'''Return the wait time in seconds between requests.'''
seconds = self._waiter.get()
try:
return self.hook_dispatcher.call(PluginFunctions.wait_time, seconds,
item_session, error)
except HookDisconnected:
return seconds
@staticmethod
@hook_interface(PluginFunctions.wait_time)
def plugin_wait_time(seconds: float, item_session: ItemSession, error: Optional[Exception] = None) -> float:
'''Return the wait time between requests.
Args:
seconds: The original time in seconds.
item_session:
error:
Returns:
The time in seconds.
'''
return seconds
def consult_pre_response_hook(self, item_session: ItemSession) -> Actions:
'''Return scripting action when a response begins.'''
try:
return self.hook_dispatcher.call(
PluginFunctions.handle_pre_response,
item_session
)
except HookDisconnected:
return Actions.NORMAL
@staticmethod
@hook_interface(PluginFunctions.handle_pre_response)
def plugin_handle_pre_response(item_session: ItemSession) -> Actions:
'''Return an action to handle a response status before a download.
Args:
item_session:
Returns:
A value from :class:`Actions`. The default is
:attr:`Actions.NORMAL`.
'''
return Actions.NORMAL
def consult_response_hook(self, item_session: ItemSession) -> Actions:
'''Return scripting action when a response ends.'''
try:
return self.hook_dispatcher.call(
PluginFunctions.handle_response, item_session
)
except HookDisconnected:
return Actions.NORMAL
@staticmethod
@hook_interface(PluginFunctions.handle_response)
def plugin_handle_response(item_session: ItemSession) -> Actions:
'''Return an action to handle the response.
Args:
item_session:
Returns:
A value from :class:`Actions`. The default is
:attr:`Actions.NORMAL`.
'''
return Actions.NORMAL
def consult_error_hook(self, item_session: ItemSession, error: BaseException):
'''Return scripting action when an error occurred.'''
try:
return self.hook_dispatcher.call(
PluginFunctions.handle_error, item_session, error)
except HookDisconnected:
return Actions.NORMAL
@staticmethod
@hook_interface(PluginFunctions.handle_error)
def plugin_handle_error(item_session: ItemSession, error: BaseException) -> Actions:
'''Return an action to handle the error.
Args:
item_session:
error:
Returns:
A value from :class:`Actions`. The default is
:attr:`Actions.NORMAL`.
'''
return Actions.NORMAL
|
class ResultRule(HookableMixin):
'''Decide on the results of a fetch.
Args:
ssl_verification: If True, don't ignore certificate errors.
retry_connrefused: If True, don't consider a connection refused
error to be a permanent error.
retry_dns_error: If True, don't consider a DNS resolution error
to be a permanent error.
waiter: The Waiter.
statistics: The Statistics.
'''
def __init__(self, ssl_verification: bool = False,
retry_connrefused: bool = False,
retry_dns_error: bool = False,
waiter: Optional[Waiter] = None,
statistics: Optional[Statistics] = None):
pass
def handle_pre_response(self, item_session: ItemSession) -> Actions:
'''Process a response that is starting.'''
pass
def handle_document(self, item_session: ItemSession, filename: str) -> Actions:
'''Process a successful document response.
Returns:
A value from :class:`.hook.Actions`.
'''
pass
def handle_no_document(self, item_session: ItemSession) -> Actions:
'''Callback for successful responses containing no useful document.
Returns:
A value from :class:`.hook.Actions`.
'''
pass
def handle_intermediate_response(self, item_session: ItemSession) -> Actions:
'''Callback for successful intermediate responses.
Returns:
A value from :class:`.hook.Actions`.
'''
pass
def handle_document_error(self, item_session: ItemSession) -> Actions:
'''Callback for when the document only describes a server error.
Returns:
A value from :class:`.hook.Actions`.
'''
pass
def handle_response(self, item_session: ItemSession) -> Actions:
'''Generic handler for a response.
Returns:
A value from :class:`.hook.Actions`.
'''
pass
def handle_error(self, item_session: ItemSession, error: BaseException) -> Actions:
'''Process an error.
Returns:
A value from :class:`.hook.Actions`.
'''
pass
def get_wait_time(self, item_session: ItemSession, error=None):
'''Return the wait time in seconds between requests.'''
pass
@staticmethod
@hook_interface(PluginFunctions.wait_time)
def plugin_wait_time(seconds: float, item_session: ItemSession, error: Optional[Exception] = None) -> float:
'''Return the wait time between requests.
Args:
seconds: The original time in seconds.
item_session:
error:
Returns:
The time in seconds.
'''
pass
def consult_pre_response_hook(self, item_session: ItemSession) -> Actions:
'''Return scripting action when a response begins.'''
pass
@staticmethod
@hook_interface(PluginFunctions.handle_pre_response)
def plugin_handle_pre_response(item_session: ItemSession) -> Actions:
'''Return an action to handle a response status before a download.
Args:
item_session:
Returns:
A value from :class:`Actions`. The default is
:attr:`Actions.NORMAL`.
'''
pass
def consult_response_hook(self, item_session: ItemSession) -> Actions:
'''Return scripting action when a response ends.'''
pass
@staticmethod
@hook_interface(PluginFunctions.handle_response)
def plugin_handle_response(item_session: ItemSession) -> Actions:
'''Return an action to handle the response.
Args:
item_session:
Returns:
A value from :class:`Actions`. The default is
:attr:`Actions.NORMAL`.
'''
pass
def consult_error_hook(self, item_session: ItemSession, error: BaseException):
'''Return scripting action when an error occurred.'''
pass
@staticmethod
@hook_interface(PluginFunctions.handle_error)
def plugin_handle_error(item_session: ItemSession, error: BaseException) -> Actions:
'''Return an action to handle the error.
Args:
item_session:
error:
Returns:
A value from :class:`Actions`. The default is
:attr:`Actions.NORMAL`.
'''
pass
| 25 | 16 | 13 | 2 | 7 | 4 | 2 | 0.56 | 1 | 19 | 13 | 0 | 12 | 5 | 16 | 18 | 249 | 51 | 127 | 38 | 98 | 71 | 94 | 30 | 77 | 8 | 2 | 1 | 36 |
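A minimal sketch of the consult-hook pattern used throughout ResultRule above: ask the hook dispatcher for a scripted action and fall back to Actions.NORMAL when no hook is connected. The dispatcher and exception below are simplified stand-ins, not wpull's real plumbing.

import enum


class Actions(enum.Enum):
    NORMAL = 'normal'
    RETRY = 'retry'
    FINISH = 'finish'
    STOP = 'stop'


class HookDisconnected(Exception):
    '''Raised when no callback is registered for a hook name.'''


class HookDispatcher:
    '''Stand-in dispatcher: one callback per hook name.'''
    def __init__(self):
        self._callbacks = {}

    def connect(self, name, callback):
        self._callbacks[name] = callback

    def call(self, name, *args):
        if name not in self._callbacks:
            raise HookDisconnected(name)
        return self._callbacks[name](*args)


def consult_error_hook(dispatcher, item_session, error):
    # Same shape as ResultRule.consult_error_hook: try the user hook,
    # default to Actions.NORMAL when nothing is connected.
    try:
        return dispatcher.call('handle_error', item_session, error)
    except HookDisconnected:
        return Actions.NORMAL


dispatcher = HookDispatcher()
print(consult_error_hook(dispatcher, None, OSError()))  # Actions.NORMAL
dispatcher.connect('handle_error', lambda session, error: Actions.RETRY)
print(consult_error_hook(dispatcher, None, OSError()))  # Actions.RETRY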
6,588 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/urlfilter.py | wpull.urlfilter.TriesFilter |
class TriesFilter(BaseURLFilter):
'''Allow URLs that have been attempted up to a limit of tries.'''
def __init__(self, max_tries):
self._tries = max_tries
def test(self, url_info, url_table_record):
if self._tries:
return url_table_record.try_count < self._tries
else:
return True
|
class TriesFilter(BaseURLFilter):
'''Allow URLs that have been attempted up to a limit of tries.'''
def __init__(self, max_tries):
pass
def test(self, url_info, url_table_record):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 2 | 0.13 | 1 | 0 | 0 | 0 | 2 | 1 | 2 | 23 | 10 | 1 | 8 | 4 | 5 | 1 | 7 | 4 | 4 | 2 | 4 | 1 | 3 |
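A hedged usage sketch for TriesFilter, assuming the class above is importable; the Record type is a hypothetical stand-in exposing only the try_count attribute the filter reads.

from collections import namedtuple

Record = namedtuple('Record', ['try_count'])

limited = TriesFilter(max_tries=3)
print(limited.test(None, Record(try_count=2)))  # True: still under the limit
print(limited.test(None, Record(try_count=3)))  # False: limit reached

unlimited = TriesFilter(max_tries=0)  # a falsy limit disables the check
print(unlimited.test(None, Record(try_count=99)))  # True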
6,589 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/protocol/ftp/ls/listing_test.py | wpull.protocol.ftp.ls.listing_test.TestListing |
class TestListing(unittest.TestCase):
def test_guess_listing_type(self):
self.assertEqual('unix', guess_listing_type(UNIX_LS.splitlines()))
self.assertEqual('msdos', guess_listing_type(MSDOS_LS.splitlines()))
self.assertEqual('msdos', guess_listing_type(MSDOS_NO_DIR_LS.splitlines()))
self.assertEqual('nlst', guess_listing_type(NLST_LS.splitlines()))
self.assertEqual('unknown', guess_listing_type(MVS_LS.splitlines()))
def test_parse_unix_perm(self):
self.assertEqual(0, parse_unix_perm('a'))
self.assertEqual(0, parse_unix_perm('1234567890'))
self.assertEqual(0, parse_unix_perm('---------'))
self.assertEqual(0o400, parse_unix_perm('r--------'))
self.assertEqual(0o040, parse_unix_perm('---r-----'))
self.assertEqual(0o004, parse_unix_perm('------r--'))
self.assertEqual(0o444, parse_unix_perm('r--r--r--'))
self.assertEqual(0o222, parse_unix_perm('-w--w--w-'))
self.assertEqual(0o111, parse_unix_perm('--x--x--x'))
self.assertEqual(0o111, parse_unix_perm('--s--s--s'))
self.assertEqual(0o545, parse_unix_perm('r-xr--r-x'))
self.assertEqual(0o632, parse_unix_perm('rw--wx-w-'))
self.assertEqual(0o535, parse_unix_perm('r-x-wxr-x'))
self.assertEqual(0o777, parse_unix_perm('rwxrwxrwx'))
self.assertEqual(0o777, parse_unix_perm('rwsrwsrws'))
def test_parse_unix(self):
parser = ListingParser(UNIX_LS)
results = list(parser.parse_input())
date_factory = functools.partial(datetime.datetime,
tzinfo=datetime.timezone.utc)
datetime_now = datetime.datetime.utcnow()
datetime_now = datetime_now.replace(tzinfo=datetime.timezone.utc)
current_year = datetime_now.year
datetime_1 = date_factory(current_year, 1, 29, 3, 26)
datetime_2 = date_factory(current_year, 1, 25, 0, 17)
if datetime_1 > datetime_now:
datetime_1 = datetime_1.replace(year=current_year - 1)
if datetime_2 > datetime_now:
datetime_2 = datetime_2.replace(year=current_year - 1)
self.assertEqual(
[
FileEntry('README', 'file', 531,
datetime_1,
perm=0o644),
FileEntry('etc', 'dir', 512,
date_factory(1994, 4, 8),
perm=0o555),
FileEntry('etc', 'dir', 512,
date_factory(1994, 4, 8),
perm=0o555),
FileEntry('bin', 'symlink', 7,
datetime_2,
'usr/bin', perm=0o777),
FileEntry('blah', 'dir', 512,
date_factory(2004, 4, 8),
perm=0o555),
],
results
)
def test_parse_msdos(self):
parser = ListingParser(MSDOS_LS)
results = list(parser.parse_input())
date_factory = functools.partial(datetime.datetime,
tzinfo=datetime.timezone.utc)
self.assertEqual(
[
FileEntry('licensed', 'dir', None,
date_factory(2000, 4, 27, 21, 9)),
FileEntry('pub', 'dir', None,
date_factory(2000, 7, 18, 10, 16)),
FileEntry('readme.htm', 'file', 589,
date_factory(2000, 4, 14, 15, 47)),
],
results
)
def test_parse_msdos_no_dir(self):
parser = ListingParser(MSDOS_NO_DIR_LS)
results = list(parser.parse_input())
date_factory = functools.partial(datetime.datetime,
tzinfo=datetime.timezone.utc)
self.assertEqual(
[
FileEntry('licensed.exe', 'file', 123,
date_factory(2000, 4, 27, 21, 9)),
FileEntry('pub.pdf', 'file', 456,
date_factory(2000, 7, 18, 10, 16)),
FileEntry('readme.htm', 'file', 589,
date_factory(2000, 4, 14, 15, 47)),
],
results
)
def test_parse_nlst(self):
parser = ListingParser(NLST_LS)
results = list(parser.parse_input())
self.assertEqual(
[
FileEntry('horse.txt'),
FileEntry('fish'),
FileEntry('dolphin.jpg'),
FileEntry('delicious cake.wri'),
FileEntry('egg'),
],
results
)
def test_parse_junk(self):
parser = ListingParser(' aj \x00 a304 jrf')
self.assertRaises(UnknownListingError, parser.parse_input)
def test_parse_unix_datelike_file(self):
parser = ListingParser(UNIX_LS_DATELIKE_FILE)
results = list(parser.parse_input())
date_factory = functools.partial(datetime.datetime,
tzinfo=datetime.timezone.utc)
self.assertEqual(
[
FileEntry('2009-12', 'file', 1558532,
date_factory(2009, 12, 30),
perm=0o644),
FileEntry('2010-01', 'file', 10564020,
date_factory(2010, 1, 14),
perm=0o644),
],
results
)
def test_parse_unix_datelike_file_2(self):
parser = ListingParser(UNIX_LS_DATELIKE_FILE_2)
results = list(parser.parse_input())
date_factory = functools.partial(datetime.datetime,
tzinfo=datetime.timezone.utc)
self.assertEqual(
[
FileEntry('english_german.2010-03-24.tar.gz', 'file', 242408,
date_factory(2010, 3, 24),
perm=0o644),
FileEntry('old', 'dir', 4096,
date_factory(2010, 3, 24),
perm=0o755),
],
results
)
|
class TestListing(unittest.TestCase):
def test_guess_listing_type(self):
pass
def test_parse_unix_perm(self):
pass
def test_parse_unix(self):
pass
def test_parse_msdos(self):
pass
def test_parse_msdos_no_dir(self):
pass
def test_parse_nlst(self):
pass
def test_parse_junk(self):
pass
def test_parse_unix_datelike_file(self):
pass
def test_parse_unix_datelike_file_2(self):
pass
| 10 | 0 | 16 | 1 | 15 | 0 | 1 | 0 | 1 | 6 | 2 | 0 | 9 | 0 | 9 | 81 | 156 | 19 | 137 | 32 | 127 | 0 | 64 | 32 | 54 | 3 | 2 | 1 | 11 |
6,590 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/urlfilter.py | wpull.urlfilter.SchemeFilter |
class SchemeFilter(BaseURLFilter):
'''Allow a URL if its scheme is in the allowed list.'''
def __init__(self, allowed=('http', 'https', 'ftp')):
self._allowed = allowed
def test(self, url_info, url_table_record):
return url_info.scheme in self._allowed
|
class SchemeFilter(BaseURLFilter):
'''Allow a URL if its scheme is in the allowed list.'''
def __init__(self, allowed=('http', 'https', 'ftp')):
pass
def test(self, url_info, url_table_record):
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 0.2 | 1 | 0 | 0 | 0 | 2 | 1 | 2 | 23 | 7 | 1 | 5 | 4 | 2 | 1 | 5 | 4 | 2 | 1 | 4 | 0 | 2 |
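A similar sketch for SchemeFilter, assuming the class above is importable; URLInfo here is a stand-in exposing only the scheme attribute that test() reads.

from collections import namedtuple

URLInfo = namedtuple('URLInfo', ['scheme'])

scheme_filter = SchemeFilter()  # defaults to ('http', 'https', 'ftp')
print(scheme_filter.test(URLInfo('https'), None))   # True
print(scheme_filter.test(URLInfo('mailto'), None))  # False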
6,591 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/database/sqltable_test.py | wpull.database.sqltable_test.TestDatabase |
class TestDatabase(unittest.TestCase):
def get_url_table(self):
return SQLiteURLTable(':memory:')
def test_url_add_and_update(self):
url_table = self.get_url_table()
urls = [
'http://example.com',
'http://example.com/kitteh',
'http://example.com/doge',
]
url_properties = URLProperties()
url_properties.parent_url = 'http://example.com'
url_properties.level = 0
url_properties.root_url = 'http://example.net'
url_table.add_many(
[AddURLInfo(url, url_properties, None) for url in urls],
)
self.assertTrue(url_table.contains(urls[0]))
self.assertTrue(url_table.contains(urls[1]))
self.assertTrue(url_table.contains(urls[2]))
self.assertEqual(3, url_table.count())
self.assertEqual(3, url_table.get_root_url_todo_count())
for i in range(3):
url_record = url_table.get_one(urls[i])
self.assertEqual(urls[i], url_record.url)
self.assertEqual(Status.todo, url_record.status)
self.assertEqual(0, url_record.try_count)
self.assertEqual('http://example.com', url_record.parent_url)
self.assertEqual('http://example.net', url_record.root_url)
url_record = url_table.check_out(
Status.todo,
)
self.assertEqual(Status.in_progress, url_record.status)
url_result = URLResult()
url_result.status_code = 200
url_table.check_in(url_record.url, Status.done,
increment_try_count=True, url_result=url_result)
url_record = url_table.get_one(url_record.url)
self.assertEqual(200, url_record.status_code)
self.assertEqual(Status.done, url_record.status)
self.assertEqual(1, url_record.try_count)
self.assertEqual(2, url_table.get_root_url_todo_count())
hostnames = tuple(url_table.get_hostnames())
self.assertEqual(1, len(hostnames))
self.assertEqual('example.com', hostnames[0])
def test_warc_visits(self):
url_table = self.get_url_table()
self.assertFalse(
url_table.get_revisit_id('http://example.com/1', 'digest123')
)
url_table.add_visits([
('http://example.com/1', 'id123', 'digest123'),
('http://example.com/2', 'id456', 'digest456'),
])
self.assertEqual(
'id123',
url_table.get_revisit_id('http://example.com/1', 'digest123')
)
self.assertEqual(
'id456',
url_table.get_revisit_id('http://example.com/2', 'digest456')
)
self.assertFalse(
url_table.get_revisit_id('http://example.com/1', 'digestbad')
)
self.assertFalse(
url_table.get_revisit_id('http://example.com/2', 'digestbad')
)
self.assertFalse(
url_table.get_revisit_id('http://example.com/asdf', 'digest123')
)
|
class TestDatabase(unittest.TestCase):
def get_url_table(self):
pass
def test_url_add_and_update(self):
pass
def test_warc_visits(self):
pass
| 4 | 0 | 29 | 5 | 23 | 0 | 1 | 0 | 1 | 6 | 4 | 0 | 3 | 0 | 3 | 75 | 89 | 18 | 71 | 12 | 67 | 0 | 45 | 12 | 41 | 2 | 2 | 1 | 4 |
6,592 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/debug.py | wpull.debug.DebugConsoleHandler |
class DebugConsoleHandler(tornado.web.RequestHandler):
TEMPLATE = '''<html>
<style>
#commandbox {{
width: 100%;
}}
</style>
<body>
<p>Welcome to DEBUG CONSOLE!</p>
<p><tt>Builder()</tt> instance at <tt>wpull_builder</tt>.</p>
<form method="post">
<input id="commandbox" name="command" value="{command}">
<input type="submit" value="Execute">
</form>
<pre>{output}</pre>
</body>
</html>
'''
def get(self):
self.write(
self.TEMPLATE.format(output='(ready)', command='')
.encode('utf-8'))
def post(self):
command = self.get_argument('command', strip=False)
sys.stdout = io.StringIO()
try:
exec(
command,
{'wpull_builder': self.application.settings['builder']}
)
result = sys.stdout.getvalue()
except Exception:
result = traceback.format_exc()
finally:
sys.stdout = sys.__stdout__
self.write(
self.TEMPLATE.format(output=result, command=html.escape(command))
.encode('utf-8'))
|
class DebugConsoleHandler(tornado.web.RequestHandler):
def get(self):
pass
def post(self):
pass
| 3 | 0 | 11 | 1 | 10 | 0 | 2 | 0.03 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 83 | 42 | 4 | 37 | 6 | 34 | 1 | 14 | 6 | 11 | 2 | 2 | 1 | 3 |
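A hedged wiring sketch: the handler reads the Builder instance from application settings, so it can be passed as a settings keyword. The builder object and URL pattern below are placeholders, not wpull's actual startup code.

import tornado.web


def make_debug_app(builder):
    # Extra keyword arguments to Application become self.application.settings,
    # which is where DebugConsoleHandler.post() looks up 'builder'.
    return tornado.web.Application(
        [(r'/', DebugConsoleHandler)],
        builder=builder,
    )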
6,593 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/decompression.py | wpull.decompression.DeflateDecompressor |
class DeflateDecompressor(SimpleGzipDecompressor):
'''zlib decompressor with raw deflate detection.
This class doesn't do anything special. It only tries regular zlib and then
tries raw deflate on the first decompress.
'''
def __init__(self):
super().__init__()
self.decompressobj = None
def decompress(self, value):
if not self.decompressobj:
try:
self.decompressobj = zlib.decompressobj()
return self.decompressobj.decompress(value)
except zlib.error:
self.decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
return self.decompressobj.decompress(value)
return self.decompressobj.decompress(value)
def flush(self):
if self.decompressobj:
return super().flush()
else:
return b''
|
class DeflateDecompressor(SimpleGzipDecompressor):
'''zlib decompressor with raw deflate detection.
This class doesn't do anything special. It only tries regular zlib and then
tries raw deflate on the first decompress.
'''
def __init__(self):
pass
def decompress(self, value):
pass
def flush(self):
pass
| 4 | 1 | 6 | 0 | 6 | 0 | 2 | 0.22 | 1 | 2 | 0 | 0 | 3 | 1 | 3 | 6 | 26 | 4 | 18 | 5 | 14 | 4 | 17 | 5 | 13 | 3 | 2 | 2 | 6 |
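A sketch of the two wire formats DeflateDecompressor copes with: zlib-wrapped deflate and raw deflate (the zlib stream minus its 2-byte header and 4-byte Adler-32 trailer), since servers send either one under Content-Encoding: deflate. Assumes the class above is importable.

import zlib

payload = b'hello world' * 10
zlib_data = zlib.compress(payload)
raw_deflate = zlib_data[2:-4]  # strip the zlib header and checksum trailer

for wire_data in (zlib_data, raw_deflate):
    decompressor = DeflateDecompressor()
    result = decompressor.decompress(wire_data) + decompressor.flush()
    assert result == payload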
6,594 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/decompression.py | wpull.decompression.GzipDecompressor |
class GzipDecompressor(SimpleGzipDecompressor):
'''gzip decompressor with gzip header detection.
This class checks if the stream starts with the 2 byte gzip magic number.
If it is not present, it returns the bytes unchanged.
'''
def __init__(self):
super().__init__()
self.checked = False
self.is_ok = None
def decompress(self, value):
if self.checked:
if self.is_ok:
return super().decompress(value)
else:
return value
else:
# XXX: gzip magic value is \x1f\x8b but data may come in as
# a single byte. The likelihood of plaintext starting with \x1f is
# very low, right?
self.checked = True
if value[:1] == b'\x1f':
self.is_ok = True
return super().decompress(value)
else:
self.is_ok = False
return value
def flush(self):
if self.is_ok:
return super().flush()
else:
return b''
|
class GzipDecompressor(SimpleGzipDecompressor):
'''gzip decompressor with gzip header detection.
This class checks if the stream starts with the 2 byte gzip magic number.
If it is not present, it returns the bytes unchanged.
'''
def __init__(self):
pass
def decompress(self, value):
pass
def flush(self):
pass
| 4 | 1 | 9 | 0 | 8 | 1 | 2 | 0.29 | 1 | 1 | 0 | 0 | 3 | 2 | 3 | 6 | 34 | 3 | 24 | 6 | 20 | 7 | 20 | 6 | 16 | 4 | 2 | 2 | 7 |
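A short sketch of GzipDecompressor's sniffing, assuming the class above is importable: gzip input is inflated, while non-gzip input passes through unchanged after the one-byte magic check. (The TestDecompression entry further down exercises the same behavior.)

import gzip

inflating = GzipDecompressor()
gz_bytes = gzip.compress(b'HELLO KITTEN')
assert inflating.decompress(gz_bytes) + inflating.flush() == b'HELLO KITTEN'

passthrough = GzipDecompressor()
data = passthrough.decompress(b'LAMMA ') + passthrough.decompress(b'JUMP')
assert data + passthrough.flush() == b'LAMMA JUMP'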
6,595 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/decompression.py | wpull.decompression.SimpleGzipDecompressor |
class SimpleGzipDecompressor(object):
"""Streaming gzip decompressor.
The interface is like that of `zlib.decompressobj` (without some of the
optional arguments), but it understands gzip headers and checksums.
"""
# This class taken from tornado.util.GzipDecompressor
# Copyright Facebook. License Apache License Version 2.0.
def __init__(self):
# Magic parameter makes zlib module understand gzip header
# http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
# This works on cpython and pypy, but not jython.
self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def decompress(self, value):
"""Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
"""
return self.decompressobj.decompress(value)
def flush(self):
"""Return any remaining buffered data not yet returned by decompress.
Also checks for errors such as truncated input.
No other methods may be called on this object after `flush`.
"""
return self.decompressobj.flush()
|
class SimpleGzipDecompressor(object):
'''Streaming gzip decompressor.
The interface is like that of `zlib.decompressobj` (without some of the
optional arguments), but it understands gzip headers and checksums.
'''
def __init__(self):
pass
def decompress(self, value):
'''Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
'''
pass
def flush(self):
'''Return any remaining buffered data not yet returned by decompress.
Also checks for errors such as truncated input.
No other methods may be called on this object after `flush`.
'''
pass
| 4 | 3 | 7 | 1 | 2 | 4 | 1 | 2.57 | 1 | 0 | 0 | 2 | 3 | 1 | 3 | 3 | 30 | 5 | 7 | 5 | 3 | 18 | 7 | 5 | 3 | 1 | 1 | 0 | 3 |
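A streaming sketch for SimpleGzipDecompressor, assuming the class above is importable: feed arbitrary chunks as they arrive, then call flush() once at the end so buffered tail data and the gzip checksum are processed.

import gzip

data = gzip.compress(b'streaming example payload')
decompressor = SimpleGzipDecompressor()
chunks = (data[i:i + 7] for i in range(0, len(data), 7))  # 7-byte chunks
out = b''.join(decompressor.decompress(chunk) for chunk in chunks)
out += decompressor.flush()
assert out == b'streaming example payload'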
6,596 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/decompression_test.py | wpull.decompression_test.TestDecompression |
class TestDecompression(unittest.TestCase):
def test_deflate_decompressor(self):
input_list = []
hash_obj = hashlib.sha1(b'moose')
for dummy in range(100):
data = hash_obj.digest()
input_list.append(data)
hash_obj.update(data)
input_data = b''.join(input_list)
zlib_data = zlib.compress(input_data)
deflate_data = zlib_data[2:-4]
decompressor = DeflateDecompressor()
test_data = decompressor.decompress(zlib_data[:50]) \
+ decompressor.decompress(zlib_data[50:]) \
+ decompressor.flush()
self.assertEqual(input_data, test_data)
decompressor = DeflateDecompressor()
test_data = decompressor.decompress(deflate_data[:50]) \
+ decompressor.decompress(deflate_data[50:]) \
+ decompressor.flush()
self.assertEqual(input_data, test_data)
def test_deflate_decompressor_flush(self):
decompressor = DeflateDecompressor()
data = decompressor.flush()
self.assertEqual(b'', data)
def test_gzip_decompressor(self):
file_buffer = io.BytesIO()
gzip_file = gzip.GzipFile(mode='wb', fileobj=file_buffer)
gzip_file.write(b'HELLO KITTEN')
gzip_file.close()
decompressor = GzipDecompressor()
data = decompressor.decompress(file_buffer.getvalue()[:5])
data += decompressor.decompress(file_buffer.getvalue()[5:])
data += decompressor.flush()
self.assertEqual(b'HELLO KITTEN', data)
def test_gzip_decompressor_flush(self):
decompressor = GzipDecompressor()
data = decompressor.flush()
self.assertEqual(b'', data)
def test_gzip_decompressor_not_gzip(self):
decompressor = GzipDecompressor()
data = decompressor.decompress(b'LAMMA ')
data += decompressor.decompress(b'JUMP')
data += decompressor.flush()
self.assertEqual(b'LAMMA JUMP', data)
def test_gzip_uncompress(self):
self.assertEqual(
b'DRAGON',
gzip_uncompress(gzip.compress(b'DRAGON'))
)
# Check for no crash:
gzip_uncompress(gzip.compress(b'DRAGON')[:1], truncated=True)
|
class TestDecompression(unittest.TestCase):
def test_deflate_decompressor(self):
pass
def test_deflate_decompressor_flush(self):
pass
def test_gzip_decompressor(self):
pass
def test_gzip_decompressor_flush(self):
pass
def test_gzip_decompressor_not_gzip(self):
pass
def test_gzip_uncompress(self):
pass
| 7 | 0 | 10 | 2 | 8 | 0 | 1 | 0.02 | 1 | 4 | 2 | 0 | 6 | 0 | 6 | 78 | 67 | 15 | 51 | 26 | 44 | 1 | 44 | 26 | 37 | 2 | 2 | 1 | 7 |
6,597 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/database/sqltable.py | wpull.database.sqltable.SQLiteURLTable |
class SQLiteURLTable(BaseSQLURLTable):
'''URL table with SQLite storage.
Args:
path: A SQLite filename
'''
def __init__(self, path=':memory:'):
super().__init__()
# We use a SingletonThreadPool always because we are using WAL
# and want SQLite to handle the checkpoints. Otherwise NullPool
# will open and close the connection rapidly, defeating the purpose
# of WAL.
escaped_path = path.replace('?', '_')
self._engine = create_engine(
'sqlite:///{0}'.format(escaped_path), poolclass=SingletonThreadPool)
sqlalchemy.event.listen(
self._engine, 'connect', self._apply_pragmas_callback)
DBBase.metadata.create_all(self._engine)
self._session_maker_instance = sessionmaker(bind=self._engine)
@classmethod
def _apply_pragmas_callback(cls, connection, record):
'''Set SQLite pragmas.
Write-ahead logging, synchronous=NORMAL is used.
'''
_logger.debug('Setting pragmas.')
connection.execute('PRAGMA journal_mode=WAL')
connection.execute('PRAGMA synchronous=NORMAL')
@property
def _session_maker(self):
return self._session_maker_instance
def close(self):
self._engine.dispose()
|
class SQLiteURLTable(BaseSQLURLTable):
'''URL table with SQLite storage.
Args:
path: A SQLite filename
'''
def __init__(self, path=':memory:'):
pass
@classmethod
def _apply_pragmas_callback(cls, connection, record):
'''Set SQLite pragmas.
Write-ahead logging, synchronous=NORMAL is used.
'''
pass
@property
def _session_maker(self):
pass
def close(self):
pass
| 7 | 2 | 6 | 0 | 4 | 2 | 1 | 0.55 | 1 | 1 | 0 | 0 | 3 | 2 | 4 | 60 | 36 | 5 | 20 | 10 | 13 | 11 | 16 | 8 | 11 | 1 | 5 | 0 | 4 |
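A standalone sketch of the engine setup above: a SingletonThreadPool engine with a connect-event listener that applies the WAL pragmas to every new DBAPI connection. The file path is a placeholder; only the listener pattern is the point.

import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.pool import SingletonThreadPool


def apply_pragmas(dbapi_connection, connection_record):
    # Runs once per new sqlite3 connection, before it is handed out.
    dbapi_connection.execute('PRAGMA journal_mode=WAL')
    dbapi_connection.execute('PRAGMA synchronous=NORMAL')


engine = create_engine('sqlite:///example.db', poolclass=SingletonThreadPool)
sqlalchemy.event.listen(engine, 'connect', apply_pragmas)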
6,598 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/document/base.py | wpull.document.base.BaseExtractiveReader |
class BaseExtractiveReader(object, metaclass=abc.ABCMeta):
'''Base class for document readers that can only extract links.'''
def iter_links(self, file, encoding=None):
'''Return links from file.
Returns:
iterator: Each item is a str which represents a link.
'''
|
class BaseExtractiveReader(object, metaclass=abc.ABCMeta):
'''Base class for document readers that can only extract links.'''
def iter_links(self, file, encoding=None):
'''Return links from file.
Returns:
iterator: Each item is a str which represents a link.
'''
pass
| 2 | 2 | 6 | 1 | 1 | 4 | 1 | 2.5 | 2 | 0 | 0 | 2 | 1 | 0 | 1 | 21 | 8 | 1 | 2 | 2 | 0 | 5 | 2 | 2 | 0 | 1 | 3 | 0 | 1 |
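A hypothetical subclass sketch showing the iter_links contract; this regex-based reader is illustrative only and not one of wpull's real document readers.

import re


class PlainTextLinkReader(BaseExtractiveReader):
    '''Toy reader: yield every absolute HTTP(S) URL found in a text file.'''
    URL_PATTERN = re.compile(r'https?://\S+')

    def iter_links(self, file, encoding=None):
        for line in file:
            if isinstance(line, bytes):
                line = line.decode(encoding or 'utf-8', errors='replace')
            yield from self.URL_PATTERN.findall(line)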
6,599 | ArchiveTeam/wpull | ArchiveTeam_wpull/wpull/document/base_test.py | wpull.document.base_test.TestBase |
class TestBase(unittest.TestCase):
def test_very_false(self):
self.assertFalse(VeryFalse)
self.assertTrue(VeryFalse is VeryFalse)
|
class TestBase(unittest.TestCase):
def test_very_false(self):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 73 | 4 | 0 | 4 | 2 | 2 | 0 | 4 | 2 | 2 | 1 | 2 | 0 | 1 |