id
int64 0
843k
| repository_name
stringlengths 7
55
| file_path
stringlengths 9
332
| class_name
stringlengths 3
290
| human_written_code
stringlengths 12
4.36M
| class_skeleton
stringlengths 19
2.2M
| total_program_units
int64 1
9.57k
| total_doc_str
int64 0
4.2k
| AvgCountLine
float64 0
7.89k
| AvgCountLineBlank
float64 0
300
| AvgCountLineCode
float64 0
7.89k
| AvgCountLineComment
float64 0
7.89k
| AvgCyclomatic
float64 0
130
| CommentToCodeRatio
float64 0
176
| CountClassBase
float64 0
48
| CountClassCoupled
float64 0
589
| CountClassCoupledModified
float64 0
581
| CountClassDerived
float64 0
5.37k
| CountDeclInstanceMethod
float64 0
4.2k
| CountDeclInstanceVariable
float64 0
299
| CountDeclMethod
float64 0
4.2k
| CountDeclMethodAll
float64 0
4.2k
| CountLine
float64 1
115k
| CountLineBlank
float64 0
9.01k
| CountLineCode
float64 0
94.4k
| CountLineCodeDecl
float64 0
46.1k
| CountLineCodeExe
float64 0
91.3k
| CountLineComment
float64 0
27k
| CountStmt
float64 1
93.2k
| CountStmtDecl
float64 0
46.1k
| CountStmtExe
float64 0
90.2k
| MaxCyclomatic
float64 0
759
| MaxInheritanceTree
float64 0
16
| MaxNesting
float64 0
34
| SumCyclomatic
float64 0
6k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
148,048 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/example/scrapy_movie/items.py
|
crawlib.example.scrapy_movie.items.MovieListPage
|
class MovieListPage(ExtendedDocument):
_id = me.fields.IntField(primary_key=True)
status = me.fields.IntField()
edit_at = me.fields.DateTimeField()
meta = dict(
collection="site_movie_listpage",
db_alias=Config.MongoDB.database,
)
|
class MovieListPage(ExtendedDocument):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 1 | 8 | 5 | 7 | 0 | 5 | 5 | 4 | 0 | 1 | 0 | 0 |
148,049 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/entity/sql/entity.py
|
crawlib.entity.sql.entity.SqlEntitySingleStatus
|
class SqlEntitySingleStatus(SqlEntity):
CONF_STATUS_KEY = "status"
CONF_EDIT_AT_KEY = "edit_at"
status = sa.Column(sa.Integer, default=Status.S0_ToDo.id)
edit_at = sa.Column(sa.DateTime, default=epoch)
|
class SqlEntitySingleStatus(SqlEntity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 4 | 0 | 0 | 0 | 34 | 6 | 1 | 5 | 5 | 4 | 0 | 5 | 5 | 4 | 0 | 5 | 0 | 0 |
148,050 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/movie_url_builder.py
|
crawlib.tests.dummy_site_crawler.movie_url_builder.UrlBuilder
|
class UrlBuilder(BaseUrlBuilder):
domain = "http://127.0.0.1:{}".format(PORT)
def url_first_listpage(self):
return self.join_all("movie", "listpage", str(1))
def url_nth_listpage(self, nth):
return self.join_all("movie", "listpage", str(nth))
def url_movie_detail(self, movie_id):
return self.join_all("movie", str(movie_id))
|
class UrlBuilder(BaseUrlBuilder):
def url_first_listpage(self):
pass
def url_nth_listpage(self, nth):
pass
def url_movie_detail(self, movie_id):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 3 | 0 | 3 | 6 | 11 | 3 | 8 | 5 | 4 | 0 | 8 | 5 | 4 | 1 | 3 | 0 | 3 |
148,051 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/music_url_builder.py
|
crawlib.tests.dummy_site_crawler.music_url_builder.UrlBuilder
|
class UrlBuilder(BaseUrlBuilder):
domain = "http://127.0.0.1:{}".format(PORT)
def url_random_music(self):
return self.join_all("music", "random")
def url_artist(self, artist_id):
return self.join_all("music", "artist", str(artist_id))
def url_genre(self, genre_id):
return self.join_all("music", "genre", str(genre_id))
def url_music_detail(self, music_id):
return self.join_all("music", str(music_id))
|
class UrlBuilder(BaseUrlBuilder):
def url_random_music(self):
pass
def url_artist(self, artist_id):
pass
def url_genre(self, genre_id):
pass
def url_music_detail(self, music_id):
pass
| 5 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 4 | 0 | 4 | 7 | 14 | 4 | 10 | 6 | 5 | 0 | 10 | 6 | 5 | 1 | 3 | 0 | 4 |
148,052 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/config.py
|
crawlib.tests.dummy_site_crawler.sql_backend.config.Config
|
class Config(ConfigClass):
DB_LOCAL_HOST = Constant(default="localhost")
DB_LOCAL_PORT = Constant(default=43347)
DB_LOCAL_DATABASE = Constant(default="postgres")
DB_LOCAL_USERNAME = Constant(default="postgres")
DB_LOCAL_PASSWORD = Constant(default="password")
DB_HOST = Derivable()
@DB_HOST.getter
def get_DB_HOST(self):
if self.is_ci_runtime():
return self.DB_LOCAL_HOST.get_value()
else:
return self.DB_LOCAL_HOST.get_value()
DB_PORT = Derivable()
@DB_PORT.getter
def get_DB_PORT(self):
if self.is_ci_runtime():
return 5432
else:
return self.DB_LOCAL_PORT.get_value()
DB_DATABASE = Derivable()
@DB_DATABASE.getter
def get_DB_DATABASE(self):
if self.is_ci_runtime():
return self.DB_LOCAL_DATABASE.get_value()
else:
return self.DB_LOCAL_DATABASE.get_value()
DB_USERNAME = Derivable()
@DB_USERNAME.getter
def get_DB_USERNAME(self):
if self.is_ci_runtime():
return self.DB_LOCAL_USERNAME.get_value()
else:
return self.DB_LOCAL_USERNAME.get_value()
DB_PASSWORD = Derivable()
@DB_PASSWORD.getter
def get_DB_PASSWORD(self):
if self.is_ci_runtime():
return None
else:
return self.DB_LOCAL_PASSWORD.get_value()
|
class Config(ConfigClass):
@DB_HOST.getter
def get_DB_HOST(self):
pass
@DB_PORT.getter
def get_DB_PORT(self):
pass
@DB_DATABASE.getter
def get_DB_DATABASE(self):
pass
@DB_USERNAME.getter
def get_DB_USERNAME(self):
pass
@DB_PASSWORD.getter
def get_DB_PASSWORD(self):
pass
| 11 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 5 | 0 | 5 | 5 | 51 | 10 | 41 | 21 | 30 | 0 | 31 | 16 | 25 | 2 | 1 | 1 | 10 |
148,053 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/s1_movie/entity_movie_sql_backend.py
|
crawlib.tests.dummy_site_crawler.sql_backend.s1_movie.entity_movie_sql_backend.MoviePageBase
|
class MoviePageBase(SqlEntity):
id = sa.Column(sa.Integer, primary_key=True)
title = sa.Column(sa.String)
status_movie_info = sa.Column(sa.Integer, default=Status.S0_ToDo.id)
edit_at_movie_info = sa.Column(sa.DateTime, default=epoch)
image_content = sa.Column(sa.String)
status_cover_image = sa.Column(sa.Integer, default=Status.S0_ToDo.id)
edit_at_cover_image = sa.Column(sa.DateTime, default=epoch)
@property
def movie_id(self):
return self.id
def build_request(self, url, **kwargs):
request = url
return request
def send_request(self, request, **kwargs):
return requests.get(request)
|
class MoviePageBase(SqlEntity):
@property
def movie_id(self):
pass
def build_request(self, url, **kwargs):
pass
def send_request(self, request, **kwargs):
pass
| 5 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 2 | 3 | 0 | 3 | 37 | 20 | 4 | 16 | 12 | 11 | 0 | 15 | 11 | 11 | 1 | 5 | 0 | 3 |
148,054 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/entity/mongodb/entity.py
|
crawlib.entity.mongodb.entity.MongodbEntitySingleStatus
|
class MongodbEntitySingleStatus(MongodbEntity):
"""
**中文文档**
如果某个页面的 Entity 类不会被其他类继承, 通常即意味着对于该页面我们只有一种抓取模式.
也就是说只需要一套 ``status``, ``edit_at`` field.
什么叫做: 会被其他类继承, 有多种抓取模式?
例如, 我们要抓取一个图片网站上的图片. 网址的格式为 example.com/post/<post_id>
1. 我们第一次访问 post 页面是抓取页面上的封面大图地址 (假设一个页面只有一张).
2. 第二次访问 则是下载所有图片.
我们通常是将 1, 2 分为两次操作, 以免图片下载失败就导致 post 页面也被标记为失败,
导致要对页面重新访问. 造成重复操作.
.. code-block:: python
class Post(MongodbEntity):
_id = fields.StringField(primary_key)
status_detail = fields.IntField(default=0)
edit_at_detail = fields.DateTimeField(default=epoch)
cover_url = field.StringField()
CONF_STATUS_KEY = "status_detail"
CONF_EDIT_AT_KEY = "edit_at_detail"
def build_url(self):
return "www.example.com/post/{}".format(self._id)
class PostCoverImage(Post)
status_download = fields.IntField(default=0)
edit_at_download = fields.DateTimeField(default=epoch)
CONF_STATUS_KEY = "status_download"
CONF_EDIT_AT_KEY = "edit_at_download"
def build_url(self):
return self.cover_url
"""
meta = {
"abstract": True,
}
CONF_STATUS_KEY = "status"
CONF_EDIT_AT_KEY = "edit_at"
status = fields.IntField(default=Status.S0_ToDo.id)
edit_at = fields.DateTimeField(default=lambda: epoch)
|
class MongodbEntitySingleStatus(MongodbEntity):
'''
**中文文档**
如果某个页面的 Entity 类不会被其他类继承, 通常即意味着对于该页面我们只有一种抓取模式.
也就是说只需要一套 ``status``, ``edit_at`` field.
什么叫做: 会被其他类继承, 有多种抓取模式?
例如, 我们要抓取一个图片网站上的图片. 网址的格式为 example.com/post/<post_id>
1. 我们第一次访问 post 页面是抓取页面上的封面大图地址 (假设一个页面只有一张).
2. 第二次访问 则是下载所有图片.
我们通常是将 1, 2 分为两次操作, 以免图片下载失败就导致 post 页面也被标记为失败,
导致要对页面重新访问. 造成重复操作.
.. code-block:: python
class Post(MongodbEntity):
_id = fields.StringField(primary_key)
status_detail = fields.IntField(default=0)
edit_at_detail = fields.DateTimeField(default=epoch)
cover_url = field.StringField()
CONF_STATUS_KEY = "status_detail"
CONF_EDIT_AT_KEY = "edit_at_detail"
def build_url(self):
return "www.example.com/post/{}".format(self._id)
class PostCoverImage(Post)
status_download = fields.IntField(default=0)
edit_at_download = fields.DateTimeField(default=epoch)
CONF_STATUS_KEY = "status_download"
CONF_EDIT_AT_KEY = "edit_at_download"
def build_url(self):
return self.cover_url
'''
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 3.5 | 1 | 0 | 0 | 3 | 0 | 0 | 0 | 31 | 51 | 15 | 8 | 6 | 7 | 28 | 6 | 6 | 5 | 0 | 5 | 0 | 0 |
148,055 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/entity/base.py
|
crawlib.entity.base.RelationshipConfig
|
class RelationshipConfig(object):
"""
This class defines crawlable entity class's relationship.
**中文文档**
简单来说, 就是一个 Parent Entity 页面上, 可能会出现哪些 Child Entity 的数据.
"""
def __init__(self, relationship_collection: List[Relationship] = None):
"""
:type relationship_collection: List[Relationship]
:param relationship_collection:
"""
if relationship_collection is None:
relationship_collection = list()
self.relationship_collection = relationship_collection # type: List[Relationship]
self.mapping = dict() # type: Dict[Type[EntityExtendScheduler], Relationship]
for relationship in relationship_collection:
self.mapping[relationship.child_klass] = relationship
def get_relationship(self, klass: Type[Union[EntityBase, 'Entity']]) -> str:
"""
Get relationship of the parent Entity to the child Entity.
"""
return self.mapping[klass].relationship
def get_n_child_key(self, klass: Type[Union[EntityBase, 'Entity']]) -> str:
"""
Get the column / field name that identified how many child it has
in ORM entity class.
"""
return self.mapping[klass].n_child_key
def __iter__(self):
return iter(self.mapping)
def iter_recursive_child_class(self) -> Iterable[Union[EntityBase, 'Entity']]:
"""
A method that yield child entity class to crawl when current entity is done.
"""
for relationship in self.relationship_collection:
if relationship.recursive:
yield relationship.child_klass
|
class RelationshipConfig(object):
'''
This class defines crawlable entity class's relationship.
**中文文档**
简单来说, 就是一个 Parent Entity 页面上, 可能会出现哪些 Child Entity 的数据.
'''
def __init__(self, relationship_collection: List[Relationship] = None):
'''
:type relationship_collection: List[Relationship]
:param relationship_collection:
'''
pass
def get_relationship(self, klass: Type[Union[EntityBase, 'Entity']]) -> str:
'''
Get relationship of the parent Entity to the child Entity.
'''
pass
def get_n_child_key(self, klass: Type[Union[EntityBase, 'Entity']]) -> str:
'''
Get the column / field name that identified how many child it has
in ORM entity class.
'''
pass
def __iter__(self):
pass
def iter_recursive_child_class(self) -> Iterable[Union[EntityBase, 'Entity']]:
'''
A method that yield child entity class to crawl when current entity is done.
'''
pass
| 6 | 5 | 6 | 0 | 3 | 3 | 2 | 1.17 | 1 | 5 | 2 | 0 | 5 | 2 | 5 | 5 | 44 | 7 | 18 | 10 | 12 | 21 | 18 | 10 | 12 | 3 | 1 | 2 | 9 |
148,056 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/entity/base.py
|
crawlib.entity.base.Relationship
|
class Relationship(object):
"""
:class:`Relationship` defines crawlable entity relationship.
Use blog app as example. You want to extract all Post from ListPage.
Then on ListPage should have many Post. So one ListPage has ``MANY`` Post.
Then you can define the relationship this way::
Relationship(child_klass=Post, relationship="many", n_child_key="n_post")
.. note::
One crawlable entity may relate to multiple child entity.
See :class:`RelationshipConfig` for more info.
**中文文档**
在 crawlib 框架中, 所有的 url 背后都对应着一个 Entity. Relationship 类定义了
当前的 Entity 与其他的 Entity 类之间的关系.
一对一关系. 常用于下载页面. 例如包含了一个图片的页面, 第一步我们要访问页面, 获得图片的地址.
第二部我们要将图片下载到本地. 这时我们可以定义主 Entity 为 Page, 子 Entity 为 Image.
其中 Image 是 Page 的 Subclass, 直接继承而来, 并且共用一个数据表.
一对多关系. 常用于当前 url 上的链接对应的新 url 隶属不同的 ORM Entity, 也就是不同
的数据表 的情况.
:param child_klass: 在 parent entity class 页面中能抓取到的 child entity klass
类.
:param relationship: "one" or "many" 表示是 1 对多或一对 1 的关系.
:param n_child_key:
:param recursive: 只有当 recursive 为 True 时, 才会在执行广度优先爬虫时, 爬完一个
entity 之后, 继续爬下一个 child entity. 设置为 False 时, 则表示仅仅将 child
entity 存入数据库, 而并不对 child entity 对应的 url 做抓取.
"""
class Option(object):
one = "one"
many = "many"
def __init__(self,
child_klass: Type[EntityBase],
relationship: str,
n_child_key: str = None,
recursive: bool = True):
if not issubclass(child_klass, EntityBase):
msg = "'{}' has to be subclass of 'Entity'!".format(child_klass)
raise TypeError(msg)
if relationship not in (self.Option.one, self.Option.many):
msg = "`relationship` has to be one of 'one' or 'many'!"
raise ValueError(msg)
if (n_child_key is None) and (relationship == self.Option.many):
msg = "you have to specify `n_child_key` when `relationship` is 'many'!"
raise ValueError(msg)
self.child_klass = child_klass
self.relationship = relationship
self.n_child_key = n_child_key
self.recursive = recursive
|
class Relationship(object):
'''
:class:`Relationship` defines crawlable entity relationship.
Use blog app as example. You want to extract all Post from ListPage.
Then on ListPage should have many Post. So one ListPage has ``MANY`` Post.
Then you can define the relationship this way::
Relationship(child_klass=Post, relationship="many", n_child_key="n_post")
.. note::
One crawlable entity may relate to multiple child entity.
See :class:`RelationshipConfig` for more info.
**中文文档**
在 crawlib 框架中, 所有的 url 背后都对应着一个 Entity. Relationship 类定义了
当前的 Entity 与其他的 Entity 类之间的关系.
一对一关系. 常用于下载页面. 例如包含了一个图片的页面, 第一步我们要访问页面, 获得图片的地址.
第二部我们要将图片下载到本地. 这时我们可以定义主 Entity 为 Page, 子 Entity 为 Image.
其中 Image 是 Page 的 Subclass, 直接继承而来, 并且共用一个数据表.
一对多关系. 常用于当前 url 上的链接对应的新 url 隶属不同的 ORM Entity, 也就是不同
的数据表 的情况.
:param child_klass: 在 parent entity class 页面中能抓取到的 child entity klass
类.
:param relationship: "one" or "many" 表示是 1 对多或一对 1 的关系.
:param n_child_key:
:param recursive: 只有当 recursive 为 True 时, 才会在执行广度优先爬虫时, 爬完一个
entity 之后, 继续爬下一个 child entity. 设置为 False 时, 则表示仅仅将 child
entity 存入数据库, 而并不对 child entity 对应的 url 做抓取.
'''
class Option(object):
def __init__(self,
child_klass: Type[EntityBase],
relationship: str,
n_child_key: str = None,
recursive: bool = True):
pass
| 3 | 1 | 18 | 0 | 18 | 0 | 4 | 1.14 | 1 | 6 | 2 | 0 | 1 | 4 | 1 | 1 | 59 | 12 | 22 | 14 | 15 | 25 | 18 | 10 | 15 | 4 | 1 | 1 | 4 |
148,057 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/example/scrapy_movie/items.py
|
crawlib.example.scrapy_movie.items.ScrapyMovieItem
|
class ScrapyMovieItem(scrapy.Item):
_id = scrapy.Field()
title = scrapy.Field()
status = scrapy.Field()
edit_at = scrapy.Field()
def process(self):
c_movie.update_one(
filter={"_id": self["_id"]},
update={"$set": dict(self)},
upsert=True,
)
|
class ScrapyMovieItem(scrapy.Item):
def process(self):
pass
| 2 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 12 | 1 | 11 | 6 | 9 | 0 | 7 | 6 | 5 | 1 | 1 | 0 | 1 |
148,058 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/entity/base.py
|
crawlib.entity.base.ParseResult
|
class ParseResult(object):
"""
A data container to hold scraped data.
**中文文档**
ParseResult 是 crawlib 广度优先框架中所使用的类, 用于包装从 html 抓取的数据.
在 crawlib 中一个 url 对应着一个 entity, 而 html 中可能会抓取到其他 child entity
的信息. 对于当前 entity 的信息, 我们将其存储在 :attr:`ParseResult.entity` 中.
对于 child entity 的信息, 我们将其存储在 :attr:`ParseResult.children` 中.
:param entity_data: 由于 html 背后必然对应一个 url, 而在 crawlib2 框架里, 每一个 url
都对应着一个 ORM Entity. 此属性保存的就是这个从 html 中提取出来的,
跟 html 唯一对应的 Entity 的其他属性的值. 这些值会被写入数据库中.
:param children: 从 entity 所对应的 url 页面上抓取下来的其他 entity 实例. 在
``Entity.process_pr`` 方法中, 会根据 child entity 的类型进行归类, 然后对
每类进行处理.
:param additional_data: 与 crawlib 框架无关的额外的数据, 用于扩展 crawlib 的功能.
:param status: 表示当前的抓取状态码
:param enit_at: 表示最新更新的时间.
"""
entity_data = attr.ib(default=None) # type: Optional[dict]
children = attr.ib(factory=list) # type: Optional[List[Entity]]
additional_data = attr.ib(factory=dict) # type: Dict
status = attr.ib(
default=Status.S30_ParseError.id,
validator=attr.validators.instance_of(int)
) # type: int
edit_at = attr.ib(default=datetime.utcnow()) # type: datetime
@entity_data.validator
def check_entity_data(self, attribute, value):
"""
- :attr:`ParseResult.entity` could be None, it means the SELF entity
will not be updated.
- :attr:`ParseResult.entity` should be Any subclass of :class:`Entity`
"""
if value is not None:
if not isinstance(value, dict):
raise TypeError("ParseResult.entity_data has to be a dictionary")
@children.validator
def check_children(self, attribute, value):
for item in value:
if not isinstance(item, Entity):
raise TypeError("ParseResult.children has to be a list of Entity")
# -- utility methods
def set_status_todo(self):
self.status = Status.S0_ToDo.id
def set_status_url_error(self):
self.status = Status.S5_UrlError.id
def set_status_http_error(self):
self.status = Status.S10_HttpError.id
def set_status_wrong_page(self):
self.status = Status.S20_WrongPage.id
def set_status_decode_error(self):
self.status = Status.S25_DecodeError.id
def set_status_parse_error(self):
self.status = Status.S30_ParseError.id
def set_status_incomplete_data(self):
self.status = Status.S40_InCompleteData.id
def set_status_finished(self):
self.status = Status.S50_Finished.id
def set_status_server_side_error(self):
self.status = Status.S60_ServerSideError.id
def set_status_finalized(self):
self.status = Status.S99_Finalized.id
def is_finished(self):
"""
test if the status should be marked as `finished`.
:rtype: bool
"""
try:
return self.status >= FINISHED_STATUS_CODE
except: # pragma: no cover
return False
|
class ParseResult(object):
'''
A data container to hold scraped data.
**中文文档**
ParseResult 是 crawlib 广度优先框架中所使用的类, 用于包装从 html 抓取的数据.
在 crawlib 中一个 url 对应着一个 entity, 而 html 中可能会抓取到其他 child entity
的信息. 对于当前 entity 的信息, 我们将其存储在 :attr:`ParseResult.entity` 中.
对于 child entity 的信息, 我们将其存储在 :attr:`ParseResult.children` 中.
:param entity_data: 由于 html 背后必然对应一个 url, 而在 crawlib2 框架里, 每一个 url
都对应着一个 ORM Entity. 此属性保存的就是这个从 html 中提取出来的,
跟 html 唯一对应的 Entity 的其他属性的值. 这些值会被写入数据库中.
:param children: 从 entity 所对应的 url 页面上抓取下来的其他 entity 实例. 在
``Entity.process_pr`` 方法中, 会根据 child entity 的类型进行归类, 然后对
每类进行处理.
:param additional_data: 与 crawlib 框架无关的额外的数据, 用于扩展 crawlib 的功能.
:param status: 表示当前的抓取状态码
:param enit_at: 表示最新更新的时间.
'''
@entity_data.validator
def check_entity_data(self, attribute, value):
'''
- :attr:`ParseResult.entity` could be None, it means the SELF entity
will not be updated.
- :attr:`ParseResult.entity` should be Any subclass of :class:`Entity`
'''
pass
@children.validator
def check_children(self, attribute, value):
pass
def set_status_todo(self):
pass
def set_status_url_error(self):
pass
def set_status_http_error(self):
pass
def set_status_wrong_page(self):
pass
def set_status_decode_error(self):
pass
def set_status_parse_error(self):
pass
def set_status_incomplete_data(self):
pass
def set_status_finished(self):
pass
def set_status_server_side_error(self):
pass
def set_status_finalized(self):
pass
def is_finished(self):
'''
test if the status should be marked as `finished`.
:rtype: bool
'''
pass
| 16 | 3 | 3 | 0 | 3 | 1 | 1 | 0.75 | 1 | 14 | 12 | 0 | 13 | 0 | 13 | 13 | 88 | 17 | 44 | 22 | 28 | 33 | 39 | 20 | 25 | 3 | 1 | 2 | 18 |
148,059 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/decorator.py
|
crawlib.decorator.FakeResponse
|
class FakeResponse: pass
|
class FakeResponse:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 0 | 0 | 0 |
148,060 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/decode.py
|
crawlib.decode.UrlSpecifiedDecoder
|
class UrlSpecifiedDecoder(object):
"""
Designed for automatically decoding html from binary content of an url.
First, `chardet.detect` is very expensive in time.
Second, usually each website (per domain) only use one encoding algorithm.
This class avoid perform `chardet.detect` twice on the same domain.
:param domain_encoding_table: dict, key is root domain, and value is the
domain's default encoding.
"""
class ErrorsHandle(object):
strict = "strict"
ignore = "ignore"
replace = "replace"
def __init__(self):
self.domain_encoding_table = dict()
def decode(self, binary, url, encoding=None, errors="strict"):
"""
Decode binary to string.
:param binary: binary content of a http request.
:param url: endpoint of the request.
:param encoding: manually specify the encoding.
:param errors: errors handle method.
:return: str
"""
if encoding is None:
domain = util.get_domain(url)
if domain in self.domain_encoding_table:
encoding = self.domain_encoding_table[domain]
html = binary.decode(encoding, errors=errors)
else:
html, encoding, confidence = smart_decode(
binary, errors=errors)
# cache domain name and encoding
self.domain_encoding_table[domain] = encoding
else:
html = binary.decode(encoding, errors=errors)
return html
|
class UrlSpecifiedDecoder(object):
'''
Designed for automatically decoding html from binary content of an url.
First, `chardet.detect` is very expensive in time.
Second, usually each website (per domain) only use one encoding algorithm.
This class avoid perform `chardet.detect` twice on the same domain.
:param domain_encoding_table: dict, key is root domain, and value is the
domain's default encoding.
'''
class ErrorsHandle(object):
def __init__(self):
pass
def decode(self, binary, url, encoding=None, errors="strict"):
'''
Decode binary to string.
:param binary: binary content of a http request.
:param url: endpoint of the request.
:param encoding: manually specify the encoding.
:param errors: errors handle method.
:return: str
'''
pass
| 4 | 2 | 14 | 2 | 8 | 5 | 2 | 0.85 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 2 | 46 | 9 | 20 | 11 | 16 | 17 | 17 | 11 | 13 | 3 | 1 | 2 | 4 |
148,061 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/cached_request.py
|
crawlib.cached_request.CachedRequest
|
class CachedRequest(object):
"""
Implement a disk cache backed html puller, primarily using ``requests`` library.
Usage:
.. code-block:: python
import pytest
from crawlib import create_cache_here, CachedRequest
from xxx import parse_html
cache = create_cache_here(__file__)
spider = CachedRequest(cache=cache)
def test_parse_html_function():
url = "https://www.python.org/downloads/"
html = spider.request_for_html(url) # equivalent to requests.get(url)
# validate your parse html function
result = parse_html(html)
To make post request:
.. code-block:: python
def test_parse_html_function():
url = "https://www.python.org/downloads/"
html = spider.request_for_html(
url,
request_method=requests.post,
request_kwargs={"data": ...},
)
# validate your parse html function
result = parse_html(html)
**中文文档**
在为爬虫程序写测试时, 由于我们要对 针对某一类 URL 所对应的 Html 进行数据抽取的函数
进行测试, 我们希望在一段时间内, 比如1天内, 只爬取一次. 使得在本地机器上反复测试时,
可以不用每次等待爬取. **以加快每次测试的速度**.
"""
def __init__(self,
cache: Cache,
log_cache_miss: bool = False,
expire: int = 24 * 3600):
"""
:type cache: Cache
:param cache:
:type log_cache_miss: bool
:param log_cache_miss: default False
:type expire: int
:param expire: default expire time for cache
"""
if not isinstance(cache, Cache):
raise TypeError
self.cache = cache
self.log_cache_miss = log_cache_miss
self.expire = expire
self.use_which = "requests" # type: str
self.get_html_method = self.get_html_method_for_requests # type: callable
self.use_requests()
def use_requests(self):
self.use_which = "requests"
self.get_html_method = self.get_html_method_for_requests
def get_html_method_for_requests(self,
response: requests.Response,
encoding: str = None,
errors: str = "strict",
**kwargs) -> str:
"""
Get html from ``requests.Response`` object.
:param response: the return of ``requests.request(method, url, **kwargs)``
:param encoding: manually specify the encoding.
:param errors: errors handle method.
:return: html
"""
return decoder.decode(
binary=response.content,
url=response.url,
encoding=encoding,
errors=errors,
)
def get_binary_method_for_requests(self,
response: requests.Response,
**kwargs) -> bytes:
"""
Get binary data from ``requests.Response`` object.
:param response:
:param kwargs:
:return: binary data
"""
return response.content
# Frequently used method
def request_for_html(self,
url: str,
get_html_method: typing.Callable = None,
get_html_method_kwargs: dict = None,
request_method: typing.Callable = None,
request_kwargs: dict = None,
cache_expire: int = None,
cacheable_callback: typing.Callable = lambda html: True) -> str:
"""
:param url:
:param get_html_method: a callable method takes requests.Response as
first argument, returns html.
:param get_html_method_kwargs:
:param request_method: requests.get or requests.post
:param request_kwargs:
:param cacheable_callback: a method takes html as single argument,
if returns True, then update cache. otherwise do nothing.
**中文文档**
使用 ``requests.request()`` 执行 HTTP request, 返回 HTML.
永远优先尝试使用缓存. 如果缓存未命中, 则执行 HTTP request. 并用
cacheable_callback 检查 html, 如果返回 True, 则更新缓存. 如果返回 False
则不更新缓存.
"""
if get_html_method is None:
get_html_method = self.get_html_method
if get_html_method_kwargs is None:
get_html_method_kwargs = dict()
if request_method is None:
request_method = requests.get
if request_kwargs is None:
request_kwargs = dict()
if cache_expire is None:
cache_expire = self.expire
if self.use_which == "requests":
if "url" not in request_kwargs:
request_kwargs["url"] = url
if url in self.cache:
return self.cache[url]
else:
if self.log_cache_miss:
msg = "{} doesn't hit cache!".format(url)
print(msg)
response = request_method(**request_kwargs)
html = get_html_method(response, **get_html_method_kwargs)
if cacheable_callback(html):
self.cache.set(url, html, cache_expire)
return html
|
class CachedRequest(object):
'''
Implement a disk cache backed html puller, primarily using ``requests`` library.
Usage:
.. code-block:: python
import pytest
from crawlib import create_cache_here, CachedRequest
from xxx import parse_html
cache = create_cache_here(__file__)
spider = CachedRequest(cache=cache)
def test_parse_html_function():
url = "https://www.python.org/downloads/"
html = spider.request_for_html(url) # equivalent to requests.get(url)
# validate your parse html function
result = parse_html(html)
To make post request:
.. code-block:: python
def test_parse_html_function():
url = "https://www.python.org/downloads/"
html = spider.request_for_html(
url,
request_method=requests.post,
request_kwargs={"data": ...},
)
# validate your parse html function
result = parse_html(html)
**中文文档**
在为爬虫程序写测试时, 由于我们要对 针对某一类 URL 所对应的 Html 进行数据抽取的函数
进行测试, 我们希望在一段时间内, 比如1天内, 只爬取一次. 使得在本地机器上反复测试时,
可以不用每次等待爬取. **以加快每次测试的速度**.
'''
def __init__(self,
cache: Cache,
log_cache_miss: bool = False,
expire: int = 24 * 3600):
'''
:type cache: Cache
:param cache:
:type log_cache_miss: bool
:param log_cache_miss: default False
:type expire: int
:param expire: default expire time for cache
'''
pass
def use_requests(self):
pass
def get_html_method_for_requests(self,
response: requests.Response,
encoding: str = None,
errors: str = "strict",
**kwargs) -> str:
'''
Get html from ``requests.Response`` object.
:param response: the return of ``requests.request(method, url, **kwargs)``
:param encoding: manually specify the encoding.
:param errors: errors handle method.
:return: html
'''
pass
def get_binary_method_for_requests(self,
response: requests.Response,
**kwargs) -> bytes:
'''
Get binary data from ``requests.Response`` object.
:param response:
:param kwargs:
:return: binary data
'''
pass
def request_for_html(self,
url: str,
get_html_method: typing.Callable = None,
get_html_method_kwargs: dict = None,
request_method: typing.Callable = None,
request_kwargs: dict = None,
cache_expire: int = None,
cacheable_callback: typing.Callable = lambda html: True) -> str:
'''
:param url:
:param get_html_method: a callable method takes requests.Response as
first argument, returns html.
:param get_html_method_kwargs:
:param request_method: requests.get or requests.post
:param request_kwargs:
:param cacheable_callback: a method takes html as single argument,
if returns True, then update cache. otherwise do nothing.
**中文文档**
使用 ``requests.request()`` 执行 HTTP request, 返回 HTML.
永远优先尝试使用缓存. 如果缓存未命中, 则执行 HTTP request. 并用
cacheable_callback 检查 html, 如果返回 True, 则更新缓存. 如果返回 False
则不更新缓存.
'''
pass
| 6 | 5 | 23 | 4 | 12 | 8 | 3 | 1.1 | 1 | 7 | 0 | 0 | 5 | 5 | 5 | 5 | 164 | 34 | 63 | 30 | 41 | 69 | 41 | 14 | 35 | 11 | 1 | 2 | 16 |
148,062 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/exc.py
|
crawlib.exc.WrongHtmlError
|
class WrongHtmlError(Exception):
"""
The html is not the one we desired.
**中文文档**
页面不是我们想要的页面。有以下几种可能:
1. 服务器暂时连不上, 返回了404页面。
2. 服务器要求验证码, 返回了验证码页面。
3. 页面暂时因为各种奇怪的原因不是我们需要的页面。
"""
status_code = Status.S20_WrongPage.id
|
class WrongHtmlError(Exception):
'''
The html is not the one we desired.
**中文文档**
页面不是我们想要的页面。有以下几种可能:
1. 服务器暂时连不上, 返回了404页面。
2. 服务器要求验证码, 返回了验证码页面。
3. 页面暂时因为各种奇怪的原因不是我们需要的页面。
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 13 | 3 | 2 | 2 | 1 | 8 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
148,063 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/s2_music/entity_music_sql_backend.py
|
crawlib.tests.dummy_site_crawler.sql_backend.s2_music.entity_music_sql_backend.MusicPageBase
|
class MusicPageBase(Base, SqlEntitySingleStatus):
__abstract__ = True
def build_request(self, url, **kwargs):
request = url
return request
def send_request(self, request, **kwargs):
return requests.get(request)
|
class MusicPageBase(Base, SqlEntitySingleStatus):
def build_request(self, url, **kwargs):
pass
def send_request(self, request, **kwargs):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 2 | 0 | 0 | 4 | 2 | 0 | 2 | 36 | 9 | 2 | 7 | 5 | 4 | 0 | 7 | 5 | 4 | 1 | 6 | 0 | 2 |
148,064 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/scrapy_movie/scrapy_movie/middlewares.py
|
scrapy_movie.middlewares.ScrapyMovieDownloaderMiddleware
|
class ScrapyMovieDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
class ScrapyMovieDownloaderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
pass
def process_request(self, request, spider):
pass
def process_response(self, request, response, spider):
pass
def process_exception(self, request, exception, spider):
pass
def spider_opened(self, spider):
pass
| 7 | 0 | 7 | 1 | 2 | 4 | 1 | 1.64 | 1 | 0 | 0 | 0 | 4 | 0 | 5 | 5 | 45 | 8 | 14 | 8 | 7 | 23 | 13 | 7 | 7 | 1 | 1 | 0 | 5 |
148,065 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/scrapy_movie/scrapy_movie/middlewares.py
|
scrapy_movie.middlewares.ScrapyMovieSpiderMiddleware
|
class ScrapyMovieSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
class ScrapyMovieSpiderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
pass
def process_spider_input(self, response, spider):
pass
def process_spider_output(self, response, result, spider):
pass
def process_spider_exception(self, response, exception, spider):
pass
def process_start_requests(self, start_requests, spider):
pass
def spider_opened(self, spider):
pass
| 8 | 0 | 6 | 1 | 3 | 3 | 1 | 1 | 1 | 0 | 0 | 0 | 5 | 0 | 6 | 6 | 46 | 10 | 18 | 11 | 10 | 18 | 17 | 10 | 10 | 2 | 1 | 1 | 8 |
148,066 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/scrapy_movie/scrapy_movie/pipelines.py
|
scrapy_movie.pipelines.ScrapyMoviePipeline
|
class ScrapyMoviePipeline(object):
def process_item(self, item, spider):
return item
|
class ScrapyMoviePipeline(object):
def process_item(self, item, spider):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
148,067 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/scrapy_movie/scrapy_movie/spiders/example.py
|
scrapy_movie.spiders.example.ExampleSpider
|
class ExampleSpider(scrapy.Spider):
name = 'example'
allowed_domains = ['example.com']
start_urls = ['http://example.com/']
def parse(self, response):
pass
|
class ExampleSpider(scrapy.Spider):
def parse(self, response):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 7 | 1 | 6 | 5 | 4 | 0 | 6 | 5 | 4 | 1 | 1 | 0 | 1 |
148,068 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/devops/config.py
|
crawlib.devops.config.Config
|
class Config(ConfigClass):
METADATA = Constant(default=dict())
PROJECT_NAME = Constant()
PROJECT_NAME_SLUG = Derivable()
@PROJECT_NAME_SLUG.getter
def get_project_name_slug(self):
return self.PROJECT_NAME.get_value().replace("_", "-")
STAGE = Constant() # example dev / test / prod
ENVIRONMENT_NAME = Derivable()
@ENVIRONMENT_NAME.getter
def get_ENVIRONMENT_NAME(self):
return "{}-{}".format(self.PROJECT_NAME_SLUG.get_value(self), self.STAGE.get_value())
|
class Config(ConfigClass):
@PROJECT_NAME_SLUG.getter
def get_project_name_slug(self):
pass
@ENVIRONMENT_NAME.getter
def get_ENVIRONMENT_NAME(self):
pass
| 5 | 0 | 2 | 0 | 2 | 0 | 1 | 0.08 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 17 | 5 | 12 | 10 | 7 | 1 | 10 | 8 | 7 | 1 | 1 | 0 | 2 |
148,069 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/s2_music/entity_music.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.s2_music.entity_music.GenrePage
|
class GenrePage(MusicWebsiteEntity):
CONF_UPDATE_INTERVAL = 3600
CONF_UPDATE_FIELDS = ("musics", "n_music")
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(MusicPage, Relationship.Option.many, "n_music", recursive=False)
])
_id = fields.IntField(primary_key=True)
musics = fields.ListField(fields.IntField())
n_music = fields.IntField()
meta = dict(
collection="site_music_genre",
db_alias=config.DB_DATABASE.get_value(),
)
@property
def genre_id(self):
return self._id
def build_url(self):
return url_builder.url_genre(self._id)
def parse_response(self, url, request, response, html=None, **kwargs):
if html is None:
html = response.text
soup = BeautifulSoup(html, "html.parser")
div = soup.find("div", id="detail")
musics = [
int(a["href"].split("/")[-1])
for a in div.find_all("a")
]
entity_data = dict(musics=musics)
children = list()
for music_id in musics:
music = MusicPage(_id=music_id)
children.append(music)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class GenrePage(MusicWebsiteEntity):
@property
def genre_id(self):
pass
def build_url(self):
pass
def parse_response(self, url, request, response, html=None, **kwargs):
pass
| 5 | 0 | 10 | 1 | 9 | 0 | 2 | 0 | 1 | 8 | 4 | 0 | 3 | 0 | 3 | 36 | 50 | 10 | 40 | 21 | 35 | 0 | 26 | 20 | 22 | 3 | 7 | 1 | 5 |
148,070 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/s2_music/entity_music.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.s2_music.entity_music.ArtistPage
|
class ArtistPage(MusicWebsiteEntity):
CONF_UPDATE_INTERVAL = 3600
CONF_UPDATE_FIELDS = ("musics", "n_music")
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(MusicPage, Relationship.Option.many, "n_music", recursive=False)
])
_id = fields.IntField(primary_key=True)
musics = fields.ListField(fields.IntField())
n_music = fields.IntField()
meta = dict(
collection="site_music_artist",
db_alias=config.DB_DATABASE.get_value(),
)
@property
def artist_id(self):
return self._id
def build_url(self):
return url_builder.url_artist(self._id)
def parse_response(self, url, request, response, html=None, **kwargs):
if html is None:
html = response.text
soup = BeautifulSoup(html, "html.parser")
div = soup.find("div", id="detail")
musics = [
int(a["href"].split("/")[-1])
for a in div.find_all("a")
]
entity_data = dict(musics=musics)
children = list()
for music_id in musics:
music = MusicPage(_id=music_id)
children.append(music)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class ArtistPage(MusicWebsiteEntity):
@property
def artist_id(self):
pass
def build_url(self):
pass
def parse_response(self, url, request, response, html=None, **kwargs):
pass
| 5 | 0 | 10 | 1 | 9 | 0 | 2 | 0 | 1 | 8 | 4 | 0 | 3 | 0 | 3 | 36 | 50 | 10 | 40 | 21 | 35 | 0 | 26 | 20 | 22 | 3 | 7 | 1 | 5 |
148,071 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/s2_music/entity_base.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.s2_music.entity_base.MusicWebsiteEntity
|
class MusicWebsiteEntity(MongodbEntitySingleStatus):
meta = {
"abstract": True,
}
def build_request(self, url, **kwargs):
request = url
return request
def send_request(self, request, **kwargs):
return requests.get(request)
|
class MusicWebsiteEntity(MongodbEntitySingleStatus):
def build_request(self, url, **kwargs):
pass
def send_request(self, request, **kwargs):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 4 | 2 | 0 | 2 | 33 | 11 | 2 | 9 | 5 | 6 | 0 | 7 | 5 | 4 | 1 | 6 | 0 | 2 |
148,072 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/middleware/base.py
|
crawlib.middleware.base.DomainSpecifiedKlass
|
class DomainSpecifiedKlass(object):
"""
"""
domain = None
def __init__(self, domain=None):
"""
:type domain: str
:param domain:
"""
if domain is not None:
self.domain = domain
if self.domain is None:
raise ValueError("You have to specify `domain`")
self.domain = util.get_domain(self.domain)
|
class DomainSpecifiedKlass(object):
'''
'''
def __init__(self, domain=None):
'''
:type domain: str
:param domain:
'''
pass
| 2 | 2 | 10 | 0 | 6 | 4 | 3 | 0.75 | 1 | 1 | 0 | 1 | 1 | 0 | 1 | 1 | 15 | 1 | 8 | 3 | 6 | 6 | 8 | 3 | 6 | 3 | 1 | 1 | 3 |
148,073 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/middleware/url_builder/builder.py
|
crawlib.middleware.url_builder.builder.BaseUrlBuilder
|
class BaseUrlBuilder(DomainSpecifiedKlass):
"""
Base url builder. Provide functional interface to create url.
Example::
>>> from crawlib2 import BaseUrlBuilder
>>> class PythonOrgUrlBuilder(DomainSpecifiedKlass):
... domain = "https://www.python.org"
...
... def url_downloads_page(self):
... return self.join_all("downloads")
...
... def url_release(self, version):
... '''version is like "2.7.16", "3.6.8", ...'''
... return self.join_all("downloads", "release", version.replace(".". ""))
>>> url_builder = PythonOrgUrlBuilder()
"""
def join_all(self, *parts):
"""
Join all parts with domain. Example domain: https://www.python.org
:rtype: list
:param parts: Other parts, example: "/doc", "/py27"
:rtype: str
:return: url
Example::
>>> join_all("product", "iphone")
https://www.apple.com/product/iphone
"""
url = util.join_all(self.domain, *parts)
return url
def add_params(self, endpoint, params):
"""
Combine query endpoint and params.
"""
if not endpoint.startswith(self.domain):
raise ValueError("%s not start with %s" % (endpoint, self.domain))
return util.add_params(endpoint, params)
|
class BaseUrlBuilder(DomainSpecifiedKlass):
'''
Base url builder. Provide functional interface to create url.
Example::
>>> from crawlib2 import BaseUrlBuilder
>>> class PythonOrgUrlBuilder(DomainSpecifiedKlass):
... domain = "https://www.python.org"
...
... def url_downloads_page(self):
... return self.join_all("downloads")
...
... def url_release(self, version):
... '''version is like "2.7.16", "3.6.8", ...'''
... return self.join_all("downloads", "release", version.replace(".". ""))
>>> url_builder = PythonOrgUrlBuilder()
'''
def join_all(self, *parts):
'''
Join all parts with domain. Example domain: https://www.python.org
:rtype: list
:param parts: Other parts, example: "/doc", "/py27"
:rtype: str
:return: url
Example::
>>> join_all("product", "iphone")
https://www.apple.com/product/iphone
'''
pass
def add_params(self, endpoint, params):
'''
Combine query endpoint and params.
'''
pass
| 3 | 3 | 12 | 2 | 4 | 7 | 2 | 3.5 | 1 | 1 | 0 | 3 | 2 | 0 | 2 | 3 | 44 | 8 | 8 | 4 | 5 | 28 | 8 | 4 | 5 | 2 | 2 | 1 | 3 |
148,074 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/exc.py
|
crawlib.exc.ServerSideError
|
class ServerSideError(Exception):
"""
Server side problem.
code 404
**中文文档**
1. 因为服务器的缘故该页面无法正常访问, 也可能已经不存在了, 但以后可能会回来。
2. 因为服务器的缘故, 上面的数据不是我们想要的, 但是我们可以暂时用着, 以后可能要重新抓取。
"""
status_code = Status.S60_ServerSideError.id
|
class ServerSideError(Exception):
'''
Server side problem.
code 404
**中文文档**
1. 因为服务器的缘故该页面无法正常访问, 也可能已经不存在了, 但以后可能会回来。
2. 因为服务器的缘故, 上面的数据不是我们想要的, 但是我们可以暂时用着, 以后可能要重新抓取。
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 3.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 12 | 3 | 2 | 2 | 1 | 7 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
148,075 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/exc.py
|
crawlib.exc.ParseError
|
class ParseError(Exception):
"""
Failed to parse data from html, may due to bug in your method.
**中文文档**
由于函数的设计失误, 解析页面信息发生了错误。
"""
code = Status.S30_ParseError.id
|
class ParseError(Exception):
'''
Failed to parse data from html, may due to bug in your method.
**中文文档**
由于函数的设计失误, 解析页面信息发生了错误。
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 2.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 9 | 2 | 2 | 2 | 1 | 5 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
148,076 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/exc.py
|
crawlib.exc.IncompleteDataError
|
class IncompleteDataError(Exception):
"""
Successfully parse data from html, but we can't accept the result due to
missing data.
"""
status_code = Status.S40_InCompleteData.id
|
class IncompleteDataError(Exception):
'''
Successfully parse data from html, but we can't accept the result due to
missing data.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 6 | 0 | 2 | 2 | 1 | 4 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
148,077 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/exc.py
|
crawlib.exc.ForbiddenError
|
class ForbiddenError(Exception):
"""
Banned from server.
http status code 403
**中文文档**
被服务器禁止访问。
"""
status_code = Status.S20_WrongPage.id
|
class ForbiddenError(Exception):
'''
Banned from server.
http status code 403
**中文文档**
被服务器禁止访问。
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 11 | 3 | 2 | 2 | 1 | 6 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
148,078 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/test_entity_validate_implementation.py
|
test_entity_validate_implementation.test_validate_relationship_config_goodcase2.ImagePage
|
class ImagePage(Entity):
id = "image_page_id"
|
class ImagePage(Entity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,079 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_validate_implementation.py
|
test_entity_validate_implementation.test_validate_relationship_config_goodcase1.Zipcode
|
class Zipcode(Entity): pass
|
class Zipcode(Entity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,080 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/mongodb/test_mongo_entity.py
|
test_mongo_entity.TestMongoEntity
|
class TestMongoEntity(object):
def test_all(self):
class DummyEntityForTest(MongodbEntitySingleStatus):
_id = me.fields.IntField(primary_key=True)
value = me.fields.StringField()
CONF_UPDATE_INTERVAL = 1
CONF_STATUS_KEY = "status"
CONF_EDIT_AT_KEY = "edit_at"
CONF_ONLY_FIELDS = (
"_id",
)
meta = dict(
collection="dummy_entity_for_test",
db_alias=config.DB_DATABASE.get_value(),
)
# --- test SqlEntity.set_db_values() method ---
DummyEntityForTest.col().delete_many({})
DummyEntityForTest.smart_insert(DummyEntityForTest(_id=1, value="Alice"))
# --- test SqlEntity.get_unfinished(), SqlEntity.get_finished() methods ---
DummyEntityForTest.smart_insert(
[
DummyEntityForTest(_id=1, value="Alice", edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(_id=2, value="Bob", status=Status.S50_Finished.id, edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(_id=3, value="Cathy", status=Status.S50_Finished.id, edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(_id=4, value="David", edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(_id=5, value="Edward", edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(_id=6, value="Frank", status=Status.S50_Finished.id, edit_at=datetime(2099, 1, 1)),
DummyEntityForTest(_id=7, value="George", status=Status.S50_Finished.id, edit_at=datetime(2099, 1, 1)),
]
)
assert DummyEntityForTest.count_unfinished() == 3
assert DummyEntityForTest.count_unfinished(filters={"_id": {"$lte": 3}}) == 1
assert DummyEntityForTest.count_unfinished(filters={"_id": {"$gt": 3}}) == 2
assert DummyEntityForTest.count_unfinished(filters={"_id": {"$gt": 3}}, limit=1) == 1
assert [
entity._id
for entity in DummyEntityForTest.get_unfinished(order_by=["-_id", ], limit=2)
] == [5, 4]
# CONF_ONLY_FIELDS taken effect
for entity in DummyEntityForTest.get_unfinished():
assert entity.value is None
assert DummyEntityForTest.count_finished() == 4
assert DummyEntityForTest.count_finished(filters={"_id": {"$lte": 3}}) == 2
assert DummyEntityForTest.count_finished(filters={"_id": {"$gt": 3}}) == 2
assert DummyEntityForTest.count_finished(filters={"_id": {"$gt": 3}}, limit=1) == 1
assert [
entity._id
for entity in DummyEntityForTest.get_finished(order_by=["-_id", ], limit=2)
] == [7, 6]
for entity in DummyEntityForTest.get_finished():
assert entity.value is None
|
class TestMongoEntity(object):
def test_all(self):
pass
class DummyEntityForTest(MongodbEntitySingleStatus):
| 3 | 0 | 56 | 6 | 47 | 3 | 3 | 0.06 | 1 | 2 | 1 | 0 | 1 | 0 | 1 | 1 | 57 | 6 | 48 | 10 | 45 | 3 | 27 | 10 | 24 | 3 | 1 | 1 | 3 |
148,081 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/scrapy_mate/item.py
|
crawlib.scrapy_mate.item.Item
|
class Item(scrapy.Item):
def get_finished(self):
pass
|
class Item(scrapy.Item):
def get_finished(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
148,082 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/status.py
|
crawlib.status.StatusDetail
|
class StatusDetail(Constant):
"""
Status Class Template.
"""
id = None
description = None
description_en = None
description_cn = None
|
class StatusDetail(Constant):
'''
Status Class Template.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.6 | 1 | 0 | 0 | 10 | 0 | 0 | 0 | 0 | 8 | 0 | 5 | 4 | 4 | 3 | 5 | 4 | 4 | 0 | 1 | 0 | 0 |
148,083 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/exc.py
|
crawlib.exc.DownloadOversizeError
|
class DownloadOversizeError(Exception):
"""
The download target are not falls in the size range you specified.
"""
|
class DownloadOversizeError(Exception):
'''
The download target are not falls in the size range you specified.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 4 | 0 | 1 | 1 | 0 | 3 | 1 | 1 | 0 | 0 | 3 | 0 | 0 |
148,084 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/exc.py
|
crawlib.exc.DecodeError
|
class DecodeError(Exception):
"""
Failed to decode binary response.
"""
status_code = Status.S25_DecodeError.id
|
class DecodeError(Exception):
'''
Failed to decode binary response.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 5 | 0 | 2 | 2 | 1 | 3 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
148,085 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/exc.py
|
crawlib.exc.CaptchaError
|
class CaptchaError(Exception):
"""
Encounter a captcha page.
http status code 403
**中文文档**
遭遇反爬虫验证页面。
"""
status_code = Status.S20_WrongPage.id
|
class CaptchaError(Exception):
'''
Encounter a captcha page.
http status code 403
**中文文档**
遭遇反爬虫验证页面。
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 11 | 3 | 2 | 2 | 1 | 6 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
148,086 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/example/scrapy_movie/url_builder.py
|
crawlib.example.scrapy_movie.url_builder.UrlBuilder
|
class UrlBuilder(object):
def listpage_url(self, listpage_id):
return "{}/listpage/{}".format(Config.Url.domain, listpage_id)
|
class UrlBuilder(object):
def listpage_url(self, listpage_id):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 2 | 2 | 0 | 1 | 0 | 1 | 1 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
148,087 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/config.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.config.Config
|
class Config(ConfigClass):
DB_LOCAL_HOST = Constant(default="localhost")
DB_LOCAL_PORT = Constant(default=43346)
DB_LOCAL_DATABASE = Constant(default="admin")
DB_LOCAL_USERNAME = Constant(default="username")
DB_LOCAL_PASSWORD = Constant(default="password")
DB_HOST = Derivable()
@DB_HOST.getter
def get_DB_HOST(self):
if self.is_ci_runtime():
return self.DB_LOCAL_HOST.get_value()
else:
return self.DB_LOCAL_HOST.get_value()
DB_PORT = Derivable()
@DB_PORT.getter
def get_DB_PORT(self):
if self.is_ci_runtime():
return 27017
else:
return self.DB_LOCAL_PORT.get_value()
DB_DATABASE = Derivable()
@DB_DATABASE.getter
def get_DB_DATABASE(self):
if self.is_ci_runtime():
return self.DB_LOCAL_DATABASE.get_value()
else:
return self.DB_LOCAL_DATABASE.get_value()
DB_USERNAME = Derivable()
@DB_USERNAME.getter
def get_DB_USERNAME(self):
if self.is_ci_runtime():
return self.DB_LOCAL_USERNAME.get_value()
else:
return self.DB_LOCAL_USERNAME.get_value()
DB_PASSWORD = Derivable()
@DB_PASSWORD.getter
def get_DB_PASSWORD(self):
if self.is_ci_runtime():
return self.DB_LOCAL_PASSWORD.get_value()
else:
return self.DB_LOCAL_PASSWORD.get_value()
|
class Config(ConfigClass):
@DB_HOST.getter
def get_DB_HOST(self):
pass
@DB_PORT.getter
def get_DB_PORT(self):
pass
@DB_DATABASE.getter
def get_DB_DATABASE(self):
pass
@DB_USERNAME.getter
def get_DB_USERNAME(self):
pass
@DB_PASSWORD.getter
def get_DB_PASSWORD(self):
pass
| 11 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 5 | 0 | 5 | 5 | 51 | 10 | 41 | 21 | 30 | 0 | 31 | 16 | 25 | 2 | 1 | 1 | 10 |
148,088 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/s1_movie/entity_base.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.s1_movie.entity_base.MovieWebsiteEntity
|
class MovieWebsiteEntity(MongodbEntitySingleStatus):
meta = {
"abstract": True,
}
def build_request(self, url, **kwargs):
request = url
return request
def send_request(self, request, **kwargs):
return requests.get(request)
|
class MovieWebsiteEntity(MongodbEntitySingleStatus):
def build_request(self, url, **kwargs):
pass
def send_request(self, request, **kwargs):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 2 | 2 | 0 | 2 | 33 | 11 | 2 | 9 | 5 | 6 | 0 | 7 | 5 | 4 | 1 | 6 | 0 | 2 |
148,089 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/example/scrapy_movie/pipelines.py
|
crawlib.example.scrapy_movie.pipelines.MongodbPipeline
|
class MongodbPipeline(object):
def process_item(self, item, spider):
item.process()
return item
|
class MongodbPipeline(object):
def process_item(self, item, spider):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 4 | 0 | 4 | 2 | 2 | 0 | 4 | 2 | 2 | 1 | 1 | 0 | 1 |
148,090 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/example/scrapy_movie/parser.py
|
crawlib.example.scrapy_movie.parser.MovieListpageParseResult
|
class MovieListpageParseResult(scrapy.Item):
page = scrapy
|
class MovieListpageParseResult(scrapy.Item):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 0 | 0 |
148,091 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/example/scrapy_movie/items.py
|
crawlib.example.scrapy_movie.items.ScrapyMovieListpageItem
|
class ScrapyMovieListpageItem(scrapy.Item):
_id = scrapy.Field()
status = scrapy.Field()
edit_at = scrapy.Field()
def build_url(self):
return "{}/movie/listpage/{}".format(Config.Url.domain, self._id)
def process(self):
c_movie_listpage.update_one(
filter={"_id": self["_id"]},
update={"$set": dict(self)},
upsert=True,
)
|
class ScrapyMovieListpageItem(scrapy.Item):
def build_url(self):
pass
def process(self):
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 3 | 2 | 0 | 2 | 0 | 2 | 2 | 14 | 2 | 12 | 6 | 9 | 0 | 8 | 6 | 5 | 1 | 1 | 0 | 2 |
148,092 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/mongo_backend/s1_movie/entity_movie.py
|
crawlib.tests.dummy_site_crawler.mongo_backend.s1_movie.entity_movie.MoviePageBase
|
class MoviePageBase(MongodbEntity):
_id = fields.IntField(primary_key=True)
title = fields.StringField()
status_movie_info = fields.IntField(default=Status.S0_ToDo.id)
edit_at_movie_info = fields.DateTimeField(default=lambda: time_util.epoch)
image_content = fields.StringField()
status_cover_image = fields.IntField(default=Status.S0_ToDo.id)
edit_at_cover_image = fields.DateTimeField(default=lambda: time_util.epoch)
meta = {
"abstract": True,
}
@property
def movie_id(self):
return self._id
def build_request(self, url, **kwargs):
request = url
return request
def send_request(self, request, **kwargs):
return requests.get(request)
|
class MoviePageBase(MongodbEntity):
@property
def movie_id(self):
pass
def build_request(self, url, **kwargs):
pass
def send_request(self, request, **kwargs):
pass
| 5 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 2 | 3 | 0 | 3 | 34 | 24 | 5 | 19 | 14 | 14 | 0 | 16 | 13 | 12 | 1 | 5 | 0 | 3 |
148,093 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/scrapy_movie/scrapy_movie/spiders/movie.py
|
scrapy_movie.spiders.movie.MovieSpider
|
class MovieSpider(scrapy.Spider):
name = "movie"
def start_requests(self):
listpage_id_list = [
1,
]
for listpage_id in listpage_id_list:
url = url_builder.listpage_url(listpage_id)
yield scrapy.Request(
url=url,
callback=self.parse_movie_listpage,
meta={"listpage_id": listpage_id}
)
def parse_movie_listpage(self, response):
result = parser.parse_movie_listpage(
response.text,
current_listpage_id=response.meta["listpage_id"]
)
for item in result:
item.process()
|
class MovieSpider(scrapy.Spider):
def start_requests(self):
pass
def parse_movie_listpage(self, response):
pass
| 3 | 0 | 9 | 0 | 9 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 22 | 2 | 20 | 9 | 17 | 0 | 11 | 9 | 8 | 2 | 1 | 1 | 4 |
148,094 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/test_cache_request.py
|
test_cache_request.TestCachedRequest
|
class TestCachedRequest(object):
def test(self):
url = "https://www.python.org/"
# https://www.python.org doesn't hit cache! should appear over and over again
html = spider.request_for_html(url, cacheable_callback=lambda html: False)
url = "https://www.python.org/about/"
# https://www.python.org doesn't hit cache! should appear every 10 seconds
html = spider.request_for_html(url, cache_expire=10)
|
class TestCachedRequest(object):
def test(self):
pass
| 2 | 0 | 8 | 1 | 5 | 2 | 1 | 0.33 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 9 | 1 | 6 | 4 | 4 | 2 | 6 | 4 | 4 | 1 | 1 | 0 | 1 |
148,095 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/exc.py
|
crawlib.exc.SoupError
|
class SoupError(Exception):
"""
Failed to convert html to beatifulsoup.
http status 200+
**中文文档**
html成功获得了, 但是格式有错误, 不能转化为soup。
"""
status_code = Status.S30_ParseError.id
|
class SoupError(Exception):
'''
Failed to convert html to beatifulsoup.
http status 200+
**中文文档**
html成功获得了, 但是格式有错误, 不能转化为soup。
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 11 | 3 | 2 | 2 | 1 | 6 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
148,096 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_base.py
|
test_entity_base.TestRelationship
|
class TestRelationship(object):
def test(self):
class Post(object): pass
with raises(TypeError):
Relationship(Post, "many", "n_post")
class Post(Entity): pass
class PostCoverImage(Entity): pass
with raises(ValueError):
Relationship(Post, "bad relationship value", "n_post")
with raises(ValueError):
Relationship(Post, Relationship.Option.many)
Relationship(Post, Relationship.Option.many, "n_post", recursive=True)
Relationship(PostCoverImage, Relationship.Option.one, recursive=True)
|
class TestRelationship(object):
def test(self):
pass
class Post(object):
class Post(object):
class PostCoverImage(Entity):
| 5 | 0 | 19 | 7 | 12 | 0 | 1 | 0 | 1 | 6 | 4 | 0 | 1 | 0 | 1 | 1 | 20 | 7 | 13 | 5 | 11 | 0 | 16 | 5 | 11 | 1 | 1 | 1 | 1 |
148,097 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/s1_movie/entity_movie_sql_backend.py
|
crawlib.tests.dummy_site_crawler.sql_backend.s1_movie.entity_movie_sql_backend.MovieCoverImagePage
|
class MovieCoverImagePage(Base1, MoviePageBase):
__tablename__ = "site_movie_movie"
CONF_UPDATE_INTERVAL = 24 * 3600
CONF_STATUS_KEY = "status_cover_image"
CONF_EDIT_AT_KEY = "edit_at_cover_image"
def build_url(self):
return url_builder.url_movie_detail(self.id)
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
status = Status.S50_Finished.id
entity_data = dict(image_content=html)
pres = ParseResult(
entity_data=entity_data,
additional_data={},
status=status,
)
return pres
|
class MovieCoverImagePage(Base1, MoviePageBase):
def build_url(self):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 4 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 2 | 39 | 20 | 3 | 17 | 11 | 13 | 0 | 12 | 10 | 9 | 1 | 6 | 0 | 2 |
148,098 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/s1_movie/entity_movie_sql_backend.py
|
crawlib.tests.dummy_site_crawler.sql_backend.s1_movie.entity_movie_sql_backend.MoviePage
|
class MoviePage(Base2, MoviePageBase):
__tablename__ = "site_movie_movie"
CONF_UPDATE_INTERVAL = 24 * 3600
CONF_STATUS_KEY = "status_movie_info"
CONF_EDIT_AT_KEY = "edit_at_movie_info"
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(MovieCoverImagePage,
Relationship.Option.one, recursive=True)
])
def build_url(self):
return url_builder.url_movie_detail(self.id)
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
span_title = soup.find("span", class_="title")
title = span_title.text
entity_data = dict(title=title)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
additional_data={},
status=status,
)
return pres
|
class MoviePage(Base2, MoviePageBase):
def build_url(self):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 4 | 0 | 8 | 1 | 7 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 2 | 39 | 28 | 6 | 22 | 14 | 18 | 0 | 15 | 13 | 12 | 1 | 6 | 0 | 2 |
148,099 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/s2_music/entity_music_sql_backend.py
|
crawlib.tests.dummy_site_crawler.sql_backend.s2_music.entity_music_sql_backend.ArtistPage
|
class ArtistPage(MusicPageBase):
__tablename__ = "site_music_artist"
CONF_UPDATE_INTERVAL = 3600
CONF_ONLY_FIELDS = ("id",)
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(
child_klass=MusicPage,
relationship=Relationship.Option.many,
n_child_key="n_music",
recursive=False,
)
])
id = sa.Column(sa.Integer, primary_key=True) # type: int
musics = sa.Column(sa.PickleType) # type: typing.List[int]
n_music = sa.Column(sa.Integer) # type: int
@property
def artist_id(self):
return self.id
def build_url(self):
return url_builder.url_artist(self.artist_id)
@resolve_arg()
def parse_response(self, url, request, response, html=None, soup=None, **kwargs):
div = soup.find("div", id="detail")
musics = [
int(a["href"].split("/")[-1])
for a in div.find_all("a")
]
entity_data = dict(musics=musics)
children = list()
for music_id in musics:
music = MusicPage(id=music_id)
children.append(music)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class ArtistPage(MusicPageBase):
@property
def artist_id(self):
pass
def build_url(self):
pass
@resolve_arg()
def parse_response(self, url, request, response, html=None, soup=None, **kwargs):
pass
| 6 | 0 | 9 | 1 | 8 | 0 | 1 | 0.07 | 1 | 5 | 2 | 0 | 3 | 0 | 3 | 39 | 49 | 9 | 40 | 20 | 34 | 3 | 23 | 18 | 19 | 2 | 7 | 1 | 4 |
148,100 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/s2_music/entity_music_sql_backend.py
|
crawlib.tests.dummy_site_crawler.sql_backend.s2_music.entity_music_sql_backend.GenrePage
|
class GenrePage(MusicPageBase):
__tablename__ = "site_music_genre"
CONF_UPDATE_INTERVAL = 3600
CONF_ONLY_FIELDS = ("id",)
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(
child_klass=MusicPage,
relationship=Relationship.Option.many,
n_child_key="n_music",
recursive=False,
)
])
id = sa.Column(sa.Integer, primary_key=True) # type: int
musics = sa.Column(sa.PickleType) # type: typing.List[int]
n_music = sa.Column(sa.Integer) # type: int
@property
def genre_id(self):
return self.id
def build_url(self):
return url_builder.url_genre(self.genre_id)
@resolve_arg()
def parse_response(self, url, request, response, html=None, soup=None, **kwargs):
div = soup.find("div", id="detail")
musics = [
int(a["href"].split("/")[-1])
for a in div.find_all("a")
]
entity_data = dict(musics=musics)
children = list()
for music_id in musics:
music = MusicPage(id=music_id)
children.append(music)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class GenrePage(MusicPageBase):
@property
def genre_id(self):
pass
def build_url(self):
pass
@resolve_arg()
def parse_response(self, url, request, response, html=None, soup=None, **kwargs):
pass
| 6 | 0 | 9 | 1 | 8 | 0 | 1 | 0.07 | 1 | 5 | 2 | 0 | 3 | 0 | 3 | 39 | 49 | 9 | 40 | 20 | 34 | 3 | 23 | 18 | 19 | 2 | 7 | 1 | 4 |
148,101 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/s2_music/entity_music_sql_backend.py
|
crawlib.tests.dummy_site_crawler.sql_backend.s2_music.entity_music_sql_backend.MusicPage
|
class MusicPage(MusicPageBase):
__tablename__ = "site_music_music"
CONF_UPDATE_INTERVAL = 24 * 3600
CONF_ONLY_FIELDS = ("id",)
id = sa.Column(sa.Integer, primary_key=True) # type: int
title = sa.Column(sa.String) # type: title
artists = sa.Column(sa.PickleType) # type: typing.List[int]
n_artist = sa.Column(sa.Integer) # type: int
genres = sa.Column(sa.PickleType) # type: typing.List[int]
n_genre = sa.Column(sa.Integer) # type: int
@property
def music_id(self):
return self.id
def build_url(self):
return url_builder.url_music_detail(self.music_id)
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
div_detail = soup.find("div", id="detail")
title = div_detail.find("div", class_="title").find("span").text
artists = [
int(a["href"].split("/")[-1])
for a in div_detail.find("div", class_="artists").find_all("a")
]
genres = [
int(a["href"].split("/")[-1])
for a in div_detail.find("div", class_="genres").find_all("a")
]
entity_data = dict(title=title, artists=artists, genres=genres)
children = list()
for artist_id in artists:
children.append(ArtistPage(id=artist_id))
for genre_id in genres:
children.append(GenrePage(id=genre_id))
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class MusicPage(MusicPageBase):
@property
def music_id(self):
pass
def build_url(self):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 6 | 0 | 11 | 1 | 10 | 0 | 2 | 0.15 | 1 | 6 | 3 | 0 | 3 | 0 | 3 | 39 | 51 | 10 | 41 | 24 | 35 | 6 | 28 | 22 | 24 | 3 | 7 | 1 | 5 |
148,102 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/s2_music/entity_music_sql_backend.py
|
crawlib.tests.dummy_site_crawler.sql_backend.s2_music.entity_music_sql_backend.RandomMusicPage
|
class RandomMusicPage(MusicPageBase):
__tablename__ = "site_music_random_music"
CONF_UPDATE_INTERVAL = 1
CONF_ONLY_FIELDS = ("id",)
CONF_RELATIONSHIP = RelationshipConfig([
Relationship(
child_klass=MusicPage,
relationship=Relationship.Option.many,
n_child_key="n_music",
recursive=True,
)
])
id = sa.Column(sa.Integer, primary_key=True) # type: int
musics = sa.Column(sa.PickleType) # type: typing.List[int]
n_music = sa.Column(sa.Integer) # type: int
def build_url(self, **kwargs):
return url_builder.url_random_music()
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
musics = [
int(a["href"].split("/")[-1])
for a in soup.find_all("a")
]
entity_data = dict(musics=musics)
children = list()
for music_id in musics:
music = MusicPage(id=music_id)
children.append(music)
status = Status.S50_Finished.id
pres = ParseResult(
entity_data=entity_data,
children=children,
additional_data={},
status=status,
)
return pres
|
class RandomMusicPage(MusicPageBase):
def build_url(self, **kwargs):
pass
@resolve_arg()
def parse_response(self, url, request, response=None, html=None, soup=None, **kwargs):
pass
| 4 | 0 | 12 | 2 | 10 | 0 | 2 | 0.08 | 1 | 5 | 2 | 0 | 2 | 0 | 2 | 38 | 44 | 8 | 36 | 17 | 32 | 3 | 20 | 16 | 17 | 2 | 7 | 1 | 3 |
148,103 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/test_decorator.py
|
test_decorator.TestDecorator
|
class TestDecorator(object):
url = "https://www.python.org"
content = b'<html><div class="title">Hello World</div></html>'
html = str(content)
response_requests = None
response_scrapy = None
@classmethod
def setup_class(cls):
cls.response_requests = RequestResponse()
cls.response_requests.url = cls.url
cls.response_requests._content = cls.content
cls.response_scrapy = ScrapyResponse(
url=cls.url,
request=ScrapyRequest(url=cls.url),
body=cls.content,
)
def test_resolve_arg(self):
@decorator.resolve_arg(
response_arg="new_response",
html_arg="new_html",
soup_arg="new_soup",
)
def parse_html_func_with_soup(new_response=None, new_html=None, new_soup=None, **kwargs):
validate_resolved_arg(html=new_html, soup=new_soup)
parse_html_func_with_soup(new_response=self.response_requests)
parse_html_func_with_soup(new_response=self.response_scrapy)
parse_html_func_with_soup(new_html=self.html)
@decorator.resolve_arg(
response_arg="new_response",
html_arg="new_html",
rhtml_arg="new_rhtml",
)
def parse_html_func_with_rhtml(new_response=None, new_html=None, new_rhtml=None, **kwargs):
validate_resolved_arg(html=new_html, rhtml=new_rhtml)
parse_html_func_with_rhtml(new_response=self.response_requests)
parse_html_func_with_rhtml(new_response=self.response_scrapy)
parse_html_func_with_rhtml(new_html=self.html)
class HtmlParser(object):
@decorator.resolve_arg()
def parse_html(self, response=None, html=None, soup=None, **kwargs):
validate_resolved_arg(html=html, soup=soup)
html_parser = HtmlParser()
html_parser.parse_html(response=self.response_requests)
html_parser.parse_html(response=self.response_scrapy)
html_parser.parse_html(html=self.html)
|
class TestDecorator(object):
@classmethod
def setup_class(cls):
pass
def test_resolve_arg(self):
pass
@decorator.resolve_arg(
response_arg="new_response",
html_arg="new_html",
soup_arg="new_soup",
)
def parse_html_func_with_soup(new_response=None, new_html=None, new_soup=None, **kwargs):
pass
@decorator.resolve_arg(
response_arg="new_response",
html_arg="new_html",
rhtml_arg="new_rhtml",
)
def parse_html_func_with_rhtml(new_response=None, new_html=None, new_rhtml=None, **kwargs):
pass
class HtmlParser(object):
@decorator.resolve_arg()
def parse_html_func_with_soup(new_response=None, new_html=None, new_soup=None, **kwargs):
pass
| 11 | 0 | 10 | 1 | 9 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 2 | 2 | 52 | 7 | 45 | 25 | 26 | 0 | 29 | 13 | 22 | 1 | 1 | 0 | 5 |
148,104 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/crawlib/tests/dummy_site_crawler/sql_backend/s1_movie/entity_movie_sql_backend.py
|
crawlib.tests.dummy_site_crawler.sql_backend.s1_movie.entity_movie_sql_backend.SingleStatusEntityBase
|
class SingleStatusEntityBase(Base1, SqlEntitySingleStatus):
__abstract__ = True
def build_request(self, url, **kwargs):
request = url
return request
def send_request(self, request, **kwargs):
return requests.get(request)
|
class SingleStatusEntityBase(Base1, SqlEntitySingleStatus):
def build_request(self, url, **kwargs):
pass
def send_request(self, request, **kwargs):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 2 | 0 | 0 | 2 | 2 | 0 | 2 | 36 | 9 | 2 | 7 | 5 | 4 | 0 | 7 | 5 | 4 | 1 | 6 | 0 | 2 |
148,105 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/test_decorator.py
|
test_decorator.TestDecorator.test_resolve_arg.HtmlParser
|
class HtmlParser(object):
@decorator.resolve_arg()
def parse_html(self, response=None, html=None, soup=None, **kwargs):
validate_resolved_arg(html=html, soup=soup)
|
class HtmlParser(object):
@decorator.resolve_arg()
def parse_html(self, response=None, html=None, soup=None, **kwargs):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 4 | 0 | 4 | 3 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
148,106 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/test_entity_validate_implementation.py
|
test_entity_validate_implementation.test_validate_relationship_config_goodcase1.Country
|
class Country(Entity):
n_state = "n_state_field"
|
class Country(Entity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 4 | 0 | 0 |
148,107 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/test_entity_validate_implementation.py
|
test_entity_validate_implementation.test_validate_relationship_config_goodcase1.State
|
class State(Entity):
n_zipcode = "n_zipcode_field"
|
class State(Entity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 4 | 0 | 0 |
148,108 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/test_entity_validate_implementation.py
|
test_entity_validate_implementation.test_validate_relationship_config_goodcase2.ImageDownload
|
class ImageDownload(Entity):
id = "image_page_id"
|
class ImageDownload(Entity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,109 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/test_entity_validate_implementation.py
|
test_entity_validate_implementation.test_validate_implementation.Country
|
class Country(Country):
CONF_EDIT_AT_KEY = "edit_at"
@classmethod
def _validate_orm_related(cls):
pass
|
class Country(Country):
@classmethod
def _validate_orm_related(cls):
pass
| 3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 8 | 0 | 0 | 0 | 22 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,110 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/test_entity_validate_implementation.py
|
test_entity_validate_implementation.test_validate_implementation.Country
|
class Country(Country):
CONF_EDIT_AT_KEY = "edit_at"
@classmethod
def _validate_orm_related(cls):
pass
|
class Country(Country):
@classmethod
def _validate_orm_related(cls):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 23 | 4 | 0 | 4 | 3 | 1 | 0 | 3 | 2 | 1 | 1 | 5 | 0 | 1 |
148,111 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/test_entity_validate_implementation.py
|
test_entity_validate_implementation.test_validate_implementation.Country
|
class Country(Country):
CONF_EDIT_AT_KEY = "edit_at"
@classmethod
def _validate_orm_related(cls):
pass
|
class Country(Country):
@classmethod
def _validate_orm_related(cls):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 23 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 5 | 0 | 1 |
148,112 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/test_entity_validate_implementation.py
|
test_entity_validate_implementation.test_validate_implementation.Country
|
class Country(Country):
CONF_EDIT_AT_KEY = "edit_at"
@classmethod
def _validate_orm_related(cls):
pass
|
class Country(Country):
@classmethod
def _validate_orm_related(cls):
pass
| 3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 5 | 0 | 0 |
148,113 |
MacHu-GWU/crawlib-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_crawlib-project/tests/entity/test_entity_validate_implementation.py
|
test_entity_validate_implementation.test_validate_implementation.Country
|
class Country(Country):
CONF_EDIT_AT_KEY = "edit_at"
@classmethod
def _validate_orm_related(cls):
pass
|
class Country(Country):
@classmethod
def _validate_orm_related(cls):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 23 | 6 | 1 | 5 | 4 | 2 | 0 | 4 | 3 | 2 | 1 | 5 | 0 | 1 |
148,114 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_base.py
|
test_entity_base.TestParseResult
|
class TestParseResult(object):
def test_init_validator(self):
pr = ParseResult()
assert isinstance(pr.children, list)
assert isinstance(pr.additional_data, dict)
assert isinstance(pr.status, int)
assert isinstance(pr.edit_at, datetime)
with raises(TypeError) as e:
ParseResult(entity_data=[1, 2, 3])
assert "ParseResult.entity_data" in str(e)
with raises(TypeError) as e:
ParseResult(children=[1, 2, 3])
assert "ParseResult.children" in str(e)
|
class TestParseResult(object):
def test_init_validator(self):
pass
| 2 | 0 | 14 | 2 | 12 | 0 | 1 | 0 | 1 | 7 | 1 | 0 | 1 | 0 | 1 | 1 | 15 | 2 | 13 | 4 | 11 | 0 | 13 | 3 | 11 | 1 | 1 | 1 | 1 |
148,115 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/mongodb/test_mongo_query_builder.py
|
test_mongo_query_builder.TestQueryBuilder
|
class TestQueryBuilder(object):
def test_finished_unfinished(self, col):
finished_status = 50
update_interval = 24 * 3600 # 24 hours
status_key = "status"
edit_at_key = "edit_at"
n_documents = 100
col.insert([
{
status_key: random.randint(Status.S0_ToDo.id, Status.S99_Finalized.id),
edit_at_key: rolex.add_seconds(datetime.utcnow(), -random.randint(0, update_interval * 2))
} for _ in range(n_documents)
])
# test query_builder.finished
filters = query_builder.finished(
finished_status=finished_status,
update_interval=update_interval,
status_key=status_key,
edit_at_key=edit_at_key,
)
finished_count = col.find(filters).count()
for doc in col.find(filters):
status, edit_at = doc[status_key], doc[edit_at_key]
assert status >= finished_status
assert (datetime.utcnow() - edit_at).total_seconds() <= update_interval
# test query_builder.unfinished
filters = query_builder.unfinished(
finished_status=finished_status,
update_interval=update_interval,
status_key=status_key,
edit_at_key=edit_at_key,
)
unfinished_count = col.find(filters).count()
for doc in col.find(filters):
status, edit_at = doc[status_key], doc[edit_at_key]
assert (status < finished_status) \
or ((datetime.utcnow() - edit_at).total_seconds() > update_interval)
# test if total is ``n_documents``
assert (finished_count + unfinished_count) == n_documents
|
class TestQueryBuilder(object):
def test_finished_unfinished(self, col):
pass
| 2 | 0 | 42 | 4 | 35 | 4 | 3 | 0.11 | 1 | 5 | 3 | 0 | 1 | 0 | 1 | 1 | 43 | 4 | 36 | 13 | 34 | 4 | 20 | 12 | 18 | 3 | 1 | 1 | 3 |
148,116 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_base.py
|
test_entity_base.TestRelationshipConfig
|
class TestRelationshipConfig(object):
def test(self):
class MovieListpage(Entity): pass
class Movie(Entity): pass
class MovieCoverImage(Entity): pass
class Cast(Entity): pass
class Genre(Entity): pass
config = RelationshipConfig([
Relationship(Movie, Relationship.Option.many, "n_movie")
])
assert config.get_relationship(Movie) == Relationship.Option.many
assert config.get_n_child_key(Movie) == "n_movie"
assert len(list(config.iter_recursive_child_class())) == 1
config = RelationshipConfig([
Relationship(MovieCoverImage, Relationship.Option.one),
Relationship(Cast, Relationship.Option.many, "n_cast", recursive=False),
Relationship(Genre, Relationship.Option.many, "n_genre", recursive=False),
])
assert len(list(config.iter_recursive_child_class())) == 1
|
class TestRelationshipConfig(object):
def test(self):
pass
class MovieListpage(Entity):
class MovieListpage(Entity):
class MovieCoverImage(Entity):
class Cast(Entity):
class Genre(Entity):
| 7 | 0 | 24 | 6 | 18 | 0 | 1 | 0 | 1 | 8 | 7 | 0 | 1 | 0 | 1 | 1 | 25 | 6 | 19 | 8 | 17 | 0 | 18 | 8 | 11 | 1 | 1 | 0 | 1 |
148,117 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_base.py
|
test_entity_base.TestRelationship.test.PostCoverImage
|
class PostCoverImage(Entity): pass
|
class PostCoverImage(Entity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,118 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_base.py
|
test_entity_base.TestRelationshipConfig.test.Genre
|
class Genre(Entity): pass
|
class Genre(Entity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,119 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_base.py
|
test_entity_base.TestRelationship.test.Post
|
class Post(object): pass
|
class Post(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
148,120 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_base.py
|
test_entity_base.TestRelationship.test.Post
|
class Post(object): pass
|
class Post(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,121 |
MacHu-GWU/crawlib-project
|
MacHu-GWU_crawlib-project/tests/entity/test_entity_base.py
|
test_entity_base.TestRelationshipConfig.test.Cast
|
class Cast(Entity): pass
|
class Cast(Entity):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
148,122 |
MacHu-GWU/dataIO-project
|
MacHu-GWU_dataIO-project/dataIO/js.py
|
dataIO.js.JsonExtError
|
class JsonExtError(Exception):
"""Raises when it is not a json file.
"""
pass
|
class JsonExtError(Exception):
'''Raises when it is not a json file.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 4 | 0 | 2 | 1 | 1 | 2 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
148,123 |
MacHu-GWU/dataIO-project
|
MacHu-GWU_dataIO-project/dataIO/pk.py
|
dataIO.pk.PickleExtError
|
class PickleExtError(Exception):
pass
|
class PickleExtError(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
148,124 |
MacHu-GWU/dataIO-project
|
MacHu-GWU_dataIO-project/tests/json_module_compare/module_json.py
|
module_json.Unittest
|
class Unittest(unittest.TestCase):
def test_all(self):
data = {
"int": 100,
"float": 3.1415926535,
"str": "string example 字符串例子",
"boolean": True,
}
js = json.dumps(data)
self.assertEqual(data["int"], json.loads(js)["int"])
self.assertAlmostEqual(data["float"], json.loads(js)[
"float"], delta=0.0001)
self.assertEqual(data["str"], json.loads(js)["str"])
self.assertEqual(data["boolean"], json.loads(js)["boolean"])
print(json.dumps(data, sort_keys=True, indent=4))
|
class Unittest(unittest.TestCase):
def test_all(self):
pass
| 2 | 0 | 16 | 2 | 14 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 73 | 17 | 2 | 15 | 4 | 13 | 0 | 9 | 4 | 7 | 1 | 2 | 0 | 1 |
148,125 |
MacHu-GWU/dataIO-project
|
MacHu-GWU_dataIO-project/tests/json_module_compare/module_bson.py
|
module_bson.Unittest
|
class Unittest(unittest.TestCase):
def test_all(self):
data = {
"int": 100,
"float": 3.1415926535,
"str": "string example 字符串例子",
"bytes": "bytes example 比特串例子".encode("utf-8"),
"boolean": True,
"datetime": datetime.now()
}
js = json_util.dumps(data)
data1 = json_util.loads(js)
self.assertEqual(data["int"], data1["int"])
self.assertAlmostEqual(data["float"], data1["float"], delta=0.0001)
self.assertEqual(data["str"], data1["str"])
self.assertEqual(data["boolean"], data1["boolean"])
print(data1["bytes"])
print(data1["datetime"])
print(json_util.dumps(data, sort_keys=True, indent=4))
|
class Unittest(unittest.TestCase):
def test_all(self):
pass
| 2 | 0 | 20 | 2 | 18 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 73 | 21 | 2 | 19 | 5 | 17 | 0 | 12 | 5 | 10 | 1 | 2 | 0 | 1 |
148,126 |
MacHu-GWU/dataIO-project
|
MacHu-GWU_dataIO-project/tests/json_module_compare/module_ujson.py
|
module_ujson.Unittest
|
class Unittest(unittest.TestCase):
def test_all(self):
data = {
"int": 100,
"float": 3.1415926535,
"str": "string example 字符串例子",
"bytes": "bytes example 比特串例子".encode("utf-8"),
"boolean": True,
"datetime": datetime.now()
}
js = ujson.dumps(data)
self.assertEqual(data["int"], ujson.loads(js)["int"])
self.assertAlmostEqual(data["float"], ujson.loads(js)[
"float"], delta=0.0001)
self.assertEqual(data["str"], ujson.loads(js)["str"])
self.assertNotEqual(data["bytes"], ujson.loads(js)["bytes"]) # 不相等
self.assertEqual(data["boolean"], ujson.loads(js)["boolean"])
self.assertNotEqual(data["datetime"], ujson.loads(js)["datetime"])
print(ujson.dumps(data, indent=4))
|
class Unittest(unittest.TestCase):
def test_all(self):
pass
| 2 | 0 | 20 | 2 | 18 | 1 | 1 | 0.05 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 73 | 21 | 2 | 19 | 4 | 17 | 1 | 11 | 4 | 9 | 1 | 2 | 0 | 1 |
148,127 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/bin/pgr/pygitrepo_os.py
|
pygitrepo_os.OSEnum
|
class OSEnum(object):
windows = "Windows"
macOS = "Darwin"
linux = "Linux"
java = "Java"
unknown = ""
|
class OSEnum(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0 | 6 | 6 | 5 | 0 | 6 | 6 | 5 | 0 | 1 | 0 | 0 |
148,128 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/bin/pgr/pygitrepo_print.py
|
pygitrepo_print.AnsiBack
|
class AnsiBack(AnsiCodes):
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
|
class AnsiBack(AnsiCodes):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 20 | 1 | 18 | 18 | 17 | 1 | 18 | 18 | 17 | 0 | 2 | 0 | 0 |
148,129 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/bin/pgr/pygitrepo_print.py
|
pygitrepo_print.AnsiFore
|
class AnsiFore(AnsiCodes):
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
|
class AnsiFore(AnsiCodes):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 20 | 1 | 18 | 18 | 17 | 1 | 18 | 18 | 17 | 0 | 2 | 0 | 0 |
148,130 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/bin/pgr/pygitrepo_print.py
|
pygitrepo_print.AnsiStyle
|
class AnsiStyle(AnsiCodes):
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
|
class AnsiStyle(AnsiCodes):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | 0 | 5 | 5 | 4 | 0 | 5 | 5 | 4 | 0 | 2 | 0 | 0 |
148,131 |
MacHu-GWU/docfly-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_docfly-project/docfly/pkg/sixmini.py
|
docfly.pkg.sixmini.X
|
class X(object):
def __len__(self):
return 1 << 31
|
class X(object):
def __len__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 4 | 1 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
148,132 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/bin/pgr/pygitrepo_config.py
|
pygitrepo_config.Config
|
class Config:
GITHUB_ACCOUNT = "MacHu-GWU"
GITHUB_REPO_NAME = "docfly-project"
PACKAGE_NAME = "docfly"
DEV_PY_VER_MAJOR = "3"
DEV_PY_VER_MINOR = "8"
DEV_PY_VER_MICRO = "11"
TOX_TEST_VERSIONS = [
"2.7.18",
"3.7.12",
"3.8.11",
"3.9.6",
]
# --- Documentation Build
DOC_HOST_RTD_PROJECT_NAME = "docfly"
DOC_HOST_AWS_PROFILE = None
DOC_HOST_S3_BUCKET = None
# --- AWS Lambda Related
AWS_LAMBDA_DEPLOY_AWS_PROFILE = None
AWS_LAMBDA_DEPLOY_S3_BUCKET = None
AWS_LAMBDA_BUILD_DOCKER_IMAGE = None
AWS_LAMBDA_BUILD_DOCKER_IMAGE_WORKSPACE_DIR = None
AWS_LAMBDA_TEST_DOCKER_IMAGE = None
|
class Config:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 2 | 21 | 16 | 20 | 2 | 16 | 16 | 15 | 0 | 0 | 0 | 0 |
148,133 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/docfly/api_reference_doc.py
|
docfly.api_reference_doc.ApiReferenceDoc
|
class ApiReferenceDoc(object):
"""
A class to generate sphinx-doc api reference part.
Example::
package
|--- subpackage1
|--- __init__.rst
|--- module.rst
|--- subpackage2
|--- __init__.rst
|--- module.rst
|--- __init__.rst
|--- module1.rst
|--- module2.rst
:param conf_file: the conf.py file for sphinx doc. it helps to locate
the api reference doc destination directory
:type conf_file: string
:param package_name: the importable package name
:type package_name: string
:param ignore: default empty list, package, module relative
prefix you want to ignored
:type ignored_package: list of string
**中文文档**
如果你需要忽略一个包: 请使用 ``docfly.packages``
如果你需要忽略一个模块: 请使用 ``docfly.zzz_manual_install`` 或
``docfly.zzz_manual_install.py``
"""
def __init__(
self,
conf_file,
package_name,
ignored_package=None,
):
self.conf_file = conf_file
self.package = Package(package_name)
if ignored_package is None:
ignored_package = list()
self.ignored_package = list()
for pkg_fullname in ignored_package:
if pkg_fullname.endswith(".py"):
self.ignored_package.append(pkg_fullname[:-3])
else:
self.ignored_package.append(pkg_fullname)
def fly(self):
"""
Generate doc tree.
"""
dst_dir = Path(self.conf_file).parent.abspath
package_dir = Path(dst_dir, self.package.shortname)
# delete existing api document
try:
if package_dir.exists():
shutil.rmtree(package_dir.abspath)
except Exception as e:
print("'%s' can't be removed! Error: %s" % (package_dir, e))
# create .rst files
for pkg, parent, sub_packages, sub_modules in self.package.walk():
if not is_ignored(pkg, self.ignored_package):
dir_path = Path(*([dst_dir, ] + pkg.fullname.split(".")))
init_path = Path(dir_path, "__init__.rst")
make_dir(dir_path.abspath)
make_file(
init_path.abspath,
self.generate_package_content(pkg),
)
for mod in sub_modules:
if not is_ignored(mod, self.ignored_package):
module_path = Path(dir_path, mod.shortname + ".rst")
make_file(
module_path.abspath,
self.generate_module_content(mod),
)
def generate_package_content(self, package):
"""Generate package.rst text content.
::
{{ package_name }}
==================
.. automodule:: {{ package_name }}
:members:
sub packages and modules
------------------------
.. toctree::
:maxdepth: 1
{{ sub_package_name1 }} <{{ sub_package_name1 }}/__init__>
{{ sub_package_name2 }} <{{ sub_package_name2 }}/__init__>
{{ sub_module_name1}} <{{ sub_module_name1}}>
{{ sub_module_name2}} <{{ sub_module_name2}}>
:type package: Package
"""
if isinstance(package, Package):
return render_package(
package=package,
ignored_package=self.ignored_package
)
else: # pragma: no cover
raise Exception("%r is not a Package object" % package)
def generate_module_content(self, module):
"""Generate module.rst text content.
::
{{ module_name }}
=================
.. automodule:: {{ module_fullname }}
:members:
"""
if isinstance(module, Module):
return render_module(module)
else: # pragma: no cover
raise Exception("%r is not a Module object" % module)
|
class ApiReferenceDoc(object):
'''
A class to generate sphinx-doc api reference part.
Example::
package
|--- subpackage1
|--- __init__.rst
|--- module.rst
|--- subpackage2
|--- __init__.rst
|--- module.rst
|--- __init__.rst
|--- module1.rst
|--- module2.rst
:param conf_file: the conf.py file for sphinx doc. it helps to locate
the api reference doc destination directory
:type conf_file: string
:param package_name: the importable package name
:type package_name: string
:param ignore: default empty list, package, module relative
prefix you want to ignored
:type ignored_package: list of string
**中文文档**
如果你需要忽略一个包: 请使用 ``docfly.packages``
如果你需要忽略一个模块: 请使用 ``docfly.zzz_manual_install`` 或
``docfly.zzz_manual_install.py``
'''
def __init__(
self,
conf_file,
package_name,
ignored_package=None,
):
pass
def fly(self):
'''
Generate doc tree.
'''
pass
def generate_package_content(self, package):
'''Generate package.rst text content.
::
{{ package_name }}
==================
.. automodule:: {{ package_name }}
:members:
sub packages and modules
------------------------
.. toctree::
:maxdepth: 1
{{ sub_package_name1 }} <{{ sub_package_name1 }}/__init__>
{{ sub_package_name2 }} <{{ sub_package_name2 }}/__init__>
{{ sub_module_name1}} <{{ sub_module_name1}}>
{{ sub_module_name2}} <{{ sub_module_name2}}>
:type package: Package
'''
pass
def generate_module_content(self, module):
'''Generate module.rst text content.
::
{{ module_name }}
=================
.. automodule:: {{ module_fullname }}
:members:
'''
pass
| 5 | 4 | 24 | 4 | 13 | 8 | 4 | 1.04 | 1 | 4 | 2 | 0 | 4 | 3 | 4 | 4 | 135 | 27 | 54 | 22 | 44 | 56 | 37 | 16 | 32 | 7 | 1 | 4 | 15 |
148,134 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/docfly/pkg/picage/model.py
|
docfly.pkg.picage.model.Module
|
class Module(BaseModuleOrPackage):
"""
Represent a module object in Python. Typically it's a ``*.py`` file.
:param name: module name, e.g.: "pip.commands.install".
:param path: module file absolute path.
:param parent: default None, parent package name, list of package
:param is_single_file: if it is a single file package/module.
"""
def __repr__(self):
return "Module(name=%r, path='%s')" % (self.name, self.path)
|
class Module(BaseModuleOrPackage):
'''
Represent a module object in Python. Typically it's a ``*.py`` file.
:param name: module name, e.g.: "pip.commands.install".
:param path: module file absolute path.
:param parent: default None, parent package name, list of package
:param is_single_file: if it is a single file package/module.
'''
def __repr__(self):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 2.33 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 5 | 12 | 2 | 3 | 2 | 1 | 7 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
148,135 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/tests/test_api_reference_doc.py
|
test_api_reference_doc.TestApiReferenceDoc
|
class TestApiReferenceDoc(object):
def test(self):
doc = ApiReferenceDoc(
conf_file=Path(__file__).change(new_basename="conf.py").abspath,
package_name=package_name,
ignored_package=[
"{}.pkg".format(package_name),
"{}.util.py".format(package_name),
]
)
doc.fly()
assert Path(DIR_HERE, package_name, "api_reference_doc.rst").exists()
assert Path(DIR_HERE, package_name, "doctree.rst").exists()
assert not Path(DIR_HERE, package_name, "pkg").exists()
assert not Path(DIR_HERE, package_name, "util").exists()
|
class TestApiReferenceDoc(object):
def test(self):
pass
| 2 | 0 | 16 | 2 | 14 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 17 | 2 | 15 | 3 | 13 | 0 | 8 | 3 | 6 | 1 | 1 | 0 | 1 |
148,136 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/docfly/pkg/picage/model.py
|
docfly.pkg.picage.model.Package
|
class Package(BaseModuleOrPackage):
"""
Represent a package object in Python. It is a directory having a
``__init__.py`` file.
:param name: dot seperated full name, e.g.: "pip.commands.install".
:param path: package directory/file absolute path.
:param parent parent: parent package, instance of :class:`Package`.
**中文文档**
是Python中Package概念的抽象类。指包含有 ``__init__.py`` 文件的文件夹。
Package必须可以被import命令所导入, 换言之, 就是已经被成功安装了。
Package的属性的解释:
- name: 包名称
- path: 包目录所在的路径
- fullname: 包的全名, 带母包
- shortname: 包的短名称, 也就是最后一个点之后的部分。
- parent: 母包的实例。
- is_single_file: 是否是单文件的包。
- sub_packages: 有序字典, {子包的名称: Package对象}
- sub_modules: 有序字典, {子模块的名称: Module对象}
"""
def __init__(self, name, path=None, parent=None, is_single_file=None):
super(Package, self).__init__(
name,
path=path,
parent=parent,
is_single_file=is_single_file,
)
self.sub_packages = OrderedDict()
self.sub_modules = OrderedDict()
# walk through all sub packages and sub modules
if self.is_single_file is False:
for p in Path.sort_by_abspath(self.path.iterdir()):
# if it's a directory
if p.is_dir():
# if there is a __init__.py file, must be a sub package
if Path(p, "__init__.py").exists():
pkg = Package(
name=name + "." + p.basename,
path=p,
parent=self,
is_single_file=False,
)
self.sub_packages[p.basename] = pkg
# if it's a file
else:
# if it's a .py file, must be a module
if p.ext == ".py" and p.fname != "__init__":
module = Module(
name=name + "." + p.fname,
path=p,
parent=self,
is_single_file=True,
)
self.sub_modules[p.fname] = module
def __str__(self):
tpl = (
"Package("
"\n{tab}name=%r,"
"\n{tab}path='%s',"
"\n{tab}sub_packages=%r,"
"\n{tab}sub_modules=%r,"
"\n)"
).format(tab=Tab)
s = tpl % (
self.name,
self.path,
list(self.sub_packages),
list(self.sub_modules),
)
return s
def __repr__(self):
return "Package(name=%r, path='%s')" % (self.name, self.path)
def __getitem__(self, name):
if "." in name:
item = self
for _name in name.split("."):
item = item[_name]
return item
else:
try:
return self.sub_packages[name]
except KeyError:
try:
return self.sub_modules[name]
except KeyError:
raise KeyError("%r doesn't has sub module %r!" % (self.name, name))
def walk(self, pkg_only=True):
"""
A generator that walking through all sub packages and sub modules.
:type pkg_only: bool
:param pkg_only: if True, it only yields package (folder with __init__.py)
if False, it also yields module, but they don't have
sub_packages and sub_modules
**中文文档**
遍历一个包的所有子包以及子模块.
1. current package object (包对象)
2. current package's parent (当前包对象的母包)
3. list of sub packages (所有子包)
4. list of sub modules (所有模块)
"""
current_module = self
parent_module = self.parent
sub_packages = list(self.sub_packages.values())
sub_modules = list(self.sub_modules.values())
yield (
current_module,
parent_module,
sub_packages,
sub_modules,
)
for pkg in self.sub_packages.values():
for things in pkg.walk(pkg_only=pkg_only):
yield things
if pkg_only is False:
for sub_module in self.sub_modules.values():
yield sub_module, self, [], []
def _tree_view_builder(self, indent=0, is_root=True):
"""
Build a text to represent the package structure.
"""
def pad_text(indent):
return " " * indent + "|-- "
lines = list()
if is_root:
lines.append(SP_DIR)
lines.append("%s%s (%s)" % (pad_text(indent), self.shortname, self.fullname))
indent += 1
# sub packages
for pkg in self.sub_packages.values():
lines.append(pkg._tree_view_builder(indent=indent, is_root=False))
# __init__.py
lines.append(
"%s%s (%s)"
% (
pad_text(indent),
"__init__.py",
self.fullname,
)
)
# sub modules
for mod in self.sub_modules.values():
lines.append(
"%s%s (%s)"
% (
pad_text(indent),
mod.shortname + ".py",
mod.fullname,
)
)
return "\n".join(lines)
def pprint(self):
"""
Pretty print the package structure.
"""
print(self._tree_view_builder(indent=0, is_root=True))
|
class Package(BaseModuleOrPackage):
'''
Represent a package object in Python. It is a directory having a
``__init__.py`` file.
:param name: dot seperated full name, e.g.: "pip.commands.install".
:param path: package directory/file absolute path.
:param parent parent: parent package, instance of :class:`Package`.
**中文文档**
是Python中Package概念的抽象类。指包含有 ``__init__.py`` 文件的文件夹。
Package必须可以被import命令所导入, 换言之, 就是已经被成功安装了。
Package的属性的解释:
- name: 包名称
- path: 包目录所在的路径
- fullname: 包的全名, 带母包
- shortname: 包的短名称, 也就是最后一个点之后的部分。
- parent: 母包的实例。
- is_single_file: 是否是单文件的包。
- sub_packages: 有序字典, {子包的名称: Package对象}
- sub_modules: 有序字典, {子模块的名称: Module对象}
'''
def __init__(self, name, path=None, parent=None, is_single_file=None):
pass
def __str__(self):
pass
def __repr__(self):
pass
def __getitem__(self, name):
pass
def walk(self, pkg_only=True):
'''
A generator that walking through all sub packages and sub modules.
:type pkg_only: bool
:param pkg_only: if True, it only yields package (folder with __init__.py)
if False, it also yields module, but they don't have
sub_packages and sub_modules
**中文文档**
遍历一个包的所有子包以及子模块.
1. current package object (包对象)
2. current package's parent (当前包对象的母包)
3. list of sub packages (所有子包)
4. list of sub modules (所有模块)
'''
pass
def _tree_view_builder(self, indent=0, is_root=True):
'''
Build a text to represent the package structure.
'''
pass
def pad_text(indent):
pass
def pprint(self):
'''
Pretty print the package structure.
'''
pass
| 9 | 4 | 20 | 2 | 14 | 3 | 3 | 0.42 | 1 | 5 | 1 | 0 | 7 | 2 | 7 | 11 | 186 | 31 | 109 | 28 | 100 | 46 | 61 | 28 | 52 | 6 | 1 | 4 | 24 |
148,137 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/docfly/template/__init__.py
|
docfly.template.TemplateCollection
|
class TemplateCollection(object):
toc = jinja2.Template(data["toc"])
module = jinja2.Template(data["module"])
package = jinja2.Template(data["package"])
|
class TemplateCollection(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 1 | 0 | 0 |
148,138 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/docfly/doctree.py
|
docfly.doctree.ArticleFolder
|
class ArticleFolder(object):
"""
Represent an ``index.rst`` or ``index.ipynb`` file with a Title in a directory.
:param index_file: the index file name (no file extension)
:param dir_path: A folder contains single rst file. The rst file path
**中文文档**
一篇 Article 代表着文件夹中有一个 ``index.rst`` 或 ``index.ipynb`` 文件的文件夹.
其中必然有至少一个标题元素.
"""
DEFAULT_INDEX_FILE = "index"
def __init__(self, index_file=None, dir_path=None):
if index_file is None:
index_file = self.DEFAULT_INDEX_FILE
self.index_file = index_file
self.dir_path = dir_path
self._title = None
@property
def rst_path(self):
"""
The actual rst file absolute path.
"""
return Path(self.dir_path, self.index_file + ".rst").abspath
@property
def ipynb_path(self):
"""
The actual ipynb file absolute path.
"""
return Path(self.dir_path, self.index_file + ".ipynb").abspath
@property
def rel_path(self):
"""
File relative path from the folder.
"""
return "{}/{}".format(Path(self.dir_path).basename, self.index_file)
@property
def title(self):
"""
Title for the first header.
"""
if self._title is None:
if Path(self.rst_path).exists():
self._title = self.get_title_from_rst()
elif Path(self.ipynb_path).exists():
self._title = self.get_title_from_ipynb()
else:
pass
return self._title
def get_title_from_rst(self):
"""
Get title line from .rst file.
**中文文档**
从一个 ``_filename`` 所指定的 .rst 文件中, 找到顶级标题.
也就是第一个 ``====`` 或 ``----`` 或 ``~~~~`` 上面一行.
"""
header_bar_char_list = "=-~+*#^"
lines = list()
for cursor_line in textfile.readlines(
self.rst_path, strip="both", encoding="utf-8"
):
if cursor_line.startswith(".. include::"):
relative_path = cursor_line.split("::")[-1].strip()
included_path = Path(Path(self.rst_path).parent.abspath, relative_path)
if included_path.exists():
cursor_line = included_path.read_text(encoding="utf-8")
lines.append(cursor_line)
rst_content = "\n".join(lines)
cursor_previous_line = None
for cursor_line in rst_content.split("\n"):
for header_bar_char in header_bar_char_list:
if cursor_line.startswith(header_bar_char):
flag_full_bar_char = cursor_line == header_bar_char * len(
cursor_line
)
flag_line_length_greather_than_1 = len(cursor_line) >= 1
flag_previous_line_not_empty = bool(cursor_previous_line)
if (
flag_full_bar_char
and flag_line_length_greather_than_1
and flag_previous_line_not_empty
):
return cursor_previous_line.strip()
cursor_previous_line = cursor_line
msg = (
"Warning, this document doesn't have any %s header!" % header_bar_char_list
)
return None
def get_title_from_ipynb(self):
"""
Get title line from .ipynb file.
**中文文档**
从一个 ``_filename`` 所指定的 .ipynb 文件中, 找到顶级标题.
也就是第一个 ``#`` 后面的部分.
有的时候我们会用 raw RestructuredText 来做顶级标题.
"""
header_bar_char_list = "=-~+*#^"
data = json.loads(Path(self.ipynb_path).read_text())
for row in data["cells"]:
if len(row["source"]):
if row.get("cell_type") == "markdown":
content = row["source"][0]
line = content.split("\n")[0]
if "# " in line:
return line[2:].strip()
elif (
row.get("cell_type") == "raw"
and row.get("metadata", {}).get("raw_mimetype", "unknown")
== "text/restructuredtext"
):
try:
line = row["source"][3].strip()
except IndexError:
continue
try:
title_line = row["source"][2].strip()
except IndexError:
continue
for header_bar_char in header_bar_char_list:
if line.startswith(header_bar_char):
flag_full_bar_char = line == header_bar_char * len(line)
flag_line_length_greather_than_1 = len(line) >= 1
flag_previous_line_not_empty = bool(title_line)
if (
flag_full_bar_char
and flag_line_length_greather_than_1
and flag_previous_line_not_empty
):
return title_line
else:
pass
msg = "Warning, this document doesn't have any level 1 header!"
return None
@property
def sub_article_folders(self):
"""
Returns all valid ArticleFolder sitting inside of
:attr:`ArticleFolder.dir_path`.
"""
l = list()
for p in Path.sort_by_fname(Path(self.dir_path).select_dir(recursive=False)):
af = ArticleFolder(index_file=self.index_file, dir_path=p.abspath)
try:
if af.title is not None:
l.append(af)
except:
pass
return l
def toc_directive(self, maxdepth=1):
"""
Generate toctree directive text.
:param table_of_content_header:
:param header_bar_char:
:param header_line_length:
:param maxdepth:
:return:
"""
articles_directive_content = TC.toc.render(
maxdepth=maxdepth,
article_list=self.sub_article_folders,
)
return articles_directive_content
def __repr__(self):
return "Article(index_file=%r, title=%r)" % (
self.index_file,
self.title,
)
|
class ArticleFolder(object):
'''
Represent an ``index.rst`` or ``index.ipynb`` file with a Title in a directory.
:param index_file: the index file name (no file extension)
:param dir_path: A folder contains single rst file. The rst file path
**中文文档**
一篇 Article 代表着文件夹中有一个 ``index.rst`` 或 ``index.ipynb`` 文件的文件夹.
其中必然有至少一个标题元素.
'''
def __init__(self, index_file=None, dir_path=None):
pass
@property
def rst_path(self):
'''
The actual rst file absolute path.
'''
pass
@property
def ipynb_path(self):
'''
The actual ipynb file absolute path.
'''
pass
@property
def rel_path(self):
'''
File relative path from the folder.
'''
pass
@property
def title(self):
'''
Title for the first header.
'''
pass
def get_title_from_rst(self):
'''
Get title line from .rst file.
**中文文档**
从一个 ``_filename`` 所指定的 .rst 文件中, 找到顶级标题.
也就是第一个 ``====`` 或 ``----`` 或 ``~~~~`` 上面一行.
'''
pass
def get_title_from_ipynb(self):
'''
Get title line from .ipynb file.
**中文文档**
从一个 ``_filename`` 所指定的 .ipynb 文件中, 找到顶级标题.
也就是第一个 ``#`` 后面的部分.
有的时候我们会用 raw RestructuredText 来做顶级标题.
'''
pass
@property
def sub_article_folders(self):
'''
Returns all valid ArticleFolder sitting inside of
:attr:`ArticleFolder.dir_path`.
'''
pass
def toc_directive(self, maxdepth=1):
'''
Generate toctree directive text.
:param table_of_content_header:
:param header_bar_char:
:param header_line_length:
:param maxdepth:
:return:
'''
pass
def __repr__(self):
pass
| 16 | 9 | 16 | 1 | 11 | 4 | 3 | 0.4 | 1 | 3 | 0 | 0 | 10 | 3 | 10 | 10 | 191 | 26 | 120 | 47 | 104 | 48 | 87 | 42 | 76 | 11 | 1 | 6 | 34 |
148,139 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/bin/pgr/pygitrepo_func.py
|
pygitrepo_func.PyGitRepoFunc
|
class PyGitRepoFunc(object):
def build_lbd_source(self):
print(
"[pygitrepo] "
+ Fore.CYAN
+ "build lambda source code at "
+ Style.RESET_ALL
+ pgr.PATH_LAMBDA_BUILD_SOURCE
)
dir_project_root = pgr.DIR_PROJECT_ROOT
to_zip_list = list()
for dirname, _, basename_list in os.walk(pgr.DIR_PYTHON_LIB):
for basename in basename_list:
if basename.endswith(".pyc") \
or basename.endswith(".pyo") \
or dirname.endswith("__pycache__"):
continue
abspath = os.path.join(dirname, basename)
arch_path = os.path.relpath(abspath, dir_project_root)
to_zip_list.append((abspath, arch_path))
print(
"[pygitrepo] "
+ Fore.CYAN
+ " zip "
+ Style.RESET_ALL
+ pgr.DIR_VENV_SITE_PACKAGES
)
mkdir_if_not_exists(pgr.DIR_LAMBDA_BUILD)
rm_if_exists(pgr.PATH_LAMBDA_BUILD_SOURCE)
with ZipFile(pgr.PATH_LAMBDA_BUILD_SOURCE, "w") as f:
for abspath, arch_path in to_zip_list:
f.write(abspath, arch_path)
print("[pygitrepo] " + Fore.CYAN + " done" + Style.RESET_ALL)
def build_lbd_layer(self):
print(
"[pygitrepo] "
+ Fore.CYAN
+ "build lambda layer at "
+ Style.RESET_ALL
+ pgr.PATH_LAMBDA_BUILD_LAYER
)
dir_venv_site_packages = pgr.DIR_VENV_SITE_PACKAGES
to_zip_list = list()
ignore = {
pgr.PACKAGE_NAME,
"{}-{}.dist-info".format(pgr.PACKAGE_NAME, pgr.PACKAGE_VERSION)
}.union(ignore_dependencies)
for basename in os.listdir(dir_venv_site_packages):
if basename in ignore or basename.split("-")[0] in ignore:
continue
abspath = os.path.join(dir_venv_site_packages, basename)
if os.path.isfile(abspath):
arch_path = os.path.join(
"python",
os.path.relpath(abspath, dir_venv_site_packages)
)
to_zip_list.append((abspath, arch_path))
else:
for dirname, _, basename_list in os.walk(
os.path.join(dir_venv_site_packages, basename)):
for basename in basename_list:
abspath = os.path.join(dirname, basename)
arch_path = os.path.join(
"python",
os.path.relpath(abspath, dir_venv_site_packages)
)
to_zip_list.append((abspath, arch_path))
print(
"[pygitrepo] "
+ Fore.CYAN
+ " zip "
+ Style.RESET_ALL
+ pgr.DIR_VENV_SITE_PACKAGES
)
mkdir_if_not_exists(pgr.DIR_LAMBDA_BUILD)
rm_if_exists(pgr.PATH_LAMBDA_BUILD_LAYER)
with ZipFile(pgr.PATH_LAMBDA_BUILD_LAYER, "w") as f:
for abspath, arch_path in to_zip_list:
f.write(abspath, arch_path)
print("[pygitrepo] " + Fore.CYAN + " done" + Style.RESET_ALL)
def build_lbd_deploy_package(self):
print(
"[pygitrepo] "
+ Fore.CYAN
+ "build lambda deploy package at "
+ Style.RESET_ALL
+ pgr.PATH_LAMBDA_BUILD_DEPLOY_PACKAGE
)
dir_venv_site_packages = pgr.DIR_VENV_SITE_PACKAGES
to_zip_list = list()
ignore = ignore_dependencies
for basename in os.listdir(dir_venv_site_packages):
if basename in ignore or basename.split("-")[0] in ignore:
continue
abspath = os.path.join(dir_venv_site_packages, basename)
if os.path.isfile(abspath):
arch_path = os.path.relpath(abspath, dir_venv_site_packages)
to_zip_list.append((abspath, arch_path))
else:
for dirname, _, basename_list in os.walk(
os.path.join(dir_venv_site_packages, basename)):
for basename in basename_list:
abspath = os.path.join(dirname, basename)
arch_path = os.path.relpath(abspath, dir_venv_site_packages)
to_zip_list.append((abspath, arch_path))
print(
"[pygitrepo] "
+ Fore.CYAN
+ " zip "
+ Style.RESET_ALL
+ pgr.DIR_VENV_SITE_PACKAGES
)
mkdir_if_not_exists(pgr.DIR_LAMBDA_BUILD)
rm_if_exists(pgr.PATH_LAMBDA_BUILD_DEPLOY_PACKAGE)
with ZipFile(pgr.PATH_LAMBDA_BUILD_DEPLOY_PACKAGE, "w") as f:
for abspath, arch_path in to_zip_list:
f.write(abspath, arch_path)
print("[pygitrepo] " + Fore.CYAN + " done" + Style.RESET_ALL)
|
class PyGitRepoFunc(object):
def build_lbd_source(self):
pass
def build_lbd_layer(self):
pass
def build_lbd_deploy_package(self):
pass
| 4 | 0 | 43 | 4 | 39 | 0 | 6 | 0 | 1 | 2 | 0 | 0 | 3 | 0 | 3 | 3 | 131 | 13 | 118 | 27 | 114 | 0 | 67 | 24 | 63 | 7 | 1 | 4 | 19 |
148,140 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/bin/pgr/pygitrepo.py
|
pygitrepo.PyGitRepo
|
class PyGitRepo(object):
def __init__(self):
self.DIR_HERE = dirname(abspath(__file__))
self.DIR_HOME = expanduser("~")
self.GITHUB_ACCOUNT = Config.GITHUB_ACCOUNT
self.GITHUB_REPO_NAME = Config.GITHUB_REPO_NAME
self.PACKAGE_NAME = Config.PACKAGE_NAME
self.PACKAGE_NAME_SLUGIFY = Config.PACKAGE_NAME.replace("_", "-")
self.DEV_PY_VER_MAJOR = Config.DEV_PY_VER_MAJOR
self.DEV_PY_VER_MINOR = Config.DEV_PY_VER_MINOR
self.DEV_PY_VER_MICRO = Config.DEV_PY_VER_MICRO
self.TOX_TEST_VERSIONS = Config.TOX_TEST_VERSIONS
self.DOC_HOST_RTD_PROJECT_NAME = Config.DOC_HOST_RTD_PROJECT_NAME
self.DOC_HOST_AWS_PROFILE = Config.DOC_HOST_AWS_PROFILE
self.DOC_HOST_S3_BUCKET = Config.DOC_HOST_S3_BUCKET
# --- AWS Lambda Related
self.AWS_LAMBDA_DEPLOY_AWS_PROFILE = Config.AWS_LAMBDA_DEPLOY_AWS_PROFILE
self.AWS_LAMBDA_DEPLOY_S3_BUCKET = Config.AWS_LAMBDA_DEPLOY_S3_BUCKET
self.AWS_LAMBDA_BUILD_DOCKER_IMAGE = Config.AWS_LAMBDA_BUILD_DOCKER_IMAGE
self.AWS_LAMBDA_BUILD_DOCKER_IMAGE_WORKSPACE_DIR = Config.AWS_LAMBDA_BUILD_DOCKER_IMAGE_WORKSPACE_DIR
self.AWS_LAMBDA_TEST_DOCKER_IMAGE = Config.AWS_LAMBDA_TEST_DOCKER_IMAGE
self.OS_NAME = OS_NAME
self.IS_WINDOWS = IS_WINDOWS
self.IS_MACOS = IS_MACOS
self.IS_LINUX = IS_LINUX
self.IS_JAVA = IS_JAVA
self.OPEN_COMMAND = OPEN_COMMAND
# === Code File Structure
@property
def DIR_BIN(self):
return dirname(self.DIR_HERE)
@property
def DIR_BIN_PGR(self):
return self.DIR_HERE
@property
def DIR_BIN_PY(self):
return join(self.DIR_HERE, "py")
@property
def DIR_BIN_LBD(self):
return join(self.DIR_HERE, "lbd")
@property
def DIR_PROJECT_ROOT(self):
return dirname(dirname(self.DIR_HERE))
# --- python project basics
@property
def PATH_README(self):
return join(self.DIR_PROJECT_ROOT, "README.rst")
@property
def DIR_PYTHON_LIB(self):
return join(self.DIR_PROJECT_ROOT, self.PACKAGE_NAME)
@property
def PATH_VERSION_FILE(self):
return join(self.DIR_PYTHON_LIB, "_version.py")
@property
def PACKAGE_VERSION(self):
sys.path.append(self.DIR_PYTHON_LIB)
try:
from _version import __version__
return __version__
except:
return PYGITREPO_UNKNOWN
@property
def PATH_REQUIREMENTS_FILE(self):
return join(self.DIR_PROJECT_ROOT, "requirements.txt")
@property
def PATH_REQUIREMENTS_DEV_FILE(self):
return join(self.DIR_PROJECT_ROOT, "requirements-dev.txt")
@property
def PATH_REQUIREMENTS_DOC_FILE(self):
return join(self.DIR_PROJECT_ROOT, "requirements-doc.txt")
@property
def PATH_REQUIREMENTS_TEST_FILE(self):
return join(self.DIR_PROJECT_ROOT, "requirements-test.txt")
@property
def DIR_PYPI_BUILD(self):
return join(self.DIR_PROJECT_ROOT, "build")
@property
def DIR_PYPI_DISTRIBUTE(self):
return join(self.DIR_PROJECT_ROOT, "dist")
@property
def DIR_PYPI_EGG(self):
return join(self.DIR_PROJECT_ROOT, "{}.egg-info".format(Config.PACKAGE_NAME))
# --- testing
@property
def DIR_TESTS(self):
return join(self.DIR_PROJECT_ROOT, "tests")
@property
def DIR_UNIT_TESTS(self):
return self.DIR_TESTS
@property
def DIR_INTEGRATION_TESTS(self):
return join(self.DIR_PROJECT_ROOT, "tests_integration")
@property
def DIR_PYTEST_CACHE(self):
return join(self.DIR_PROJECT_ROOT, ".pytest_cache")
@property
def PATH_CODECOV_YML(self):
return join(self.DIR_PROJECT_ROOT, "codecov.yml")
@property
def PATH_COVERAGE_CONFIG(self):
return join(self.DIR_PROJECT_ROOT, ".coveragerc")
@property
def DIR_COVERAGE_ANNOTATE(self):
return join(self.DIR_PROJECT_ROOT, ".coverage.annotate")
@property
def PYENV_LOCAL_VERSIONS_FOR_TOX(self):
try:
return " ".join(self.TOX_TEST_VERSIONS)
except:
return PYGITREPO_UNKNOWN
# --- sphinx doc
@property
def DIR_SPHINX_DOC(self):
return join(self.DIR_PROJECT_ROOT, "docs")
@property
def DIR_SPHINX_DOC_SOURCE(self):
return join(self.DIR_SPHINX_DOC, "source")
@property
def DIR_SPHINX_DOC_SOURCE_CONFIG(self):
return join(self.DIR_SPHINX_DOC_SOURCE, "conf.py")
@property
def DIR_SPHINX_DOC_BUILD(self):
return join(self.DIR_SPHINX_DOC, "build")
@property
def DIR_SPHINX_DOC_BUILD_HTML(self):
return join(self.DIR_SPHINX_DOC_BUILD, "html")
@property
def PATH_SPHINX_DOC_BUILD_HTML_INDEX(self):
return join(self.DIR_SPHINX_DOC_BUILD_HTML, "index.html")
@property
def PATH_READTHEDOCS_YML(self):
return join(self.DIR_PROJECT_ROOT, "readthedocs.yml")
@property
def URL_RTD_DOC(self):
return "https://{}.readthedocs.io/".format(Config.DOC_HOST_RTD_PROJECT_NAME)
@property
def URL_S3_DOC_LATEST(self):
return "https://{bucket}.s3.amazonaws.com/docs/{package_name}/latest/".format(
bucket=self.DOC_HOST_S3_BUCKET,
package_name=self.PACKAGE_NAME,
)
@property
def URL_S3_DOC_VERSIONED(self):
return "https://{bucket}.s3.amazonaws.com/docs/{package_name}/{version}/index.html".format(
bucket=self.DOC_HOST_S3_BUCKET,
package_name=self.PACKAGE_NAME,
version=self.PACKAGE_VERSION,
)
@property
def S3_URI_DOC_DIR_LATEST(self):
return "s3://{bucket}/docs/{package_name}/latest".format(
bucket=self.DOC_HOST_S3_BUCKET,
package_name=self.PACKAGE_NAME,
)
@property
def S3_URI_DOC_DIR_VERSIONED(self):
return "s3://{bucket}/docs/{package_name}/{version}".format(
bucket=self.DOC_HOST_S3_BUCKET,
package_name=self.PACKAGE_NAME,
version=self.PACKAGE_VERSION,
)
@property
def URL_S3_CONSOLE_LATEST_DOC_DIR(self):
return make_s3_console_url(
bucket=self.DOC_HOST_S3_BUCKET,
prefix=s3_key_join(
parts=[
"docs", self.PACKAGE_NAME, "latest"
],
is_dir=True,
)
)
@property
def URL_S3_CONSOLE_VERSIONED_DOC_DIR(self):
return make_s3_console_url(
bucket=self.DOC_HOST_S3_BUCKET,
prefix=s3_key_join(
parts=[
"docs", self.PACKAGE_NAME, self.PACKAGE_VERSION
],
is_dir=True,
)
)
# === Pyenv
@property
def PATH_BIN_GLOBAL_PYTHON(self):
if OS_NAME == OSEnum.windows:
return "/c/Python{}.{}/python.exe".format(self.DEV_PY_VER_MAJOR, self.DEV_PY_VER_MINOR)
elif OS_NAME in (OSEnum.macOS, OSEnum.linux):
return join(
self.DIR_HOME,
".pyenv",
"shims",
"python{}.{}".format(
self.DEV_PY_VER_MAJOR,
self.DEV_PY_VER_MINOR,
)
)
else:
raise EnvironmentError
# === Virtualenv
@property
def VENV_NAME(self):
return "{}_venv".format(Config.PACKAGE_NAME)
@property
def DIR_ALL_PYTHON_VERSIONED_VENV(self):
if OS_NAME in (OSEnum.windows, OSEnum.macOS, OSEnum.linux):
return join(
self.DIR_HOME,
"venvs",
"python",
"{}.{}.{}".format(
self.DEV_PY_VER_MAJOR,
self.DEV_PY_VER_MINOR,
self.DEV_PY_VER_MICRO,
),
)
else:
raise ValueError
@property
def DIR_VENV(self):
return join(self.DIR_ALL_PYTHON_VERSIONED_VENV, self.VENV_NAME)
@property
def DIR_VENV_SITE_PACKAGES(self):
if OS_NAME == OSEnum.windows:
return join(self.DIR_VENV, "Lib", "site-packages")
elif OS_NAME in (OSEnum.macOS, OSEnum.linux):
return join(
self.DIR_VENV,
"lib",
"python{}.{}".format(Config.DEV_PY_VER_MAJOR, Config.DEV_PY_VER_MINOR),
"site-packages",
)
else:
raise Exception
@property
def DIR_VENV_SITE_PACKAGES_64(self):
if OS_NAME == OSEnum.windows:
return join(self.DIR_VENV, "Lib64", "site-packages")
elif OS_NAME in (OSEnum.macOS, OSEnum.linux):
return join(
self.DIR_VENV,
"lib64",
"python{}.{}".format(Config.DEV_PY_VER_MAJOR, Config.DEV_PY_VER_MINOR),
"site-packages",
)
else:
raise Exception
@property
def DIR_VENV_SITE_PACKAGES_INSTALLED(self):
return join(self.DIR_VENV_SITE_PACKAGES, Config.PACKAGE_NAME)
@property
def DIR_VENV_SITE_PACKAGES_EGG_LINK(self):
return join(self.DIR_VENV_SITE_PACKAGES, "{}.egg-link".format(Config.PACKAGE_NAME).replace("_", "-"))
# --- venv/bin
@property
def DIR_VENV_BIN(self):
if OS_NAME == OSEnum.windows:
return join(self.DIR_VENV, "Scripts")
elif OS_NAME in (OSEnum.macOS, OSEnum.linux):
return join(self.DIR_VENV, "bin")
else:
raise Exception
@property
def PATH_VENV_BIN_PYTHON(self):
return join(self.DIR_VENV_BIN, "python")
@property
def PATH_VENV_BIN_ACTIVATE(self):
return join(self.DIR_VENV_BIN, "activate")
@property
def PATH_VENV_BIN_PIP(self):
return join(self.DIR_VENV_BIN, "pip")
@property
def PATH_VENV_BIN_PYTEST(self):
return join(self.DIR_VENV_BIN, "pytest")
@property
def PATH_VENV_BIN_SPHINX_QUICKSTART(self):
return join(self.DIR_VENV_BIN, "sphinx-quickstart")
@property
def PATH_VENV_BIN_TWINE(self):
return join(self.DIR_VENV_BIN, "twine")
@property
def PATH_VENV_BIN_TOX(self):
return join(self.DIR_VENV_BIN, "tox")
@property
def PATH_VENV_BIN_JUPYTER(self):
return join(self.DIR_VENV_BIN, "jupyter")
@property
def PATH_VENV_BIN_ANSIBLE(self):
return join(self.DIR_VENV_BIN, "ansible")
@property
def PATH_VENV_BIN_AWS(self):
return join(self.DIR_VENV_BIN, "aws")
@property
def PATH_VENV_BIN_AWS_CHALICE(self):
return join(self.DIR_VENV_BIN, "chalice")
@property
def PATH_VENV_BIN_AWS_ELASTIC_BEANSTALK(self):
return join(self.DIR_VENV_BIN, "flask")
# === AWS CLI
@property
def AWS_CLI_PROFILE_ARG_DOC_HOST(self):
if Config.DOC_HOST_AWS_PROFILE:
return Config.DOC_HOST_AWS_PROFILE
else:
return ""
@property
def AWS_CLI_PROFILE_ARG_LAMBDA_DEPLOY(self):
if Config.AWS_LAMBDA_DEPLOY_AWS_PROFILE:
return Config.AWS_LAMBDA_DEPLOY_AWS_PROFILE
else:
return ""
# === AWS Lambda
@property
def DIR_LAMBDA_BUILD(self):
return join(self.DIR_PYPI_BUILD, "lambda")
@property
def DIR_LAMBDA_BUILD_SITE_PACKAGES(self):
return join(self.DIR_LAMBDA_BUILD, "site-packages")
@property
def PATH_LAMBDA_BUILD_DEPLOY_PACKAGE(self):
return join(self.DIR_LAMBDA_BUILD, "deploy-pkg.zip")
@property
def PATH_LAMBDA_BUILD_SOURCE(self):
return join(self.DIR_LAMBDA_BUILD, "source.zip")
@property
def PATH_LAMBDA_BUILD_LAYER(self):
return join(self.DIR_LAMBDA_BUILD, "layer.zip")
@property
def S3_KEY_LAMBDA_DEPLOY_DIR(self):
return s3_key_join(
parts=[
"lambda",
self.PACKAGE_NAME,
],
is_dir=True,
)
@property
def S3_KEY_LAMBDA_DEPLOY_VERSIONED_DIR(self):
return s3_key_join(
parts=[
self.S3_KEY_LAMBDA_DEPLOY_DIR,
self.PACKAGE_VERSION,
],
is_dir=True,
)
@property
def S3_URI_LAMBDA_DEPLOY_VERSIONED_DIR(self):
return "s3://{bucket}/{key}".format(
bucket=self.AWS_LAMBDA_DEPLOY_S3_BUCKET,
key=self.S3_KEY_LAMBDA_DEPLOY_VERSIONED_DIR,
)
@property
def S3_KEY_LAMBDA_DEPLOY_VERSIONED_SOURCE_DIR(self):
return s3_key_join(
parts=[
self.S3_KEY_LAMBDA_DEPLOY_VERSIONED_DIR,
"source",
],
is_dir=True,
)
@property
def S3_URI_LAMBDA_DEPLOY_VERSIONED_SOURCE_DIR(self):
return "s3://{bucket}/{key}".format(
bucket=self.AWS_LAMBDA_DEPLOY_S3_BUCKET,
key=self.S3_KEY_LAMBDA_DEPLOY_VERSIONED_SOURCE_DIR,
)
@property
def S3_KEY_LAMBDA_DEPLOY_VERSIONED_LAYER_DIR(self):
return s3_key_join(
parts=[
self.S3_KEY_LAMBDA_DEPLOY_VERSIONED_DIR,
"layer",
],
is_dir=True,
)
@property
def S3_URI_LAMBDA_DEPLOY_VERSIONED_LAYER_DIR(self):
return "s3://{bucket}/{key}".format(
bucket=self.AWS_LAMBDA_DEPLOY_S3_BUCKET,
key=self.S3_KEY_LAMBDA_DEPLOY_VERSIONED_LAYER_DIR,
)
@property
def S3_KEY_LAMBDA_DEPLOY_VERSIONED_DEPLOY_PKG_DIR(self):
return s3_key_join(
parts=[
self.S3_KEY_LAMBDA_DEPLOY_VERSIONED_DIR,
"deploy-pkg",
],
is_dir=True,
)
@property
def S3_URI_LAMBDA_DEPLOY_VERSIONED_DEPLOY_PKG_DIR(self):
return "s3://{bucket}/{key}".format(
bucket=self.AWS_LAMBDA_DEPLOY_S3_BUCKET,
key=self.S3_KEY_LAMBDA_DEPLOY_VERSIONED_DEPLOY_PKG_DIR,
)
@property
def URL_S3_CONSOLE_LAMBDA_DEPLOY_DIR(self):
return make_s3_console_url(
bucket=self.AWS_LAMBDA_DEPLOY_S3_BUCKET,
prefix=self.S3_KEY_LAMBDA_DEPLOY_DIR,
)
@property
def URL_S3_CONSOLE_LAMBDA_DEPLOY_VERSIONED_DIR(self):
return make_s3_console_url(
bucket=self.AWS_LAMBDA_DEPLOY_S3_BUCKET,
prefix=self.S3_KEY_LAMBDA_DEPLOY_VERSIONED_DIR,
)
@property
def URL_S3_CONSOLE_LAMBDA_DEPLOY_VERSIONED_SOURCE_OBJ(self):
return make_s3_console_url(
bucket=self.AWS_LAMBDA_DEPLOY_S3_BUCKET,
prefix=s3_key_join(
parts=[
self.S3_KEY_LAMBDA_DEPLOY_VERSIONED_DIR,
"source",
],
is_dir=False,
),
)
@property
def URL_S3_CONSOLE_LAMBDA_DEPLOY_VERSIONED_LAYER_OBJ(self):
return make_s3_console_url(
bucket=self.AWS_LAMBDA_DEPLOY_S3_BUCKET,
prefix=s3_key_join(
parts=[
self.S3_KEY_LAMBDA_DEPLOY_VERSIONED_DIR,
"layer",
],
is_dir=False,
),
)
@property
def URL_S3_CONSOLE_LAMBDA_DEPLOY_VERSIONED_DEPLOY_PKG_OBJ(self):
return make_s3_console_url(
bucket=self.AWS_LAMBDA_DEPLOY_S3_BUCKET,
prefix=s3_key_join(
parts=[
self.S3_KEY_LAMBDA_DEPLOY_VERSIONED_DIR,
"deploy-pkg",
],
is_dir=False,
),
)
@property
def URL_LBD_LAYER_CONSOLE(self):
return "https://console.aws.amazon.com/lambda/home?#/layers/{layer_name}".format(
layer_name=self.PACKAGE_NAME
)
|
class PyGitRepo(object):
def __init__(self):
pass
@property
def DIR_BIN(self):
pass
@property
def DIR_BIN_PGR(self):
pass
@property
def DIR_BIN_PY(self):
pass
@property
def DIR_BIN_LBD(self):
pass
@property
def DIR_PROJECT_ROOT(self):
pass
@property
def PATH_README(self):
pass
@property
def DIR_PYTHON_LIB(self):
pass
@property
def PATH_VERSION_FILE(self):
pass
@property
def PACKAGE_VERSION(self):
pass
@property
def PATH_REQUIREMENTS_FILE(self):
pass
@property
def PATH_REQUIREMENTS_DEV_FILE(self):
pass
@property
def PATH_REQUIREMENTS_DOC_FILE(self):
pass
@property
def PATH_REQUIREMENTS_TEST_FILE(self):
pass
@property
def DIR_PYPI_BUILD(self):
pass
@property
def DIR_PYPI_DISTRIBUTE(self):
pass
@property
def DIR_PYPI_EGG(self):
pass
@property
def DIR_TESTS(self):
pass
@property
def DIR_UNIT_TESTS(self):
pass
@property
def DIR_INTEGRATION_TESTS(self):
pass
@property
def DIR_PYTEST_CACHE(self):
pass
@property
def PATH_CODECOV_YML(self):
pass
@property
def PATH_COVERAGE_CONFIG(self):
pass
@property
def DIR_COVERAGE_ANNOTATE(self):
pass
@property
def PYENV_LOCAL_VERSIONS_FOR_TOX(self):
pass
@property
def DIR_SPHINX_DOC(self):
pass
@property
def DIR_SPHINX_DOC_SOURCE(self):
pass
@property
def DIR_SPHINX_DOC_SOURCE_CONFIG(self):
pass
@property
def DIR_SPHINX_DOC_BUILD(self):
pass
@property
def DIR_SPHINX_DOC_BUILD_HTML(self):
pass
@property
def PATH_SPHINX_DOC_BUILD_HTML_INDEX(self):
pass
@property
def PATH_READTHEDOCS_YML(self):
pass
@property
def URL_RTD_DOC(self):
pass
@property
def URL_S3_DOC_LATEST(self):
pass
@property
def URL_S3_DOC_VERSIONED(self):
pass
@property
def S3_URI_DOC_DIR_LATEST(self):
pass
@property
def S3_URI_DOC_DIR_VERSIONED(self):
pass
@property
def URL_S3_CONSOLE_LATEST_DOC_DIR(self):
pass
@property
def URL_S3_CONSOLE_VERSIONED_DOC_DIR(self):
pass
@property
def PATH_BIN_GLOBAL_PYTHON(self):
pass
@property
def VENV_NAME(self):
pass
@property
def DIR_ALL_PYTHON_VERSIONED_VENV(self):
pass
@property
def DIR_VENV(self):
pass
@property
def DIR_VENV_SITE_PACKAGES(self):
pass
@property
def DIR_VENV_SITE_PACKAGES_64(self):
pass
@property
def DIR_VENV_SITE_PACKAGES_INSTALLED(self):
pass
@property
def DIR_VENV_SITE_PACKAGES_EGG_LINK(self):
pass
@property
def DIR_VENV_BIN(self):
pass
@property
def PATH_VENV_BIN_PYTHON(self):
pass
@property
def PATH_VENV_BIN_ACTIVATE(self):
pass
@property
def PATH_VENV_BIN_PIP(self):
pass
@property
def PATH_VENV_BIN_PYTEST(self):
pass
@property
def PATH_VENV_BIN_SPHINX_QUICKSTART(self):
pass
@property
def PATH_VENV_BIN_TWINE(self):
pass
@property
def PATH_VENV_BIN_TOX(self):
pass
@property
def PATH_VENV_BIN_JUPYTER(self):
pass
@property
def PATH_VENV_BIN_ANSIBLE(self):
pass
@property
def PATH_VENV_BIN_AWS(self):
pass
@property
def PATH_VENV_BIN_AWS_CHALICE(self):
pass
@property
def PATH_VENV_BIN_AWS_ELASTIC_BEANSTALK(self):
pass
@property
def AWS_CLI_PROFILE_ARG_DOC_HOST(self):
pass
@property
def AWS_CLI_PROFILE_ARG_LAMBDA_DEPLOY(self):
pass
@property
def DIR_LAMBDA_BUILD(self):
pass
@property
def DIR_LAMBDA_BUILD_SITE_PACKAGES(self):
pass
@property
def PATH_LAMBDA_BUILD_DEPLOY_PACKAGE(self):
pass
@property
def PATH_LAMBDA_BUILD_SOURCE(self):
pass
@property
def PATH_LAMBDA_BUILD_LAYER(self):
pass
@property
def S3_KEY_LAMBDA_DEPLOY_DIR(self):
pass
@property
def S3_KEY_LAMBDA_DEPLOY_VERSIONED_DIR(self):
pass
@property
def S3_URI_LAMBDA_DEPLOY_VERSIONED_DIR(self):
pass
@property
def S3_KEY_LAMBDA_DEPLOY_VERSIONED_SOURCE_DIR(self):
pass
@property
def S3_URI_LAMBDA_DEPLOY_VERSIONED_SOURCE_DIR(self):
pass
@property
def S3_KEY_LAMBDA_DEPLOY_VERSIONED_LAYER_DIR(self):
pass
@property
def S3_URI_LAMBDA_DEPLOY_VERSIONED_LAYER_DIR(self):
pass
@property
def S3_KEY_LAMBDA_DEPLOY_VERSIONED_DEPLOY_PKG_DIR(self):
pass
@property
def S3_URI_LAMBDA_DEPLOY_VERSIONED_DEPLOY_PKG_DIR(self):
pass
@property
def URL_S3_CONSOLE_LAMBDA_DEPLOY_DIR(self):
pass
@property
def URL_S3_CONSOLE_LAMBDA_DEPLOY_VERSIONED_DIR(self):
pass
@property
def URL_S3_CONSOLE_LAMBDA_DEPLOY_VERSIONED_SOURCE_OBJ(self):
pass
@property
def URL_S3_CONSOLE_LAMBDA_DEPLOY_VERSIONED_LAYER_OBJ(self):
pass
@property
def URL_S3_CONSOLE_LAMBDA_DEPLOY_VERSIONED_DEPLOY_PKG_OBJ(self):
pass
@property
def URL_LBD_LAYER_CONSOLE(self):
pass
| 164 | 0 | 4 | 0 | 4 | 0 | 1 | 0.03 | 1 | 4 | 2 | 0 | 82 | 24 | 82 | 82 | 535 | 85 | 440 | 189 | 275 | 11 | 214 | 108 | 130 | 3 | 1 | 1 | 95 |
148,141 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/docfly/directives/autotoctree.py
|
docfly.directives.autotoctree.AutoTocTree
|
class AutoTocTree(Directive):
    """
    Automatically includes ``index.rst`` in toctree from::

        <current_dir>/<any-folder>/index.rst

    Any toctree directive arguments are supported.

    Example, the content of ``<current_dir>/index.rst``::

        .. autodoctree::

    Will be converted to::

        .. toctree::

            ./section1/index.rst
            ./section2/index.rst
            ...
    """
    # extra option names understood by this directive (on top of TocTree's)
    _opt_append_ahead = "append_ahead"
    _opt_index_file = "index_file"
    _opt_index_file_default = "index"

    has_content = True

    # accept every standard toctree option, plus the two extras above
    option_spec = TocTree.option_spec.copy()
    option_spec[_opt_append_ahead] = directives.flag
    option_spec[_opt_index_file] = str

    def run(self):
        """Build the ``.. toctree::`` rst and parse it into docutils nodes."""
        container = nodes.Element()
        container.document = self.state.document
        source_path = self.state.document.current_source
        rst_text = self.derive_toctree_rst(source_path)
        string_list = StringList(rst_text.splitlines(), source="")
        # NOTE(review): sphinx documents this helper as
        # ``sphinx.util.nodes.nested_parse_with_titles`` -- confirm the
        # ``sphinx.util`` attribute path resolves on the targeted version.
        sphinx.util.nested_parse_with_titles(self.state, string_list, container)
        return container.children

    def derive_toctree_rst(self, current_file):
        """
        Generate the rst content::

            .. toctree::
                args ...

                example.rst
                ...

        :param current_file: absolute path of the ``.rst`` file being parsed.
        :return: the generated toctree directive as a single rst string.
        """
        indent = " " * 4
        body = [".. toctree::"]

        # forward every standard toctree option the user supplied
        for option_name in TocTree.option_spec:
            option_value = self.options.get(option_name)
            if option_value is not None:
                body.append(f"{indent}:{option_name}: {option_value}".rstrip())
        body.append("")

        # explicit directive content, placed before or after the
        # auto-discovered entries depending on ``:append_ahead:``
        content_lines = [indent + line for line in self.content]
        if self._opt_append_ahead in self.options:
            body.extend(content_lines)

        index_file = self.options.get(
            self._opt_index_file, self._opt_index_file_default
        )
        root_folder = ArticleFolder(
            index_file=index_file,
            dir_path=Path(current_file).parent.abspath,
        )
        for sub_folder in root_folder.sub_article_folders:
            body.append(f"{indent}{sub_folder.title} <{sub_folder.rel_path}>")

        if self._opt_append_ahead not in self.options:
            body.extend(content_lines)

        body.append("")
        return "\n".join(body)
|
class AutoTocTree(Directive):
'''
Automatically includes ``index.rst`` in toctree from::
<current_dir>/<any-folder>/index.rst
Any toctree directive arguments are supported.
Example, the content of ``<current_dir>/index.rst``::
.. autodoctree::
Will be converted to::
.. toctree::
./section1/index.rst
./section2/index.rst
...
'''
def run(self):
pass
def derive_toctree_rst(self, current_file):
'''
Generate the rst content::
.. toctree::
args ...
example.rst
...
:param current_file:
:return:
'''
pass
| 3 | 2 | 31 | 4 | 22 | 5 | 5 | 0.43 | 1 | 2 | 1 | 0 | 2 | 0 | 2 | 2 | 91 | 18 | 51 | 21 | 48 | 22 | 40 | 21 | 37 | 8 | 1 | 2 | 9 |
148,142 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/docfly/pkg/picage/model.py
|
docfly.pkg.picage.model.BaseModuleOrPackage
|
class BaseModuleOrPackage:
    """
    Base Class to represent a module or package.

    :param name: dotted import path, e.g. ``pip.commands.install``.
    :param path: optional explicit filesystem path; when omitted it is
        resolved against the site-packages directory.
    :param parent: the parent package object, if any.
    :param is_single_file: True when the target is a single ``.py`` module
        rather than a package directory.
    :raises ValueError: when ``path`` is omitted and ``name`` cannot be
        located as either a ``.py`` file or a package directory.
    """

    def __init__(self, name, path=None, parent=None, is_single_file=None):
        assert_is_valid_name(name)
        self.name = name
        self.parent = parent
        self.is_single_file = is_single_file

        def read_sp_dir(p: Path) -> str:
            """
            Read the site-packages directory from a .egg-link or .pth file.
            """
            with open(p.abspath, "rb") as f:
                return f.readline().decode("utf-8").strip()

        if path is None:
            chain = self.name.split(".")  # "a.b.c" -> ["a", "b", "c"]
            root = chain[0]  # top-level distribution / package name

            # An editable install may register either a .egg-link or a .pth
            # file, and the file name may use "-" where the package name
            # uses "_". Probe each candidate in order.
            # (The original code probed root.replace("_", "_"), which is a
            # no-op -- the intent is simply the unmodified root name.)
            candidates = [
                Path(SP_DIR, root.replace("_", "-") + ".egg-link"),
                Path(SP_DIR, root + ".egg-link"),
                Path(SP_DIR, root.replace("_", "-") + ".pth"),
                Path(SP_DIR, root + ".pth"),
            ]
            for candidate in candidates:
                if candidate.exists() and candidate.is_file():
                    sp_dir = read_sp_dir(candidate)
                    break
            else:
                sp_dir = SP_DIR

            # case 1: single-file module, <sp_dir>/a/b/c.py
            p = Path(Path(sp_dir, *chain).abspath + ".py")
            if p.exists() and p.is_file():
                self.path = p
                self.is_single_file = True
                return

            # case 2: package directory containing an __init__.py
            p = Path(sp_dir, *chain)
            if p.exists() and p.is_dir() and Path(p, "__init__.py").exists():
                self.path = p
                self.is_single_file = False
                return

            raise ValueError("Can't found '%s'!" % self.name)
        else:
            self.path = path

    @property
    def fullname(self):
        """
        Example: for package ``pip.commands.install``, it's
        ``pip.commands.install``.
        """
        return self.name

    @property
    def shortname(self):
        """
        Example: for package ``pip.commands.install``, it's ``install``.
        """
        if "." in self.name:
            return self.name.split(".")[-1]
        else:
            return self.name

    def __eq__(self, other):
        # Two objects are considered equal when they point at the same path.
        return self.path == other.path
|
class BaseModuleOrPackage:
'''
Base Class to represent a module or package.
'''
def __init__(self, name, path=None, parent=None, is_single_file=None):
pass
def read_sp_dir(p: Path) -> str:
'''
Read the site-packages directory from a .egg-link or .pth file.
'''
pass
@property
def fullname(self):
'''
Example: for package ``pip.commands.install``, it's
``pip.commands.install``.
'''
pass
@property
def shortname(self):
'''
Example: for package ``pip.commands.install``, it's ``install``.
'''
pass
def __eq__(self, other):
pass
| 8 | 4 | 15 | 1 | 10 | 4 | 3 | 0.36 | 0 | 2 | 0 | 2 | 4 | 4 | 4 | 4 | 77 | 11 | 50 | 21 | 42 | 18 | 42 | 18 | 36 | 8 | 0 | 2 | 13 |
148,143 |
MacHu-GWU/docfly-project
|
MacHu-GWU_docfly-project/tests/test_doctree.py
|
test_doctree.TestArticleFolder
|
class TestArticleFolder(object):
    """Unit tests for :class:`ArticleFolder`."""

    def test_title(self):
        # default index file -> English titles
        folder = ArticleFolder(dir_path=dir_test_source.abspath)
        assert folder.title == "Welcome to the Document"

        folder = ArticleFolder(
            dir_path=dir_test_source.append_parts("Section1").abspath
        )
        assert folder.title == "Section1"

        # alternate index file -> Chinese titles
        folder = ArticleFolder(index_file="index_cn", dir_path=dir_test_source.abspath)
        assert folder.title == "欢迎来到此文档"

        folder = ArticleFolder(
            index_file="index_cn",
            dir_path=dir_test_source.append_parts("Section1").abspath,
        )
        assert folder.title == "第1章"

    def test_sub_article_folders(self):
        # each (folder, title template) pair must yield three numbered sections
        cases = [
            (ArticleFolder(dir_path=dir_test_source.abspath), "Section{}"),
            (
                ArticleFolder(index_file="index_cn", dir_path=dir_test_source.abspath),
                "第{}章",
            ),
        ]
        for folder, template in cases:
            assert len(folder.sub_article_folders) == 3
            for position, sub_folder in enumerate(folder.sub_article_folders):
                assert sub_folder.title == template.format(position + 1)

    def test_toc_directive(self):
        folder = ArticleFolder(dir_path=dir_test_source.abspath)
        rst = folder.toc_directive()
        for entry in (
            "Section1 <Section1/index>",
            "Section2 <Section2/index>",
            "Section3 <Section3/index>",
        ):
            assert entry in rst

        folder = ArticleFolder(index_file="index_cn", dir_path=dir_test_source.abspath)
        rst = folder.toc_directive()
        for entry in (
            "第1章 <Section1/index_cn>",
            "第2章 <Section2/index_cn>",
            "第3章 <Section3/index_cn>",
        ):
            assert entry in rst
|
class TestArticleFolder(object):
def test_title(self):
pass
def test_sub_article_folders(self):
pass
def test_toc_directive(self):
pass
| 4 | 0 | 11 | 1 | 10 | 0 | 2 | 0 | 1 | 2 | 1 | 0 | 3 | 0 | 3 | 3 | 35 | 5 | 30 | 9 | 26 | 0 | 30 | 9 | 26 | 3 | 1 | 1 | 5 |
148,144 |
MacHu-GWU/inspect_mate-project
|
MacHu-GWU_inspect_mate-project/inspect_mate/tests/__init__.py
|
inspect_mate.tests.Klass
|
class Klass(Base):
    """
    Empty subclass of ``Base``.

    NOTE(review): adds no members of its own -- presumably exists so the
    test suite can inspect members inherited from ``Base``; confirm
    against the tests that import it.
    """
    pass
|
class Klass(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
148,145 |
MacHu-GWU/inspect_mate-project
|
MacHu-GWU_inspect_mate-project/dev/analysis.py
|
analysis.MyClass
|
class MyClass(Base):
    """
    Empty subclass of ``Base``.

    NOTE(review): lives in a dev/analysis script -- presumably used for
    ad-hoc inspection of inherited members; confirm before relying on it.
    """
    pass
|
class MyClass(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
148,146 |
MacHu-GWU/inspect_mate-project
|
MacHu-GWU_inspect_mate-project/inspect_mate/tests/__init__.py
|
inspect_mate.tests.Base
|
class Base(object):
    """
    Fixture class exposing one member of each kind: a class-level
    attribute, an instance attribute set in ``__init__``, a property,
    a regular method, a static method, and a class method. Every member
    returns (or holds) a string equal to its own name.
    """

    # class-level attribute, shared by all instances
    attribute = "attribute"

    def __init__(self):
        # instance attribute that only exists after instantiation
        self.initiated_attribute = "initiated_attribute"

    @property
    def property_method(self):
        """Read-only property."""
        return "property_method"

    def regular_method(self):
        """Plain instance method."""
        return "regular_method"

    @staticmethod
    def static_method():
        """Static method -- no instance or class argument."""
        return "static_method"

    @classmethod
    def class_method(cls):
        """Class method -- receives the class as first argument."""
        return "class_method"
|
class Base(object):
def __init__(self):
pass
@property
def property_method(self):
pass
def regular_method(self):
pass
@staticmethod
def static_method():
pass
@classmethod
def class_method(cls):
pass
| 9 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 3 | 1 | 5 | 5 | 20 | 5 | 15 | 11 | 6 | 0 | 12 | 8 | 6 | 1 | 1 | 0 | 5 |
148,147 |
MacHu-GWU/inspect_mate-project
|
MacHu-GWU_inspect_mate-project/dev/analysis.py
|
analysis.Base
|
class Base(object):
    """
    Fixture class exposing one member of each kind: a class-level
    attribute, a property, a regular method, a static method, and a
    class method. Every member returns (or holds) a string equal to its
    own name. Unlike the test-suite variant, it defines no ``__init__``.
    """

    # class-level attribute, shared by all instances
    attribute = "attribute"

    @property
    def property_method(self):
        """Read-only property."""
        return "property_method"

    def regular_method(self):
        """Plain instance method."""
        return "regular_method"

    @staticmethod
    def static_method():
        """Static method -- no instance or class argument."""
        return "static_method"

    @classmethod
    def class_method(cls):
        """Class method -- receives the class as first argument."""
        return "class_method"
|
class Base(object):
@property
def property_method(self):
pass
def regular_method(self):
pass
@staticmethod
def static_method():
pass
@classmethod
def class_method(cls):
pass
| 8 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 2 | 0 | 4 | 4 | 17 | 4 | 13 | 9 | 5 | 0 | 10 | 6 | 5 | 1 | 1 | 0 | 4 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.