Column schema (for string columns, Min and Max are string lengths):

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 843k |
| repository_name | string | 7 | 55 |
| file_path | string | 9 | 332 |
| class_name | string | 3 | 290 |
| human_written_code | string | 12 | 4.36M |
| class_skeleton | string | 19 | 2.2M |
| total_program_units | int64 | 1 | 9.57k |
| total_doc_str | int64 | 0 | 4.2k |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 300 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 176 |
| CountClassBase | float64 | 0 | 48 |
| CountClassCoupled | float64 | 0 | 589 |
| CountClassCoupledModified | float64 | 0 | 581 |
| CountClassDerived | float64 | 0 | 5.37k |
| CountDeclInstanceMethod | float64 | 0 | 4.2k |
| CountDeclInstanceVariable | float64 | 0 | 299 |
| CountDeclMethod | float64 | 0 | 4.2k |
| CountDeclMethodAll | float64 | 0 | 4.2k |
| CountLine | float64 | 1 | 115k |
| CountLineBlank | float64 | 0 | 9.01k |
| CountLineCode | float64 | 0 | 94.4k |
| CountLineCodeDecl | float64 | 0 | 46.1k |
| CountLineCodeExe | float64 | 0 | 91.3k |
| CountLineComment | float64 | 0 | 27k |
| CountStmt | float64 | 1 | 93.2k |
| CountStmtDecl | float64 | 0 | 46.1k |
| CountStmtExe | float64 | 0 | 90.2k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 6k |

Each record below lists `id`, `repository_name`, `file_path`, `class_name`, `human_written_code`, and `class_skeleton` separated by `|`, followed by a trailing `|`-delimited row with the 29 numeric metric values in the column order above (`total_program_units` through `SumCyclomatic`).
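The records are easiest to work with programmatically. Below is a minimal sketch of how a slice of this table could be inspected with pandas; the file name `class_level_metrics.parquet` is a placeholder, not part of the dataset card, and the loading step is an assumption about how the records are exported. Only the column names come from the schema above.

```python
import pandas as pd

# Placeholder file name; point this at however the table is actually exported.
df = pd.read_parquet("class_level_metrics.parquet")

# Sanity checks against the ranges listed in the schema above.
assert df["CountLine"].ge(1).all()            # CountLine has a minimum of 1
assert df["CommentToCodeRatio"].ge(0).all()   # ratios are non-negative

# Example query: the largest classes (by CountLine) in one repository.
angora = df[df["repository_name"] == "MacHu-GWU/angora-project"]
print(angora.nlargest(5, "CountLine")[["class_name", "CountLine", "SumCyclomatic"]])
```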
147,848 |
MacHu-GWU/angora-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_angora-project/angora/baseclass/classtree.py
|
angora.baseclass.classtree.Unittest.test_all.DepartmentCollection
|
class DepartmentCollection(classtree.Base):
def __init__(self, collection_id=None, create_date=None, name=None):
self.collection_id = collection_id
self.create_date = create_date
self.name = name
department = Department(department_id=1, name='IT')
self.department_id____1 = department
self.name____IT = department
department = Department(department_id=2, name='HR')
self.department_id____2 = department
self.name____HR = department
|
class DepartmentCollection(classtree.Base):
def __init__(self, collection_id=None, create_date=None, name=None):
pass
| 2 | 0 | 12 | 2 | 10 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 7 | 1 | 1 | 13 | 2 | 11 | 10 | 9 | 0 | 11 | 10 | 9 | 1 | 1 | 0 | 1 |
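Each record pairs the `human_written_code` field with a `class_skeleton` in which method bodies are reduced to `pass` while signatures, decorators, and docstrings are kept. The sketch below shows one way such a skeleton could be derived with Python's `ast` module; it is an illustration under that assumption, not necessarily the procedure the curation pipeline used (for example, this sketch drops nested helper functions defined inside methods, which the stored skeletons keep).

```python
import ast

def class_skeleton(source: str) -> str:
    """Rebuild definitions with bodies reduced to docstring + `pass`.

    Illustrative approximation of the class_skeleton field; the dataset's
    actual extraction rules may differ.
    """
    tree = ast.parse(source)

    class _Stripper(ast.NodeTransformer):
        def _strip(self, node):
            self.generic_visit(node)  # handle nested classes/methods first
            body = []
            # Keep a leading docstring, if any.
            if (node.body and isinstance(node.body[0], ast.Expr)
                    and isinstance(node.body[0].value, ast.Constant)
                    and isinstance(node.body[0].value.value, str)):
                body.append(node.body[0])
            if isinstance(node, ast.ClassDef):
                # Keep method and inner-class definitions, drop other statements.
                body.extend(n for n in node.body
                            if isinstance(n, (ast.FunctionDef,
                                              ast.AsyncFunctionDef,
                                              ast.ClassDef)))
            if not body or isinstance(node, (ast.FunctionDef,
                                             ast.AsyncFunctionDef)):
                body.append(ast.Pass())  # function bodies become `pass`
            node.body = body
            return node

        visit_ClassDef = visit_FunctionDef = visit_AsyncFunctionDef = _strip

    return ast.unparse(ast.fix_missing_locations(_Stripper().visit(tree)))
```

Applied to the `DepartmentCollection` source above, this reproduces a skeleton of the same shape as the record's `class_skeleton` field.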
147,849 |
MacHu-GWU/angora-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_angora-project/angora/baseclass/classtree.py
|
angora.baseclass.classtree.Unittest.test_all.Department
|
class Department(classtree.Base):
def __init__(self, department_id=None, name=None):
self.department_id = department_id
self.name = name
|
class Department(classtree.Base):
def __init__(self, department_id=None, name=None):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 2 | 1 | 1 | 4 | 0 | 4 | 4 | 2 | 0 | 4 | 4 | 2 | 1 | 1 | 0 | 1 |
147,850 |
MacHu-GWU/angora-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_angora-project/angora/baseclass/classtree.py
|
angora.baseclass.classtree.Unittest.test_all.Database
|
class Database(classtree.Base):
def __init__(self, db_id=None, name=None):
self.db_id = db_id
self.name = name
personCollection = PersonCollection(
collection_id=1, create_date=datetime.date(2016, 1, 8), name='Person')
self.collection_id____1 = personCollection
self.name____Person = personCollection
departmentCollection = DepartmentCollection(
collection_id=2, create_date=datetime.date(2016, 1, 1), name='Department')
self.collection_id____2 = departmentCollection
self.name____Department = departmentCollection
|
class Database(classtree.Base):
def __init__(self, db_id=None, name=None):
pass
| 2 | 0 | 11 | 2 | 9 | 0 | 1 | 0 | 1 | 3 | 2 | 0 | 1 | 6 | 1 | 1 | 12 | 2 | 10 | 10 | 8 | 0 | 10 | 10 | 8 | 1 | 1 | 0 | 1 |
147,851 |
MacHu-GWU/angora-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_angora-project/angora/baseclass/classtree.py
|
angora.baseclass.classtree.Unittest
|
class Unittest(unittest.TestCase):
def test_all(self):
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from angora.baseclass import classtree
class Person(classtree.Base):
def __init__(self, name=None, person_id=None):
self.name = name
self.person_id = person_id
class PersonCollection(classtree.Base):
def __init__(self, collection_id=None, create_date=None, name=None):
self.collection_id = collection_id
self.create_date = create_date
self.name = name
person = Person(name='Jack', person_id=1)
self.name____Jack = person
self.person_id____1 = person
person = Person(name='Paul', person_id=2)
self.name____Paul = person
self.person_id____2 = person
class Department(classtree.Base):
def __init__(self, department_id=None, name=None):
self.department_id = department_id
self.name = name
class DepartmentCollection(classtree.Base):
def __init__(self, collection_id=None, create_date=None, name=None):
self.collection_id = collection_id
self.create_date = create_date
self.name = name
department = Department(department_id=1, name='IT')
self.department_id____1 = department
self.name____IT = department
department = Department(department_id=2, name='HR')
self.department_id____2 = department
self.name____HR = department
class Database(classtree.Base):
def __init__(self, db_id=None, name=None):
self.db_id = db_id
self.name = name
personCollection = PersonCollection(
collection_id=1, create_date=datetime.date(2016, 1, 8), name='Person')
self.collection_id____1 = personCollection
self.name____Person = personCollection
departmentCollection = DepartmentCollection(
collection_id=2, create_date=datetime.date(2016, 1, 1), name='Department')
self.collection_id____2 = departmentCollection
self.name____Department = departmentCollection
database = Database(db_id=1, name='Database')
|
class Unittest(unittest.TestCase):
def test_all(self):
pass
class Person(classtree.Base):
def __init__(self, name=None, person_id=None):
pass
class PersonCollection(classtree.Base):
def __init__(self, name=None, person_id=None):
pass
class Department(classtree.Base):
def __init__(self, name=None, person_id=None):
pass
class DepartmentCollection(classtree.Base):
def __init__(self, name=None, person_id=None):
pass
class Database(classtree.Base):
def __init__(self, name=None, person_id=None):
pass
| 12 | 0 | 18 | 4 | 13 | 0 | 1 | 0.04 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 73 | 65 | 18 | 45 | 43 | 31 | 2 | 45 | 43 | 31 | 1 | 2 | 0 | 6 |
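The trailing numeric row on each record holds the 29 metric columns, in the order given in the schema (`total_program_units` through `SumCyclomatic`). Most of the names (CountLine*, *Cyclomatic, MaxNesting, and so on) follow common static-analysis size and complexity metrics. As a rough illustration of what the simplest line-based ones measure, the sketch below approximates a few of them directly from a `human_written_code` string; it is not the tool or the exact counting rules used to populate the table.

```python
def rough_line_metrics(code_text: str) -> dict:
    """Approximate a few of the line-based columns for a Python snippet.

    Illustrative only: '#' lines count as comments, docstrings count as
    code, and the dataset's own counting rules may differ.
    """
    lines = code_text.splitlines()
    blank = sum(1 for line in lines if not line.strip())
    comment = sum(1 for line in lines if line.strip().startswith("#"))
    code = len(lines) - blank - comment
    return {
        "CountLine": len(lines),
        "CountLineBlank": blank,
        "CountLineComment": comment,
        "CountLineCode": code,
        "CommentToCodeRatio": comment / code if code else 0.0,
    }
```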
147,852 |
MacHu-GWU/angora-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_angora-project/angora/algorithm/iterable.py
|
angora.algorithm.iterable.IterToolsUnittest
|
class IterToolsUnittest(unittest.TestCase):
def setUp(self):
"""
self.iterable_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
self.iterable_set = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
self.iterable_dict = {1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E',
6: 'F', 7: 'G', 8: 'H', 9: 'I', 10: 'J',}
"""
self.iterable_generator = range(10)
self.iterable_list = list(range(10))
self.iterable_set = set(list(range(10)))
self.iterable_dict = {i: chr(j) for i, j in zip(
range(1, 11), range(65, 75))}
def test_take(self):
self.assertEqual(take(5, self.iterable_generator), [0, 1, 2, 3, 4])
self.assertEqual(take(5, self.iterable_list), [0, 1, 2, 3, 4])
self.assertEqual(take(5, self.iterable_set), [0, 1, 2, 3, 4])
self.assertEqual(take(5, self.iterable_dict), [1, 2, 3, 4, 5])
def test_flatten_functionality(self):
iterable = [[0, 1], [2, 3]]
self.assertListEqual(list(flatten(iterable)), list(range(4)))
def test_flatten_performance(self):
complexity = 1000
iterable = [list(range(complexity))] * complexity
st = time.clock()
for _ in flatten(iterable):
pass
elapse_flatten = time.clock() - st
st = time.clock()
for chunk in iterable:
for _ in chunk:
pass
elapse_double_loop = time.clock() - st
self.assertGreater(elapse_flatten, elapse_double_loop)
def test_flatten_all_functionality(self):
iterable = [[0, 1], [2, 3, [4, 5], [6, 7, 8]], [9,]]
self.assertListEqual(list(flatten_all(iterable)),
list(range(10)))
def test_nth(self):
self.assertEqual(nth(self.iterable_list, 5), 5)
self.assertEqual(nth(self.iterable_list, 100), None)
def test_shuffled(self):
self.assertNotEqual(shuffled(self.iterable_list),
self.iterable_list)
def test_grouper(self):
self.assertEqual(
list(grouper(self.iterable_list, 3, 1024)),
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 1024, 1024)],
)
def test_grouper_list(self):
self.assertEqual(
list(grouper_list(self.iterable_list, 3)),
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]],
)
def test_grouper_dict(self):
self.assertEqual(
list(grouper_dict(self.iterable_dict, 3)),
[
{1: 'A', 2: 'B', 3: 'C'},
{4: 'D', 5: 'E', 6: 'F'},
{7: 'G', 8: 'H', 9: 'I'},
{10: 'J'}
],
)
def test_running_windows(self):
self.assertEqual(
list(running_windows([1, 2, 3, 4, 5], 3)),
[[1, 2, 3], [2, 3, 4], [3, 4, 5]],
)
def test_cycle_running_windows(self):
self.assertEqual(
list(cycle_running_windows([1, 2, 3, 4, 5], 3)),
[[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1], [5, 1, 2]],
)
def test_cycle_slice(self):
self.assertEqual(
list(cycle_slice(self.iterable_list, 4, 7)),
[4, 5, 6, 7],
)
self.assertEqual(
list(cycle_slice(self.iterable_list, 8, 2)),
[8, 9, 0, 1, 2],
)
def test_cycle_dist(self):
self.assertEqual(cycle_dist(5, 13, 24), 8)
self.assertEqual(cycle_dist(1, 23, 24), 2)
self.assertAlmostEqual(cycle_dist(
0.0, 2.4, 1.0), 0.4, delta=0.0001)
self.assertAlmostEqual(cycle_dist(
0.0, 2.6, 1.0), 0.4, delta=0.0001)
def test_padding_left_shift(self):
self.assertEqual(
shift_to_the_left(self.iterable_list, 1, pad=True, trim=True),
[1, 2, 3, 4, 5, 6, 7, 8, 9, 9],
)
self.assertEqual(
shift_to_the_left(self.iterable_list, 1, pad=True, trim=False),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9],
)
self.assertEqual(
shift_to_the_left(self.iterable_list, 1, pad=False, trim=True),
[1, 2, 3, 4, 5, 6, 7, 8, 9],
)
def test_shift_to_the_left(self):
self.assertEqual(
shift_to_the_left(self.iterable_list, 1, pad=True, trim=True),
[1, 2, 3, 4, 5, 6, 7, 8, 9, 9],
)
self.assertEqual(
shift_to_the_left(self.iterable_list, 1, pad=True, trim=False),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9],
)
self.assertEqual(
shift_to_the_left(self.iterable_list, 1, pad=False, trim=True),
[1, 2, 3, 4, 5, 6, 7, 8, 9],
)
def test_shift_to_the_right(self):
self.assertEqual(
shift_to_the_right(self.iterable_list, 1, pad=True, trim=True),
[0, 0, 1, 2, 3, 4, 5, 6, 7, 8],
)
self.assertEqual(
shift_to_the_right(self.iterable_list, 1,
pad=True, trim=False),
[0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
)
self.assertEqual(
shift_to_the_right(self.iterable_list, 1,
pad=False, trim=True),
[0, 1, 2, 3, 4, 5, 6, 7, 8],
)
def test_count_generator(self):
self.assertEqual(count_generator(self.iterable_generator), 10)
|
class IterToolsUnittest(unittest.TestCase):
def setUp(self):
'''
self.iterable_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
self.iterable_set = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
self.iterable_dict = {1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E',
6: 'F', 7: 'G', 8: 'H', 9: 'I', 10: 'J',}
'''
pass
def test_take(self):
pass
def test_flatten_functionality(self):
pass
def test_flatten_performance(self):
pass
def test_flatten_all_functionality(self):
pass
def test_nth(self):
pass
def test_shuffled(self):
pass
def test_grouper(self):
pass
def test_grouper_list(self):
pass
def test_grouper_dict(self):
pass
def test_running_windows(self):
pass
def test_cycle_running_windows(self):
pass
def test_cycle_slice(self):
pass
def test_cycle_dist(self):
pass
def test_padding_left_shift(self):
pass
def test_shift_to_the_left(self):
pass
def test_shift_to_the_right(self):
pass
def test_count_generator(self):
pass
| 19 | 1 | 7 | 0 | 7 | 0 | 1 | 0.05 | 1 | 4 | 0 | 0 | 18 | 4 | 18 | 90 | 151 | 23 | 122 | 32 | 103 | 6 | 67 | 32 | 48 | 4 | 2 | 2 | 21 |
147,853 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/filesystem/filesystem.py
|
angora.filesystem.filesystem.WinFile
|
class WinFile(object):
"""Windows file class.
attributes includes:
- self.abspath absolute path (绝对路径)
- self.dirname parents directory name (父目录路径)
- self.basename complete file name (文件全名)
- self.fname the first part of file name (纯文件名)
- self.ext file extension (文件扩展名)
- self.atime last access time (文件最后一次被触碰的时间)
- self.ctime create time (文件被创建的时间)
- self.mtime last modify time (文件最后一次被修改的时间)
- self.size_on_disk file size in bytes (文件在硬盘上的大小, 单位bytes)
Appendix, The difference of (atime, ctime, mtime):
- access time (os.path.getatime)
- create time (os.path.getctime)
- modify time (os.path.getmtime)
- When rename, cut-and-paste, all 3 time stays.
- When edit the content, atime and mtime change, ctime stays.
- When copy the file to a new place, atime and ctime change, mtime stays.
**中文文档**
Windows文件对象, 可以通过 .属性名的方式访问 绝对路径, 文件夹路径,
文件名, 扩展名, 大小。免去了使用 ``os.path.split`` 等方法的麻烦。
附录, atime, ctime, mtime的区别
- 当文件被改名, 和剪切(剪切跟改名是一个操作), 所有3个时间都不变
- 当文件内容被修改, atime, mtime变化, ctime不变
- 当文件被复制到新地方时, atime, ctime变化, mtime不变
"""
__slots__ = [
"abspath", "dirname", "basename", "fname", "ext",
"atime", "ctime", "mtime", "size_on_disk", "md5",
]
init_mode = 2
def __init__(self, abspath):
if os.path.isfile(abspath): # 确保这是一个文件而不是目录
self.abspath = os.path.abspath(abspath)
self.initialize()
else:
raise FileNotFoundError(
"%s is not a file or it doesn't exist." % abspath)
def initialize(self):
"""Internal method. Initialize the value of some attributes.
"""
self.level2_initialize()
@staticmethod
def use_fast_init():
"""Set initialization mode to level1_initialize
"""
WinFile.initialize = WinFile.level1_initialize
WinFile.init_mode = 1
@staticmethod
def use_regular_init():
"""Set initialization mode to level2_initialize
"""
WinFile.initialize = WinFile.level2_initialize
WinFile.init_mode = 2
@staticmethod
def use_slow_init():
"""Set initialization mode to level3_initialize
"""
WinFile.initialize = WinFile.level3_initialize
WinFile.init_mode = 3
@staticmethod
def set_initialize_mode(complexity=2):
"""Set initialization mode. Default is slow mode.
**中文文档**
设置WinFile类的全局变量, 指定WinFile.initialize方法所绑定的初始化方式。
"""
if complexity == 3:
WinFile.initialize = WinFile.level3_initialize
WinFile.init_mode = 3
elif complexity == 2:
WinFile.initialize = WinFile.level2_initialize
WinFile.init_mode = 2
elif complexity == 1:
WinFile.initialize = WinFile.level1_initialize
WinFile.init_mode = 1
else:
raise ValueError("complexity has to be 3, 2 or 1.")
def level3_initialize(self):
"""Load abspath, dirname, basename, fname, ext, atime, ctime, mtime,
size_on_disk attributes in initialization.
**中文文档**
比较全面但稍慢的WinFile对象初始化方法, 从绝对路径中取得:
- 绝对路径
- 父目录路径
- 文件全名
- 纯文件名
- 文件扩展名
- access time
- create time
- modify time
- 文件占据磁盘大小
- 文件的哈希值
"""
self.dirname, self.basename = os.path.split(self.abspath) # 目录名, 文件名
self.fname, self.ext = os.path.splitext(self.basename) # 纯文件名, 文件扩展名
self.ext = self.ext.lower()
self.size_on_disk = os.path.getsize(self.abspath)
self.atime = os.path.getatime(self.abspath) # 接触时间
self.ctime = os.path.getctime(self.abspath) # 创建时间, 当文件被修改后不变
self.mtime = os.path.getmtime(self.abspath) # 修改时间
self.md5 = md5file(self.abspath, nbytes=1 << 20) # 文件的哈希值
def level2_initialize(self):
"""Load abspath, dirname, basename, fname, ext, atime, ctime, mtime,
size_on_disk attributes in initialization.
**中文文档**
比较全面但稍慢的WinFile对象初始化方法, 从绝对路径中取得:
- 绝对路径
- 父目录路径
- 文件全名
- 纯文件名
- 文件扩展名
- access time
- create time
- modify time
- 文件占据磁盘大小
"""
self.dirname, self.basename = os.path.split(self.abspath) # 目录名, 文件名
self.fname, self.ext = os.path.splitext(self.basename) # 纯文件名, 文件扩展名
self.ext = self.ext.lower()
self.size_on_disk = os.path.getsize(self.abspath)
self.atime = os.path.getatime(self.abspath) # 接触时间
self.ctime = os.path.getctime(self.abspath) # 创建时间, 当文件被修改后不变
self.mtime = os.path.getmtime(self.abspath) # 修改时间
def level1_initialize(self):
"""Load abspath, dirname, basename, fname, ext
attributes in initialization.
**中文文档**
快速的WinFile对象初始化方法, 只从绝对路径中取得:
- 绝对路径
- 目录路径
- 文件全名
- 纯文件名
- 文件扩展名
"""
self.dirname, self.basename = os.path.split(self.abspath)
self.fname, self.ext = os.path.splitext(self.basename)
self.ext = self.ext.lower()
def __str__(self):
return self.abspath
def __repr__(self):
info = ",\n\t".join([
"abspath='%s'" % self.abspath,
"dirname='%s'" % self.dirname,
"basename='%s'" % self.basename,
"fname='%s'" % self.fname,
"ext='%s'" % self.ext,
])
return "WinFile(\n\t%s,\n)" % info
def to_dict(self):
d = dict()
for attr in self.__slots__:
try:
d[attr] = self.__getattribute__(attr)
except AttributeError:
pass
return d
def rename(self, new_dirname=None, new_fname=None, new_ext=None):
"""Rename the dirname, fname, extension or their combinations.
**中文文档**
对文件的父目录名, 纯文件名, 扩展名, 或它们的组合进行修改。
"""
if not new_dirname:
new_dirname = self.dirname
else:
new_dirname = os.path.abspath(new_dirname)
if not new_fname:
new_fname = self.fname
if new_ext: # 检查新文件名的扩展名格式是否
if not new_ext.startswith("."):
raise ValueError("File extension must in format .ext, "
"for example: .jpg, .mp3")
else:
new_ext = self.ext
new_basename = new_fname + new_ext
new_abspath = os.path.join(new_dirname, new_basename)
os.rename(self.abspath, new_abspath)
# 如果成功重命名, 则更新文件信息
self.abspath = new_abspath
self.dirname = new_dirname
self.basename = new_basename
self.fname = new_fname
self.ext = new_ext
|
class WinFile(object):
'''Windows file class.
attributes includes:
- self.abspath absolute path (绝对路径)
- self.dirname parents directory name (父目录路径)
- self.basename complete file name (文件全名)
- self.fname the first part of file name (纯文件名)
- self.ext file extension (文件扩展名)
- self.atime last access time (文件最后一次被触碰的时间)
- self.ctime create time (文件被创建的时间)
- self.mtime last modify time (文件最后一次被修改的时间)
- self.size_on_disk file size in bytes (文件在硬盘上的大小, 单位bytes)
Appendix, The difference of (atime, ctime, mtime):
- access time (os.path.getatime)
- create time (os.path.getctime)
- modify time (os.path.getmtime)
- When rename, cut-and-paste, all 3 time stays.
- When edit the content, atime and mtime change, ctime stays.
- When copy the file to a new place, atime and ctime change, mtime stays.
**中文文档**
Windows文件对象, 可以通过 .属性名的方式访问 绝对路径, 文件夹路径,
文件名, 扩展名, 大小。免去了使用 ``os.path.split`` 等方法的麻烦。
附录, atime, ctime, mtime的区别
- 当文件被改名, 和剪切(剪切跟改名是一个操作), 所有3个时间都不变
- 当文件内容被修改, atime, mtime变化, ctime不变
- 当文件被复制到新地方时, atime, ctime变化, mtime不变
'''
def __init__(self, abspath):
pass
def initialize(self):
'''Internal method. Initialize the value of some attributes.
'''
pass
@staticmethod
def use_fast_init():
'''Set initialization mode to level1_initialize
'''
pass
@staticmethod
def use_regular_init():
'''Set initialization mode to level2_initialize
'''
pass
@staticmethod
def use_slow_init():
'''Set initialization mode to level3_initialize
'''
pass
@staticmethod
def set_initialize_mode(complexity=2):
'''Set initialization mode. Default is slow mode.
**中文文档**
设置WinFile类的全局变量, 指定WinFile.initialize方法所绑定的初始化方式。
'''
pass
def level3_initialize(self):
'''Load abspath, dirname, basename, fname, ext, atime, ctime, mtime,
size_on_disk attributes in initialization.
**中文文档**
比较全面但稍慢的WinFile对象初始化方法, 从绝对路径中取得:
- 绝对路径
- 父目录路径
- 文件全名
- 纯文件名
- 文件扩展名
- access time
- create time
- modify time
- 文件占据磁盘大小
- 文件的哈希值
'''
pass
def level2_initialize(self):
'''Load abspath, dirname, basename, fname, ext, atime, ctime, mtime,
size_on_disk attributes in initialization.
**中文文档**
比较全面但稍慢的WinFile对象初始化方法, 从绝对路径中取得:
- 绝对路径
- 父目录路径
- 文件全名
- 纯文件名
- 文件扩展名
- access time
- create time
- modify time
- 文件占据磁盘大小
'''
pass
def level1_initialize(self):
'''Load abspath, dirname, basename, fname, ext
attributes in initialization.
**中文文档**
快速的WinFile对象初始化方法, 只从绝对路径中取得:
- 绝对路径
- 目录路径
- 文件全名
- 纯文件名
- 文件扩展名
'''
pass
def __str__(self):
pass
def __repr__(self):
pass
def to_dict(self):
pass
def rename(self, new_dirname=None, new_fname=None, new_ext=None):
'''Rename the dirname, fname, extension or their combinations.
**中文文档**
对文件的父目录名, 纯文件名, 扩展名, 或它们的组合进行修改。
'''
pass
| 18 | 10 | 13 | 1 | 7 | 5 | 2 | 0.94 | 1 | 4 | 0 | 0 | 9 | 10 | 13 | 13 | 220 | 37 | 101 | 33 | 83 | 95 | 80 | 29 | 66 | 5 | 1 | 2 | 23 |
147,854 |
MacHu-GWU/angora-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_angora-project/angora/algorithm/binarysearch.py
|
angora.algorithm.binarysearch.FindNearestUnittest
|
class FindNearestUnittest(unittest.TestCase):
def test_all(self):
sorted_list = list(range(10))
self.assertEqual(find_nearest(sorted_list, 4.4), 4)
self.assertEqual(find_nearest(sorted_list, 4.5), 4)
self.assertEqual(find_nearest(sorted_list, 4.6), 5)
|
class FindNearestUnittest(unittest.TestCase):
def test_all(self):
pass
| 2 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 73 | 6 | 0 | 6 | 3 | 4 | 0 | 6 | 3 | 4 | 1 | 2 | 0 | 1 |
147,855 |
MacHu-GWU/angora-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_angora-project/angora/algorithm/binarysearch.py
|
angora.algorithm.binarysearch.BSearchUnittest
|
class BSearchUnittest(unittest.TestCase):
def test_all(self):
sorted_list = [0, 1, 2, 3]
self.assertEqual(find_index(sorted_list, 2), 2)
self.assertEqual(find_lt(sorted_list, 2.5), 2)
self.assertEqual(find_le(sorted_list, 2.0), 2)
self.assertEqual(find_gt(sorted_list, 0.5), 1)
self.assertEqual(find_ge(sorted_list, 1.0), 1)
|
class BSearchUnittest(unittest.TestCase):
def test_all(self):
pass
| 2 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 73 | 8 | 0 | 8 | 3 | 6 | 0 | 8 | 3 | 6 | 1 | 2 | 0 | 1 |
147,856 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/gadget/configuration.py
|
angora.gadget.configuration.Configuration
|
class Configuration(object):
"""Configuration class implements a basic configuration language.
"""
def __init__(self):
self._sections = OrderedDict()
self._sections["DEFAULT"] = Section("DEFAULT")
def __getattr__(self, attr):
try:
return self._sections[attr]
except KeyError:
raise Exception("Cannot find section name '%s'." % attr)
def add_section(self, section_name):
"""Add an empty section.
"""
if section_name == "DEFAULT":
raise Exception("'DEFAULT' is reserved section name.")
if section_name in self._sections:
raise Exception(
"Error! %s is already one of the sections" % section_name)
else:
self._sections[section_name] = Section(section_name)
def remove_section(self, section_name):
"""Remove a section, it cannot be the DEFAULT section.
"""
if section_name == "DEFAULT":
raise Exception("'DEFAULT' is reserved section name.")
if section_name in self._sections:
del self._sections[section_name]
else:
raise Exception("Error! cannot find section '%s'.")
def set_section(self, section):
"""Set a section. If section already exists, overwrite the old one.
"""
if not isinstance(section, Section):
raise Exception("You")
try:
self.remove_section(section.name)
except:
pass
self._sections[section.name] = copy.deepcopy(section)
def sections(self):
"""Return a list of section name.
"""
return list(self._sections.keys())
def __str__(self):
section_text_list = list()
for section in self._sections.values():
section_text_list.append(str(section))
return "\n\n".join(section_text_list)
def dump(self, path):
"""Save config to file.
"""
with open(path, "wb") as f:
f.write(str(self).encode("utf-8"))
def load(self, path):
"""Read configuration from file.
"""
with open(path, "rb") as f:
content = f.read().decode("utf-8")
section_text_list = list()
lines = list()
for line in content.split("\n"):
if line.startswith("["):
section_text_list.append("\n".join(lines))
lines = list()
lines.append(line)
else:
lines.append(line)
section_text_list.append("\n".join(lines))
self._sections = OrderedDict()
for text in section_text_list[1:]:
section = Section.from_text(text)
self._sections[section.name] = section
|
class Configuration(object):
'''Configuration class implements a basic configuration language.
'''
def __init__(self):
pass
def __getattr__(self, attr):
pass
def add_section(self, section_name):
'''Add an empty section.
'''
pass
def remove_section(self, section_name):
'''Remove a section, it cannot be the DEFAULT section.
'''
pass
def set_section(self, section):
'''Set a section. If section already exists, overwrite the old one.
'''
pass
def sections(self):
'''Return a list of section name.
'''
pass
def __str__(self):
pass
def dump(self, path):
'''Save config to file.
'''
pass
def load(self, path):
'''Read configuration from file.
'''
pass
| 10 | 7 | 9 | 1 | 6 | 1 | 2 | 0.24 | 1 | 6 | 1 | 0 | 9 | 1 | 9 | 9 | 90 | 17 | 59 | 21 | 49 | 14 | 55 | 19 | 45 | 4 | 1 | 2 | 20 |
147,857 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/gadget/configuration.py
|
angora.gadget.configuration.Section
|
class Section(object):
"""Section class.
"""
def __init__(self, section_name):
if not isinstance(section_name, str):
raise SectionNameError0
for char in section_name:
if char not in "ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789":
raise SectionNameError1
if section_name[0].isdigit():
raise SectionNameError2
self.name = section_name
self.data = OrderedDict()
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def items(self):
"""Generator method
"""
for key, value in self.data.items():
yield key, value
########################################################################
# _value_is_str() and _value_is_list() is for serving __str__() method #
########################################################################
def _value_is_str(self, key, value):
try:
return "%s = '%s'" % (key, int(value))
except:
try:
return "%s = '%s'" % (key, float(value))
except:
if value.lower() in ["true", "yes"]:
return "%s = 'True'" % key
elif value.lower() in ["false", "no"]:
return "%s = 'False'" % key
else:
return "%s = %s" % (key, value)
def _value_is_list(self, key, value):
if len(value) == 0: # empty list
return "%s = ," % key
else:
if isinstance(value[0], _str_type): # 只要是字符串, 则在两边加'号
return "%s = %s" % (key, ", ".join(["'%s'" % s for s in value]))
else: # 否则直接调用__str__()
return "%s = %s" % (key, ", ".join([str(i) for i in value]))
def __str__(self):
pairs_list = ["[%s]" % self.name]
for key, value in self.data.items():
if isinstance(value, list):
pairs_list.append(self._value_is_list(key, value))
else: # 是单个值
if isinstance(value, str): # 是字符串
pairs_list.append(self._value_is_str(key, value))
else:
pairs_list.append("%s = %s" % (key, value))
return "\n".join(pairs_list)
@staticmethod
def from_text(text):
def detect_bool_str(text):
if text.lower() in ["true", "yes"]:
return True
elif text.lower() in ["false", "no"]:
return False
else:
raise Exception(
"%s is not one of True, False, Yes, No (case-insensitive)" % text)
########################################################
# step1, process lines which has the following pattern #
# "# some comment", drop it #
# " # some comment", drop it #
# " ", drop it #
# " key = value # some comment", delete comment #
# "[section_name]" #
########################################################
lines = list()
for line in text.split("\n"):
if not line.startswith("#"):
if line.strip(): # 只要不是空格栏
if "#" in line:
line = line[:line.find("#")]
if line.strip(): # 只要去掉#之后还不是空栏
lines.append(line.strip())
else:
lines.append(line.strip())
section_name = lines[0][1:-1] # section_name is the first valid line
section = Section(section_name)
###############################################
# step2, split key, value, and process value. #
# automatically detect datatype #
###############################################
for line in lines[1:]:
key, value = line.split("=")
key = key.strip()
value = value.strip()
# 从字符串中解析value, 跟 _value_is_str, _value_is_list 是相反的过程
if "," in value: # 说明是list, 可能是 布尔值list, 整数list, 小数list, 字符串list
values = [s.strip() for s in value.split(",") if s.strip()]
try: # 尝试整数list
values = [int(s) for s in values]
except:
try: # 尝试小数list
values = [float(s) for s in values]
except:
try: # 尝试布尔值list
values = [detect_bool_str(s) for s in values]
except: # 一定是字符串list
values = [
s.replace("'", "").replace('"', '') for s in values]
section[key] = values
else: # 说明是单个项目, 可能是 布尔值, 整数, 小数, 字符串
if value.lower() in ["true", "yes"]:
section[key] = True
elif value.lower() in ["false", "no"]:
section[key] = False
else: # 整数, 小数, 字符串
try: # 整数
section[key] = int(value)
except:
try: # 小数
section[key] = float(value)
except: # 字符串
section[key] = value.replace(
"'", "").replace('"', '')
return section
|
class Section(object):
'''Section class.
'''
def __init__(self, section_name):
pass
def __getitem__(self, key):
pass
def __setitem__(self, key, value):
pass
def items(self):
'''Generator method
'''
pass
def _value_is_str(self, key, value):
pass
def _value_is_list(self, key, value):
pass
def __str__(self):
pass
@staticmethod
def from_text(text):
pass
def detect_bool_str(text):
pass
| 11 | 2 | 15 | 1 | 12 | 4 | 4 | 0.39 | 1 | 9 | 3 | 0 | 7 | 2 | 8 | 8 | 144 | 19 | 105 | 23 | 94 | 41 | 89 | 22 | 79 | 15 | 1 | 5 | 39 |
147,858 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/gadget/configuration.py
|
angora.gadget.configuration.SectionNameError0
|
class SectionNameError0(Exception):
def __str__(self):
return "Section name has to be string"
|
class SectionNameError0(Exception):
def __str__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 11 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
147,859 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/gadget/configuration.py
|
angora.gadget.configuration.SectionNameError1
|
class SectionNameError1(Exception):
def __str__(self):
return "Section name can only have capital letter, _ and numbers."
|
class SectionNameError1(Exception):
def __str__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 11 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
147,860 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/gadget/configuration.py
|
angora.gadget.configuration.SectionNameError2
|
class SectionNameError2(Exception):
def __str__(self):
return "Section name cannot start with numbers."
|
class SectionNameError2(Exception):
def __str__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 11 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
147,861 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/gadget/logger.py
|
angora.gadget.logger.EZLogger
|
class EZLogger(object):
"""A quick logger constructor.
:param name: logger name
:param path: log file path, default None, do not write to file
:param logging_level: debug level above this will be logged
:param stream_level: debug level above this will be printed to console
:param format: log format
"""
tab = " "
def __init__(self, name="root", path=None,
logging_level=logging.DEBUG,
stream_level=logging.INFO,
format="%(asctime)s; %(levelname)-8s; %(message)s"):
logger = logging.getLogger(name)
# Logging level
logger.setLevel(logging.DEBUG)
# Print screen level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
# File handler
if path:
fh = logging.FileHandler(path, encoding="utf-8")
# Formatter
formatter = logging.Formatter(format)
fh.setFormatter(formatter)
logger.addHandler(fh)
self.logger = logger
def debug(self, msg, indent=0):
self.logger.debug("%s%s" % (self.tab * indent, msg))
def info(self, msg, indent=0):
self.logger.info("%s%s" % (self.tab * indent, msg))
def warning(self, msg, indent=0):
self.logger.warning("%s%s" % (self.tab * indent, msg))
def error(self, msg, indent=0):
self.logger.error("%s%s" % (self.tab * indent, msg))
def critical(self, msg, indent=0):
self.logger.critical("%s%s" % (self.tab * indent, msg))
|
class EZLogger(object):
'''A quick logger constructor.
:param name: logger name
:param path: log file path, default None, do not write to file
:param logging_level: debug level above this will be logged
:param stream_level: debug level above this will be printed to console
:param format: log format
'''
def __init__(self, name="root", path=None,
logging_level=logging.DEBUG,
stream_level=logging.INFO,
format="%(asctime)s; %(levelname)-8s; %(message)s"):
pass
def debug(self, msg, indent=0):
pass
def info(self, msg, indent=0):
pass
def warning(self, msg, indent=0):
pass
def error(self, msg, indent=0):
pass
def critical(self, msg, indent=0):
pass
| 7 | 1 | 6 | 1 | 4 | 1 | 1 | 0.41 | 1 | 3 | 0 | 0 | 6 | 1 | 6 | 6 | 50 | 12 | 27 | 16 | 17 | 11 | 24 | 13 | 17 | 2 | 1 | 1 | 7 |
147,862 |
MacHu-GWU/angora-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_angora-project/angora/algorithm/binarysearch.py
|
angora.algorithm.binarysearch.FindLastTrueUnittest
|
class FindLastTrueUnittest(unittest.TestCase):
"""Find the last item less and equal than 500 in an array.
"""
def test_all(self):
def true_criterion(item):
return item <= 500
sorted_list = list({random.randint(1, 100000)
for _ in range(1000)})
sorted_list.sort()
index = find_last_true(sorted_list, true_criterion)
self.assertLessEqual(sorted_list[index], 500)
|
class FindLastTrueUnittest(unittest.TestCase):
'''Find the last item less and equal than 500 in an array.
'''
def test_all(self):
pass
def true_criterion(item):
pass
| 3 | 1 | 5 | 0 | 5 | 0 | 1 | 0.25 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 73 | 10 | 0 | 8 | 5 | 5 | 2 | 8 | 5 | 5 | 1 | 2 | 0 | 2 |
147,863 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/filesystem/filesystem.py
|
angora.filesystem.filesystem.WinDir
|
class WinDir(object):
"""Windows directory class.
**中文文档**
Windows目录对象, 可以通过 .属性名来访问 绝对路径, 目录总大小, 子目录数量,
子文件数量。免去了使用os.path.function的麻烦。并提供了prt_detail()方法直接
打印出文件夹的详细信息。
WinDir的属性:
- self.size_total: 文件夹总大小
- self.size_current_total: 文件夹一级子文件总大小
- self.num_folder_total: 子文件夹数量
- self.num_folder_current: 一级子文件夹数量
- self.num_file_total: 子文件数量
- self.num_file_current: 一级子文件数量
"""
def __init__(self, abspath):
if os.path.isdir(abspath): # 确保这是一个目录而不是文件
self.abspath = os.path.abspath(abspath)
self.dirname, self.basename = os.path.split(self.abspath)
self.get_detail()
else:
raise ValueError("'%s' is not a file." % abspath)
def __str__(self):
return self.abspath
def __repr__(self):
return self.abspath
def get_detail(self):
"""Get general stats information.
Includes:
- size_total: total size on disk
- num_folder_total: how many subfolders
- num_file_total: how many files
- size_current: total size of files on this folder. file in subfolders
doesn't count
- num_folder_current: how many files, subfolders doens't count
- num_file_current: how many files, file in subfolders doens't count
"""
self.size_total = 0
self.num_folder_total = 0
self.num_file_total = 0
self.size_current = 0
self.num_folder_current = 0
self.num_file_current = 0
for current_dir, folderlist, fnamelist in os.walk(self.abspath):
self.num_folder_total += len(folderlist)
self.num_file_total += len(fnamelist)
for fname in fnamelist:
self.size_total += os.path.getsize(os.path.join(current_dir, fname))
current_dir, folderlist, fnamelist = next(os.walk(self.abspath))
self.num_folder_current = len(folderlist)
self.num_file_current = len(fnamelist)
for fname in fnamelist:
self.size_current += os.path.getsize(os.path.join(current_dir, fname))
def prt_detail(self):
"""Nicely print stats information.
"""
screen = [
"Detail info of %s: " % self.abspath,
"total size = %s" % string_SizeInBytes(self.size_total),
"number of sub folders = %s" % self.num_folder_total,
"number of total files = %s" % self.num_file_total,
"lvl 1 file size = %s" % string_SizeInBytes(self.size_current),
"lvl 1 folder number = %s" % self.num_folder_current,
"lvl 1 file number = %s" % self.num_file_current,
]
print("\n".join(screen))
def rename(self, new_dirname=None, new_basename=None):
"""Rename the dirname, basename or their combinations.
**中文文档**
对文件的目录名, 文件夹名, 或它们的组合进行修改。
"""
if not new_basename:
new_basename = self.new_basename
if not new_dirname:
new_dirname = self.dirname
else:
new_dirname = os.path.abspath(new_dirname)
new_abspath = os.path.join(new_dirname, new_basename)
os.rename(self.abspath, new_abspath)
# 如果成功重命名, 则更新文件信息
self.abspath = new_abspath
self.dirname = new_dirname
self.basename = new_basename
|
class WinDir(object):
'''Windows directory class.
**中文文档**
Windows目录对象, 可以通过 .属性名来访问 绝对路径, 目录总大小, 子目录数量,
子文件数量。免去了使用os.path.function的麻烦。并提供了prt_detail()方法直接
打印出文件夹的详细信息。
WinDir的属性:
- self.size_total: 文件夹总大小
- self.size_current_total: 文件夹一级子文件总大小
- self.num_folder_total: 子文件夹数量
- self.num_folder_current: 一级子文件夹数量
- self.num_file_total: 子文件数量
- self.num_file_current: 一级子文件数量
'''
def __init__(self, abspath):
pass
def __str__(self):
pass
def __repr__(self):
pass
def get_detail(self):
'''Get general stats information.
Includes:
- size_total: total size on disk
- num_folder_total: how many subfolders
- num_file_total: how many files
- size_current: total size of files on this folder. file in subfolders
doesn't count
- num_folder_current: how many files, subfolders doens't count
- num_file_current: how many files, file in subfolders doens't count
'''
pass
def prt_detail(self):
'''Nicely print stats information.
'''
pass
def rename(self, new_dirname=None, new_basename=None):
'''Rename the dirname, basename or their combinations.
**中文文档**
对文件的目录名, 文件夹名, 或它们的组合进行修改。
'''
pass
| 7 | 4 | 13 | 1 | 9 | 3 | 2 | 0.6 | 1 | 1 | 0 | 0 | 6 | 9 | 6 | 6 | 101 | 19 | 52 | 19 | 45 | 31 | 42 | 19 | 35 | 4 | 1 | 2 | 12 |
147,864 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/gadget/codestats.py
|
angora.gadget.codestats.CodeStats
|
class CodeStats(object):
"""A simple Python project code, comment, docstr line counter.
Code stats analyzer should be initiated with the project workspace path and
the programming language.
- code: number of code lines, includes docstr.
- comment: number of comment lines.
- docstr: number of doc string lines.
- purecode: code lines not include docstr.
Usage example::
>>> from angora.gadget import CodeStats
>>> analyzer = CodeStats(workspace=r"C:\Python33\lib\site-packages\requests")
>>> analyzer.run()
Code statistic result for 'C:\Python33\lib\site-packages\requests'
79 'Python' files, 80 other files.
code line: 12362
comment line: 2025
docstr line: 1545
purecode line: 10817
**中文文档**
:class:`CodeStats` 是一个用来统计Python项目中所有文件的代码, 注释和
文档字符串行数的小工具。下面是对这些名词的说明:
- code (代码): 代码行数, 包括文档字符串。
- comment (注释): 注释行数。
- docstr (文档字符串): 文档字符串行数。
- purecode (纯代码): 纯代码不包括文档字符串。
用例::
>>> from angora.gadget import CodeStats
>>> analyzer = CodeStats(workspace=r"C:\Python33\lib\site-packages\requests")
>>> analyzer.run()
Code statistic result for 'C:\Python33\lib\site-packages\requests'
79 'Python' files, 80 other files.
code line: 12362
comment line: 2025
docstr line: 1545
purecode line: 10817
"""
def __init__(self, workspace, ignore=list()):
if not os.path.exists(workspace):
raise FileNotFoundError("%r doesn't exists!" % workspace)
self.workspace = os.path.abspath(workspace)
self.ignore = ignore
self.language = "Python"
self.analyzer = self.analyzePython
self.filter = self.filterPython
def run(self):
"""Run analysis.
The basic idea is to recursively find all script files in specific
programming language, and analyze each file then sum it up.
"""
n_target_file, n_other_file = 0, 0
code, comment, docstr, purecode = 0, 0, 0, 0
fc = FileCollection.from_path_except(self.workspace, self.ignore)
fc_yes, fc_no = fc.select(self.filter, keepboth=True)
n_other_file += len(fc_no)
for abspath in fc_yes:
try:
with open(abspath, "rb") as f:
code_text = f.read().decode("utf-8")
code_, comment_, docstr_, purecode_ = self.analyzer(
code_text)
code += code_
comment += comment_
docstr += docstr_
purecode += purecode_
n_target_file += 1
except Exception as e:
n_other_file += 1
lines = list()
lines.append("Code statistic result for '%s'" % self.workspace)
lines.append(" %r %r files, %r other files." %
(n_target_file, self.language, n_other_file))
lines.append(" code line: %s" % code)
lines.append(" comment line: %s" % comment)
lines.append(" docstr line: %s" % docstr)
lines.append(" purecode line: %s" % purecode)
message = "\n".join(lines)
print(message)
return message
@staticmethod
def filterPython(winfile):
if winfile.ext == ".py":
return True
else:
return False
@staticmethod
def analyzePython(code_text):
"""Count how many line of code, comment, dosstr, purecode in one
Python script file.
"""
code, comment, docstr = 0, 0, 0
p1 = r"""(?<=%s)[\s\S]*?(?=%s)""" % ('"""', '"""')
p2 = r"""(?<=%s)[\s\S]*?(?=%s)""" % ("'''", "'''")
# count docstr
for pattern in [p1, p2]:
for res in re.findall(pattern, code_text)[::2]:
lines = [i.strip() for i in res.split("\n") if i.strip()]
docstr += len(lines)
# count comment line and code
lines = [i.strip() for i in code_text.split("\n") if i.strip()]
for line in lines:
if line.startswith("#"):
comment += 1
else:
code += 1
purecode = code - docstr # pure code = code - docstr
return code, comment, docstr, purecode
def forPython(self):
self.filter = self.filterPython
self.analyzer = self.analyzePython
self.language = "Python"
@staticmethod
def filterText(winfile):
return True
@staticmethod
def analyzeText(code_text):
code, commend, docstr = 0, 0, 0
code = code_text.count("\n") + 1
comment = 0
docstr = 0
purecode = code - docstr # pure code = code - docstr
return code, comment, docstr, purecode
def forText(self):
self.filter = self.filterText
self.analyzer = self.analyzeText
self.language = "Text"
|
class CodeStats(object):
'''A simple Python project code, comment, docstr line counter.
Code stats analyzer should be initiated with the project workspace path and
the programming language.
- code: number of code lines, includes docstr.
- comment: number of comment lines.
- docstr: number of doc string lines.
- purecode: code lines not include docstr.
Usage example::
>>> from angora.gadget import CodeStats
>>> analyzer = CodeStats(workspace=r"C:\Python33\lib\site-packages
equests")
>>> analyzer.run()
Code statistic result for 'C:\Python33\lib\site-packages
equests'
79 'Python' files, 80 other files.
code line: 12362
comment line: 2025
docstr line: 1545
purecode line: 10817
**中文文档**
:class:`CodeStats` 是一个用来统计Python项目中所有文件的代码, 注释和
文档字符串行数的小工具。下面是对这些名词的说明:
- code (代码): 代码行数, 包括文档字符串。
- comment (注释): 注释行数。
- docstr (文档字符串): 文档字符串行数。
- purecode (纯代码): 纯代码不包括文档字符串。
用例::
>>> from angora.gadget import CodeStats
>>> analyzer = CodeStats(workspace=r"C:\Python33\lib\site-packages
equests")
>>> analyzer.run()
Code statistic result for 'C:\Python33\lib\site-packages
equests'
79 'Python' files, 80 other files.
code line: 12362
comment line: 2025
docstr line: 1545
purecode line: 10817
'''
def __init__(self, workspace, ignore=list()):
pass
def run(self):
'''Run analysis.
The basic idea is to recursively find all script files in specific
programming language, and analyze each file then sum it up.
'''
pass
@staticmethod
def filterPython(winfile):
pass
@staticmethod
def analyzePython(code_text):
'''Count how many line of code, comment, dosstr, purecode in one
Python script file.
'''
pass
def forPython(self):
pass
@staticmethod
def filterText(winfile):
pass
@staticmethod
def analyzeText(code_text):
pass
def forText(self):
pass
| 13 | 3 | 12 | 1 | 10 | 2 | 2 | 0.58 | 1 | 3 | 0 | 0 | 4 | 5 | 8 | 8 | 152 | 27 | 81 | 40 | 68 | 47 | 73 | 34 | 64 | 5 | 1 | 3 | 16 |
147,865 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/filesystem/filesystem.py
|
angora.filesystem.filesystem.FileCollection
|
class FileCollection(object):
"""A container class of WinFile.
Simplify file selection, removing, filtering, sorting operations.
Here's an example select all files and wrap as a WinFile::
>>> from angora.filesystem.filesystem import FileCollection
>>> fc = FileCollection.from_path("some_path")
>>> for winfile in fc.iterfiles():
... print(winfile)
**中文文档**
WinFile的专用容器, 主要用于方便的从文件夹中选取文件, 筛选文件, 并对指定文件集排序。
当然, 可以以迭代器的方式对容器内的文件对象进行访问。
"""
def __init__(self):
self.files = OrderedDict() # {文件绝对路径: 包含各种详细信息的WinFile对象}
def __str__(self):
if len(self.files) == 0:
return "***Empty FileCollection***"
try:
return "\n".join(list(self.order))
except:
return "\n".join(list(self.files.keys()))
def __len__(self):
return len(self.files)
def __getitem__(self, index):
"""Get the ``index``th winfile.
"""
try:
return self.files[self.order[index]]
except:
index += 1
for winfile in self.iterfiles():
index -= 1
if not index:
return winfile
def __contains__(self, item):
"""
"""
if isinstance(item, str): # abspath
abspath = os.path.abspath(item)
elif isinstance(item, WinFile): # WinFile
abspath = item.abspath
else: # invalid type
raise TypeError
if abspath in self.files:
return True
else:
return False
def add(self, abspath_or_winfile, enable_verbose=True):
"""Add absolute path or WinFile to FileCollection.
"""
if isinstance(abspath_or_winfile, str): # abspath
if abspath_or_winfile in self.files:
if enable_verbose:
print("'%s' already in this collections" %
abspath_or_winfile)
else:
self.files.setdefault(abspath_or_winfile, WinFile(abspath_or_winfile))
elif isinstance(abspath_or_winfile, WinFile): # WinFile
if abspath_or_winfile.abspath in self.files:
if enable_verbose:
print("'%s' already in this collections" %
abspath_or_winfile)
else:
self.files.setdefault(abspath_or_winfile.abspath, abspath_or_winfile)
else:
raise TypeError
def remove(self, abspath_or_winfile, enable_verbose=True):
"""Remove absolute path or WinFile from FileCollection.
"""
if isinstance(abspath_or_winfile, str): # abspath
try:
del self.files[abspath_or_winfile]
except KeyError:
if enable_verbose:
print("'%s' are not in this file collections" %
abspath_or_winfile)
elif isinstance(abspath_or_winfile, WinFile): # WinFile
try:
del self.files[abspath_or_winfile.abspath]
except KeyError:
if enable_verbose:
print("'%s' are not in this file collections" %
abspath_or_winfile)
else:
raise TypeError
@property
def howmany(self):
"""An alias of __len__() method.
"""
return len(self.files)
def iterfiles(self):
"""Yield all WinFile object.
"""
try:
for path in self.order:
yield self.files[path]
except:
for winfile in self.files.values():
yield winfile
def iterpaths(self):
"""Yield all WinFile's absolute path.
"""
try:
for path in self.order:
yield path
except:
for path in self.files:
yield path
def __iter__(self):
"""Default iterator is to yield absolute paht only.
"""
return self.iterpaths()
@staticmethod
def yield_all_file_path(dir_abspath):
"""
**中文文档**
遍历path目录下的所有文件, 返回绝对路径。
"""
if os.path.isdir(dir_abspath):
dir_abspath = os.path.abspath(dir_abspath)
for current_folder, _, fnamelist in os.walk(dir_abspath):
for fname in fnamelist:
yield os.path.join(current_folder, fname)
else:
raise FileNotFoundError(
"'%s' may not exists or is not a directory!" % dir_abspath)
@staticmethod
def yield_all_winfile(dir_abspath):
"""
**中文文档**
遍历path目录下的所有文件, 返回WinFile。
"""
for abspath in FileCollection.yield_all_file_path(dir_abspath):
yield WinFile(abspath)
@staticmethod
def yield_all_top_file_path(dir_abspath):
"""
**中文文档**
遍历path目录下的所有文件, 不包括子文件夹中的文件, 返回绝对路径。
"""
if os.path.isdir(dir_abspath):
dir_abspath = os.path.abspath(dir_abspath)
for current_folder, _, fnamelist in os.walk(dir_abspath):
for fname in fnamelist:
yield os.path.join(current_folder, fname)
break
else:
raise FileNotFoundError(
"'%s' may not exists or is not a directory!" % dir_abspath)
@staticmethod
def yield_all_top_winfile(dir_abspath):
"""
**中文文档**
遍历path目录下的所有文件, 不包括子文件夹中的文件, 返回WinFile。
"""
for abspath in FileCollection.yield_all_top_file_path(dir_abspath):
yield WinFile(abspath)
@staticmethod
def yield_all_dir_path(dir_abspath):
"""
**中文文档**
遍历dir_abspath目录下的所有子目录, 返回绝对路径。
"""
if os.path.isdir(dir_abspath):
for current_folder, folderlist, _ in os.walk(dir_abspath):
for folder in folderlist:
yield os.path.join(current_folder, folder)
else:
raise Exception(
"'%s' may not exists or is not a directory!" % dir_abspath)
@staticmethod
def yield_all_windir(dir_abspath):
"""
**中文文档**
遍历dir_abspath目录下的所有子目录, 返回绝对WinDir。
"""
for abspath in FileCollection.yield_all_dir_path(dir_abspath):
yield WinDir(abspath)
@staticmethod
def yield_all_top_dir_path(dir_abspath):
"""
**中文文档**
遍历dir_abspath目录下的所有子目录, 不包括子目录中的子目录, 返回绝对路径。
"""
if os.path.isdir(dir_abspath):
for current_folder, folderlist, _ in os.walk(dir_abspath):
for folder in folderlist:
yield os.path.join(current_folder, folder)
break
else:
raise Exception(
"'%s' may not exists or is not a directory!" % dir_abspath)
@staticmethod
def yield_all_top_windir(dir_abspath):
"""
**中文文档**
遍历dir_abspath目录下的所有子目录, 不包括子目录中的子目录, 返回绝对WinDir。
"""
for abspath in FileCollection.yield_all_top_dir_path(dir_abspath):
yield WinDir(abspath)
@staticmethod
def remove_children(list_of_abspath):
"""Remove all dir path that being children path of other dir path.
**中文文档**
去除list_of_abspath中所有目录的子目录, 保证其中的所有元素不可能为另一个
元素的子目录。
"""
sorted_list_of_abspath = list(list_of_abspath)
sorted_list_of_abspath.sort()
sorted_list_of_abspath.append("")
res = list()
temp = sorted_list_of_abspath[0]
for abspath in sorted_list_of_abspath:
if temp not in abspath:
res.append(temp)
temp = abspath
return res
@staticmethod
def from_path(list_of_dir):
"""Create a new FileCollection and add all files from ``dir_path``.
:param list_of_dir: absolute dir path, WinDir instance, list of
absolute dir path or list of WinDir instance.
**中文文档**
添加dir_path目录下的所有文件到一个新的FileCollection中.
"""
if isinstance(list_of_dir, str):
list_of_dir = [list_of_dir, ]
elif isinstance(list_of_dir, WinDir):
list_of_dir = [list_of_dir.abspath, ]
elif isinstance(list_of_dir, list):
list_of_dir = [str(i) for i in list_of_dir]
fc = FileCollection()
for dir_path in list_of_dir:
for winfile in FileCollection.yield_all_winfile(dir_path):
fc.files.setdefault(winfile.abspath, winfile)
return fc
@staticmethod
def from_path_by_criterion(dir_path, criterion, keepboth=False):
"""Create a new FileCollection, and select some files from ``dir_path``.
How to construct your own criterion function::
def filter_image(winfile):
if winfile.ext in [".jpg", ".png", ".bmp"]:
return True
else:
return False
fc = FileCollection.from_path_by_criterion(dir_path, filter_image)
:param dir_path: path of a directory
:type dir_path: string
:param criterion: customize filter function
:type criterion: function
:param keepboth: if True, returns two file collections, one is files
with criterion=True, another is False.
:type keepboth: boolean
**中文文档**
直接选取dir_path目录下所有文件, 根据criterion中的规则, 生成
FileCollection。
"""
if keepboth:
fc_yes, fc_no = FileCollection(), FileCollection()
for winfile in FileCollection.yield_all_winfile(dir_path):
if criterion(winfile):
fc_yes.files.setdefault(winfile.abspath, winfile)
else:
fc_no.files.setdefault(winfile.abspath, winfile)
return fc_yes, fc_no
else:
fc = FileCollection()
for winfile in FileCollection.yield_all_winfile(dir_path):
if criterion(winfile):
fc.files.setdefault(winfile.abspath, winfile)
return fc
@staticmethod
def from_path_except(dir_path,
ignore=list(), ignore_ext=list(), ignore_pattern=list()):
"""Create a new FileCollection, and select all files except file
matching ignore-rule::
dir_path = "your/path"
fc = FileCollection.from_path_except(
dir_path, ignore=["test"], ignore_ext=[".log", ".tmp"]
ignore_pattern=["some_pattern"])
:param dir_path: the root directory you want to start with
:param ignore: file or directory defined in this list will be ignored.
:param ignore_ext: file with extensions defined in this list will be ignored.
:param ignore_pattern: any file or directory that contains this pattern
will be ignored.
**中文文档**
选择dir_path下的所有文件, 在ignore, ignore_ext, ignore_pattern中所定义
的文件将被排除在外。
"""
ignore = [i.lower() for i in ignore]
ignore_ext = [i.lower() for i in ignore_ext]
ignore_pattern = [i.lower() for i in ignore_pattern]
def filter(winfile):
relpath = os.path.relpath(winfile.abspath, dir_path).lower()
# exclude ignore
for path in ignore:
if relpath.startswith(path):
return False
# exclude ignore extension
if winfile.ext in ignore_ext:
return False
# exclude ignore pattern
for pattern in ignore_pattern:
if pattern in relpath:
return False
return True
return FileCollection.from_path_by_criterion(
dir_path, filter, keepboth=False)
@staticmethod
def from_path_by_pattern(dir_path, pattern=list()):
"""Create a new FileCollection, and select all files except file
matching ignore-rule::
dir_path = "your/path"
fc = FileCollection.from_path_by_pattern(
dir_path, pattern=["log"])
:param dir_path: the root directory you want to start with
:param pattern: any file or directory that contains this pattern
will be selected.
**中文文档**
选择dir_path下的所有文件的相对路径中包含有pattern的文件。
"""
pattern = [i.lower() for i in pattern]
def filter(winfile):
relpath = os.path.relpath(winfile.abspath, dir_path).lower()
for p in pattern:
if p in relpath:
return True
return False
return FileCollection.from_path_by_criterion(
dir_path, filter, keepboth=False)
@staticmethod
def from_path_by_size(dir_path, min_size=0, max_size=1 << 40):
"""Create a new FileCollection, and select all files that size in
a range::
dir_path = "your/path"
# select by file size larger than 100MB
fc = FileCollection.from_path_by_size(
dir_path, min_size=100*1024*1024)
# select by file size smaller than 100MB
fc = FileCollection.from_path_by_size(
dir_path, max_size=100*1024*1024)
# select by file size from 1MB to 100MB
fc = FileCollection.from_path_by_size(
dir_path, min_size=1024*1024, max_size=100*1024*1024)
"""
def filter(winfile):
if (winfile.size_on_disk >= min_size) and \
(winfile.size_on_disk <= max_size):
return True
else:
return False
return FileCollection.from_path_by_criterion(
dir_path, filter, keepboth=False)
@staticmethod
def from_path_by_ext(dir_path, ext):
"""Create a new FileCollection, and select all files that extension
matching ``ext``::
dir_path = "your/path"
fc = FileCollection.from_path_by_ext(dir_path, ext=[".jpg", ".png"])
"""
if isinstance(ext, (list, set, dict)): # collection of extension
def filter(winfile):
if winfile.ext in ext:
return True
else:
return False
else: # str
def filter(winfile):
if winfile.ext == ext:
return True
else:
return False
return FileCollection.from_path_by_criterion(
dir_path, filter, keepboth=False)
@staticmethod
def from_path_by_md5(md5_value, list_of_dir):
"""Create a new FileCollection, and select all files' that md5 is
matching.
**中文文档**
给定一个文件使用WinFile模块获得的md5值, 在list_of_dir中的文件里,
找到与之相同的文件。
"""
def filter(winfile):
if winfile.md5 == md5_value:
return True
else:
return False
if not isinstance(list_of_dir, (list, set)):
list_of_dir = [list_of_dir, ]
init_mode = WinFile.init_mode
WinFile.use_slow_init()
fc = FileCollection()
for dir_path in list_of_dir:
for winfile in FileCollection.from_path_by_criterion(
dir_path, filter, keepboth=False).iterfiles():
fc.files.setdefault(winfile.abspath, winfile)
if init_mode == 1:
WinFile.use_fast_init()
elif init_mode == 2:
WinFile.use_regular_init()
elif init_mode == 3:
WinFile.use_slow_init()
return fc
def sort_by(self, attr_name, reverse=False):
"""Sort files by one of it's attributes.
**中文文档**
对容器内的WinFile根据其某一个属性升序或者降序排序。
"""
try:
d = dict()
for abspath, winfile in self.files.items():
d[abspath] = getattr(winfile, attr_name)
self.order = [item[0] for item in sorted(
list(d.items()), key=lambda t: t[1], reverse = reverse)]
except AttributeError:
raise ValueError("valid sortable attributes are: "
"abspath, dirname, basename, fname, ext, "
"size_on_disk, atime, ctime, mtime;")
def sort_by_abspath(self, reverse=False):
"""
**中文文档**
对WinFile根据 **绝对路径** 进行排序。
"""
self.sort_by("abspath", reverse=reverse)
def sort_by_dirname(self, reverse=False):
"""
**中文文档**
对WinFile根据 **父目录路径** 进行排序。
"""
self.sort_by("dirname", reverse=reverse)
def sort_by_fname(self, reverse=False):
"""
**中文文档**
对WinFile根据 **纯文件名** 进行排序。
"""
self.sort_by("fname", reverse=reverse)
def sort_by_ext(self, reverse=False):
"""
**中文文档**
对WinFile根据 **文件扩展名** 进行排序。
"""
self.sort_by("ext", reverse=reverse)
def sort_by_atime(self, reverse=False):
"""
**中文文档**
对WinFile根据 **文件最后一次被触碰的时间** 进行排序。
"""
self.sort_by("atime", reverse=reverse)
def sort_by_ctime(self, reverse=False):
"""
**中文文档**
对WinFile根据 **文件被创建的时间** 进行排序。
"""
self.sort_by("ctime", reverse=reverse)
def sort_by_mtime(self, reverse=False):
"""
**中文文档**
对WinFile根据 **文件最后一次被修改的时间** 进行排序。
"""
self.sort_by("mtime", reverse=reverse)
def sort_by_size(self, reverse=False):
"""
**中文文档**
对WinFile根据 **文件在硬盘上的大小** 进行排序。
"""
self.sort_by("size_on_disk", reverse=reverse)
def select(self, criterion, keepboth=False):
"""Filter current file collections, create another file collections
contains all winfile with criterion=True.
How to construct your own criterion function, see
:meth:`FileCollection.from_path_by_criterion`.
:param criterion: customize filter function
:type criterion: function
:param keepboth: if True, returns two file collections, one is files
with criterion=True, another is False.
:type keepboth: boolean
**中文文档**
在当前的文件集合中, 根据criterion中的规则, 选择需要的生成
FileCollection。当keepboth参数=True时, 返回两个FileCollection, 一个
是符合条件的文件集合, 一个是不符合条件的。
"""
if keepboth:
fcs_yes, fcs_no = FileCollection(), FileCollection()
for winfile in self.files.values():
if criterion(winfile):
fcs_yes.files[winfile.abspath] = winfile
else:
fcs_no.files[winfile.abspath] = winfile
return fcs_yes, fcs_no
else:
fcs = FileCollection()
for winfile in self.files.values():
if criterion(winfile):
fcs.files[winfile.abspath] = winfile
return fcs
def __add__(self, other_fc):
if not isinstance(other_fc, FileCollection):
raise TypeError(
"A FileCollection has to add with another FileCollection")
fc = copy.deepcopy(self)
for winfile in other_fc.iterfiles():
fc.files.setdefault(winfile.abspath, winfile)
return fc
@staticmethod
def sum(list_of_fc):
for fc in list_of_fc:
if not isinstance(fc, FileCollection):
raise TypeError("FileCollection.sum(list_of_fc) only take "
"list of FileCollection")
_fc = FileCollection()
for fc in list_of_fc:
for winfile in fc.iterfiles():
_fc.files.setdefault(winfile.abspath, winfile)
return _fc
def __sub__(self, other_fc):
if not isinstance(other_fc, FileCollection):
raise TypeError(
"A FileCollection can only subtract another FileCollection")
fc = copy.deepcopy(self)
for abspath in other_fc.iterpaths():
try:
del fc.files[abspath]
except KeyError:
pass
return fc
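A minimal sketch of how the ``+``, ``-`` and ``sum`` operations above combine, assuming ``dir_a`` is a placeholder directory that exists:

fc = FileCollection.from_path("dir_a")
fc_small, fc_big = fc.select(lambda w: w.size_on_disk < 1024 * 1024, keepboth=True)

# Union is keyed by absolute path, so overlapping files appear only once.
fc_union = fc_small + fc_big

# Subtraction removes entries whose absolute path appears in the other collection.
fc_without_big = fc - fc_big

# Merge an arbitrary list of collections in one call.
fc_all = FileCollection.sum([fc_small, fc_big])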
#--- Useful recipe ---
@staticmethod
def show_big_file(dir_path, threshold):
"""Print all file path that file size greater and equal than
``#threshold``.
"""
fc = FileCollection.from_path_by_size(dir_path, min_size=threshold)
fc.sort_by("size_on_disk")
lines = list()
lines.append("Results:")
for winfile in fc.iterfiles():
lines.append(" %s - %s" %
(string_SizeInBytes(winfile.size_on_disk), winfile))
lines.append("Above are files' size greater than %s." %
string_SizeInBytes(threshold))
text = "\n".join(lines)
print(text)
with open("__show_big_file__.log", "wb") as f:
f.write(text.encode("utf-8"))
@staticmethod
def show_patterned_file(dir_path, pattern=list(), filename_only=True):
"""Print all file that file name contains ``pattern``.
"""
pattern = [i.lower() for i in pattern]
if filename_only:
def filter(winfile):
for p in pattern:
if p in winfile.fname.lower():
return True
return False
else:
def filter(winfile):
for p in pattern:
if p in winfile.abspath.lower():
return True
return False
fc = FileCollection.from_path_by_criterion(
dir_path, filter, keepboth=False)
if filename_only:
fc.sort_by("fname")
else:
fc.sort_by("abspath")
table = {p: "<%s>" % p for p in pattern}
lines = list()
lines.append("Results:")
for winfile in fc.iterfiles():
lines.append(" %s" % winfile)
if filename_only:
lines.append("Above are all files whose file name contains %s" % pattern)
else:
lines.append("Above are all files whose absolute path contains %s" % pattern)
text = "\n".join(lines)
print(text)
with open("__show_patterned_file__.log", "wb") as f:
f.write(text.encode("utf-8"))
@staticmethod
def create_fake_mirror(src, dst):
"""Copy all dir, files from ``src`` to ``dst``. But only create a empty file
with same file name. Of course, the tree structure doesn't change.
A recipe gadget to create some test data set.
Make sure to use absolute path.
**中文文档**
复制整个src目录下的文件树结构到dst目录。但实际上并不复制内容, 只复制
文件名。即, 全是空文件, 但目录结构一致。
"""
src = os.path.abspath(src)
if not (os.path.exists(src) and (not os.path.exists(dst)) ):
raise Exception("source not exist or distination already exist")
folder_to_create = list()
file_to_create = list()
for current_folder, _, file_list in os.walk(src):
new_folder = os.path.join(dst, os.path.relpath(current_folder, src))
folder_to_create.append(new_folder)
for basename in file_list:
file_to_create.append(os.path.join(new_folder, basename))
for abspath in folder_to_create:
os.mkdir(abspath)
for abspath in file_to_create:
with open(abspath, "w") as _:
pass
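A minimal usage sketch for the class above; ``"photos"`` is a placeholder directory and the import path follows the module's own docstring:

from angora.filesystem.filesystem import FileCollection

fc = FileCollection.from_path_by_ext("photos", ext=[".jpg", ".png"])
fc.sort_by_size(reverse=True)          # largest files first
for winfile in fc.iterfiles():         # WinFile objects in sorted order
    print(winfile.basename, winfile.size_on_disk)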
|
class FileCollection(object):
'''A container class of WinFile.
Simplify file selection, removing, filtering, sorting operations.
Here's an example select all files and wrap as a WinFile::
>>> from angora.filesystem.filesystem import FileCollection
>>> fc = FileCollection.from_path("some_path")
>>> for winfile in fc.iterfiles():
... print(winfile)
A dedicated container for WinFile objects. It makes it easy to pick files
from a directory, filter them, and sort the selected files. The files in the
container can also be visited through an iterator.
'''
def __init__(self):
pass
def __str__(self):
pass
def __len__(self):
pass
def __getitem__(self, index):
'''Get the ``index``th winfile.
'''
pass
def __contains__(self, item):
'''
'''
pass
def add(self, abspath_or_winfile, enable_verbose=True):
'''Add absolute path or WinFile to FileCollection.
'''
pass
def remove(self, abspath_or_winfile, enable_verbose=True):
'''Remove absolute path or WinFile from FileCollection.
'''
pass
@property
def howmany(self):
'''An alias of __len__() method.
'''
pass
def iterfiles(self):
'''Yield all WinFile object.
'''
pass
def iterpaths(self):
'''Yield all WinFile's absolute path.
'''
pass
def __iter__(self):
'''The default iterator yields absolute paths only.
'''
pass
@staticmethod
def yield_all_file_path(dir_abspath):
'''Walk all files under dir_abspath (recursively) and yield absolute paths.
'''
pass
@staticmethod
def yield_all_winfile(dir_abspath):
'''Walk all files under dir_abspath (recursively) and yield WinFile objects.
'''
pass
@staticmethod
def yield_all_top_file_path(dir_abspath):
'''Walk the files directly under dir_abspath (no sub-directories) and yield
absolute paths.
'''
pass
@staticmethod
def yield_all_top_winfile(dir_abspath):
'''Walk the files directly under dir_abspath (no sub-directories) and yield
WinFile objects.
'''
pass
@staticmethod
def yield_all_dir_path(dir_abspath):
'''Walk all sub-directories under dir_abspath and yield absolute paths.
'''
pass
@staticmethod
def yield_all_windir(dir_abspath):
'''Walk all sub-directories under dir_abspath and yield WinDir objects.
'''
pass
@staticmethod
def yield_all_top_dir_path(dir_abspath):
'''Walk the sub-directories directly under dir_abspath (no nested
sub-directories) and yield absolute paths.
'''
pass
@staticmethod
def yield_all_top_windir(dir_abspath):
'''Walk the sub-directories directly under dir_abspath (no nested
sub-directories) and yield WinDir objects.
'''
pass
@staticmethod
def remove_children(list_of_abspath):
'''Remove every dir path that is a child path of another dir path.

Drop from list_of_abspath every directory that is a sub-directory of
another directory in the list, so that no element can be a child of
another element.
'''
pass
@staticmethod
def from_path(list_of_dir):
'''Create a new FileCollection and add all files from ``dir_path``.
:param list_of_dir: absolute dir path, WinDir instance, list of
absolute dir path or list of WinDir instance.
Add all files under dir_path to a new FileCollection.
'''
pass
@staticmethod
def from_path_by_criterion(dir_path, criterion, keepboth=False):
'''Create a new FileCollection, and select some files from ``dir_path``.
How to construct your own criterion function::
def filter_image(winfile):
if winfile.ext in [".jpg", ".png", ".bmp"]:
return True
else:
return False
fc = FileCollection.from_path_by_criterion(dir_path, filter_image)
:param dir_path: path of a directory
:type dir_path: string
:param criterion: customize filter function
:type criterion: function
:param keepboth: if True, returns two file collections, one is files
with criterion=True, another is False.
:type keepboth: boolean
Take all files under dir_path and, according to the rules in criterion,
build a FileCollection from them.
'''
pass
@staticmethod
def from_path_except(dir_path,
ignore=list(), ignore_ext=list(), ignore_pattern=list()):
'''Create a new FileCollection, and select all files except file
matching ignore-rule::
dir_path = "your/path"
fc = FileCollection.from_path_except(
dir_path, ignore=["test"], ignore_ext=[".log", ".tmp"]
ignore_pattern=["some_pattern"])
:param dir_path: the root directory you want to start with
:param ignore: file or directory defined in this list will be ignored.
:param ignore_ext: file with extensions defined in this list will be ignored.
:param ignore_pattern: any file or directory that contains this pattern
will be ignored.
Select all files under dir_path; files matching the rules defined in
ignore, ignore_ext and ignore_pattern are excluded.
'''
pass
def filter_image(winfile):
pass
@staticmethod
def from_path_by_pattern(dir_path, pattern=list()):
'''Create a new FileCollection, and select all files except file
matching ignore-rule::
dir_path = "your/path"
fc = FileCollection.from_path_by_pattern(
dir_path, pattern=["log"])
:param dir_path: the root directory you want to start with
:param pattern: any file or directory that contains this pattern
will be selected.
Select the files under dir_path whose relative path contains pattern.
'''
pass
def filter_image(winfile):
pass
@staticmethod
def from_path_by_size(dir_path, min_size=0, max_size=1 << 40):
'''Create a new FileCollection, and select all files that size in
a range::
dir_path = "your/path"
# select by file size larger than 100MB
fc = FileCollection.from_path_by_size(
dir_path, min_size=100*1024*1024)
# select by file size smaller than 100MB
fc = FileCollection.from_path_by_size(
dir_path, max_size=100*1024*1024)
# select by file size from 1MB to 100MB
fc = FileCollection.from_path_by_size(
dir_path, min_size=1024*1024, max_size=100*1024*1024)
'''
pass
def filter_image(winfile):
pass
@staticmethod
def from_path_by_ext(dir_path, ext):
'''Create a new FileCollection, and select all files that extension
matching ``ext``::
dir_path = "your/path"
fc = FileCollection.from_path_by_ext(dir_path, ext=[".jpg", ".png"])
'''
pass
def filter_image(winfile):
pass
def filter_image(winfile):
pass
@staticmethod
def from_path_by_md5(md5_value, list_of_dir):
'''Create a new FileCollection, and select all files whose md5 value
matches.

Given the md5 value of a file (as computed by the WinFile module), find
the files under list_of_dir that have the same md5.
'''
pass
def filter_image(winfile):
pass
def sort_by(self, attr_name, reverse=False):
'''Sort files by one of its attributes.

Sort the WinFile objects in the container by the given attribute, in
ascending or descending order.
'''
pass
def sort_by_abspath(self, reverse=False):
'''Sort WinFiles by **absolute path**.
'''
pass
def sort_by_dirname(self, reverse=False):
'''Sort WinFiles by **parent directory path**.
'''
pass
def sort_by_fname(self, reverse=False):
'''Sort WinFiles by **file name (without extension)**.
'''
pass
def sort_by_ext(self, reverse=False):
'''Sort WinFiles by **file extension**.
'''
pass
def sort_by_atime(self, reverse=False):
'''Sort WinFiles by **last access time**.
'''
pass
def sort_by_ctime(self, reverse=False):
'''Sort WinFiles by **creation time**.
'''
pass
def sort_by_mtime(self, reverse=False):
'''Sort WinFiles by **last modification time**.
'''
pass
def sort_by_size(self, reverse=False):
'''Sort WinFiles by **size on disk**.
'''
pass
def select(self, criterion, keepboth=False):
'''Filter current file collections, create another file collections
contains all winfile with criterion=True.
How to construct your own criterion function, see
:meth:`FileCollection.from_path_by_criterion`.
:param criterion: customize filter function
:type criterion: function
:param keepboth: if True, returns two file collections, one is files
with criterion=True, another is False.
:type keepboth: boolean
From the current file collection, select the files for which criterion
returns True and build a new FileCollection. When keepboth=True, two
FileCollections are returned: one with the matching files and one with the
rest.
'''
pass
def __add__(self, other_fc):
pass
@staticmethod
def sum(list_of_fc):
pass
def __sub__(self, other_fc):
pass
@staticmethod
def show_big_file(dir_path, threshold):
'''Print the path of every file whose size is greater than or equal to
``threshold``.
'''
pass
@staticmethod
def show_patterned_file(dir_path, pattern=list(), filename_only=True):
'''Print every file whose file name contains one of the ``pattern`` strings.
'''
pass
def filter_image(winfile):
pass
def filter_image(winfile):
pass
@staticmethod
def create_fake_mirror(src, dst):
'''Copy the directory tree from ``src`` to ``dst``, but only create empty
files with the same file names; the tree structure itself is unchanged.
A recipe gadget to create a test data set.
Make sure to use absolute paths.

Mirror the whole directory tree under src into dst without copying any file
content; only the file names are reproduced, so every file is empty while
the directory structure stays identical.
'''
pass
| 73 | 38 | 14 | 2 | 8 | 4 | 3 | 0.54 | 1 | 13 | 2 | 0 | 23 | 2 | 43 | 43 | 751 | 146 | 399 | 143 | 325 | 216 | 325 | 119 | 273 | 7 | 1 | 3 | 153 |
147,866 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/hashes/fingerprint.py
|
angora.hashes.fingerprint.FingerPrint
|
class FingerPrint(object):
"""A hashlib wrapper class allow you to use one line to do hash as you wish.
Usage::
>>> from weatherlab.lib.hashes.fingerprint import fingerprint
>>> print(fingerprint.of_bytes(bytes(123)))
b1fec41621e338896e2d26f232a6b006
>>> print(fingerprint.of_text("message"))
78e731027d8fd50ed642340b7c9a63b3
>>> print(fingerprint.of_pyobj({"key": "value"}))
4c502ab399c89c8758a2d8c37be98f69
>>> print(fingerprint.of_file("fingerprint.py"))
4cddcb5562cbff652b0e4c8a0300337a
"""
_chunk_size = 2**20
def __init__(self):
self.default_hash_method = hashlib.md5
self.return_int = False
def digest_to_int(self, digest):
"""Convert hexdigest str to int.
"""
return int(digest, 16)
def use(self, algorithm):
"""Change the hash algorithm you gonna use.
"""
algorithm = algorithm.lower()
if algorithm == "md5":
self.default_hash_method = hashlib.md5
elif algorithm == "sha1":
self.default_hash_method = hashlib.sha1
elif algorithm == "sha224":
self.default_hash_method = hashlib.sha224
elif algorithm == "sha256":
self.default_hash_method = hashlib.sha256
elif algorithm == "sha384":
self.default_hash_method = hashlib.sha384
elif algorithm == "sha512":
self.default_hash_method = hashlib.sha512
else:
raise WrongHashAlgorithmError("There's no algorithm names '%s'! "
"use one of 'md5', 'sha1', 'sha224', "
"'sha256', 'sha384', 'sha512'." % algorithm)
def set_return_int(self, flag):
"""If flag = False, return hex string, if True, return integer.
default = False.
"""
self.return_int = bool(flag)
def of_bytes(self, py_bytes):
"""Use default hash method to return hash value of bytes.
"""
m = self.default_hash_method()
m.update(py_bytes)
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest()
def of_text(self, text, encoding="utf-8"):
"""Use default hash method to return hash value of a piece of string
default setting use 'utf-8' encoding.
"""
m = self.default_hash_method()
m.update(text.encode(encoding))
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest()
def of_pyobj(self, pyobj):
"""Use default hash method to return hash value of a piece of Python
picklable object.
"""
m = self.default_hash_method()
m.update(pickle.dumps(pyobj, protocol=pk_protocol))
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest()
def of_file(self, abspath, nbytes=0):
"""Use default hash method to return hash value of a piece of a file
Estimate processing time on:
:param abspath: the absolute path to the file
:param nbytes: only has first N bytes of the file. if 0, hash all file
CPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB
1 second can process 0.25GB data
- 0.59G - 2.43 sec
- 1.3G - 5.68 sec
- 1.9G - 7.72 sec
- 2.5G - 10.32 sec
- 3.9G - 16.0 sec
ATTENTION:
if you change the meta data (for example, the title, years
information in audio, video) of a multi-media file, then the hash
value gonna also change.
"""
if not os.path.exists(abspath):
raise FileNotFoundError(
"[Errno 2] No such file or directory: '%s'" % abspath)
m = self.default_hash_method()
with open(abspath, "rb") as f:
if nbytes:
data = f.read(nbytes)
if data:
m.update(data)
else:
while True:
data = f.read(self._chunk_size)
if not data:
break
m.update(data)
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest()
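A short usage sketch of the class above; the file path is a placeholder, and instantiating FingerPrint directly is equivalent to the module-level ``fingerprint`` instance mentioned in the docstring:

fp = FingerPrint()

fp.use("sha256")                      # switch from the default md5 to sha256
digest = fp.of_text("message")        # hex string digest

fp.set_return_int(True)               # digests now come back as integers
n = fp.of_pyobj({"key": "value"})     # hash of a pickled Python object

# Hash only the first megabyte of a (placeholder) file path.
partial = fp.of_file("some_big_file.bin", nbytes=1024 * 1024)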
|
class FingerPrint(object):
'''A hashlib wrapper class that lets you compute a hash with one line of code.
Usage::
>>> from weatherlab.lib.hashes.fingerprint import fingerprint
>>> print(fingerprint.of_bytes(bytes(123)))
b1fec41621e338896e2d26f232a6b006
>>> print(fingerprint.of_text("message"))
78e731027d8fd50ed642340b7c9a63b3
>>> print(fingerprint.of_pyobj({"key": "value"}))
4c502ab399c89c8758a2d8c37be98f69
>>> print(fingerprint.of_file("fingerprint.py"))
4cddcb5562cbff652b0e4c8a0300337a
'''
def __init__(self):
pass
def digest_to_int(self, digest):
'''Convert hexdigest str to int.
'''
pass
def use(self, algorithm):
'''Change the hash algorithm to use.
'''
pass
def set_return_int(self, flag):
'''If flag = False, return hex string, if True, return integer.
default = False.
'''
pass
def of_bytes(self, py_bytes):
'''Use default hash method to return hash value of bytes.
'''
pass
def of_text(self, text, encoding="utf-8"):
'''Use the default hash method to return the hash value of a string.
By default the string is encoded with 'utf-8'.
'''
pass
def of_pyobj(self, pyobj):
'''Use the default hash method to return the hash value of a picklable
Python object.
'''
pass
def of_file(self, abspath, nbytes=0):
'''Use the default hash method to return the hash value of a file.

:param abspath: the absolute path to the file
:param nbytes: only hash the first N bytes of the file; if 0, hash the whole file

Estimated processing time (CPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB),
about 0.25GB of data per second:
- 0.59G - 2.43 sec
- 1.3G - 5.68 sec
- 1.9G - 7.72 sec
- 2.5G - 10.32 sec
- 3.9G - 16.0 sec
ATTENTION:
if you change the metadata of a multi-media file (for example the title or
year information of an audio or video file), its hash value will also change.
'''
pass
| 9 | 8 | 13 | 1 | 8 | 4 | 3 | 0.63 | 1 | 4 | 1 | 0 | 8 | 2 | 8 | 8 | 130 | 19 | 68 | 18 | 59 | 43 | 54 | 17 | 45 | 7 | 1 | 4 | 23 |
147,867 |
MacHu-GWU/angora-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_angora-project/angora/baseclass/classtree.py
|
angora.baseclass.classtree.Unittest.test_all.PersonCollection
|
class PersonCollection(classtree.Base):
def __init__(self, collection_id=None, create_date=None, name=None):
self.collection_id = collection_id
self.create_date = create_date
self.name = name
person = Person(name='Jack', person_id=1)
self.name____Jack = person
self.person_id____1 = person
person = Person(name='Paul', person_id=2)
self.name____Paul = person
self.person_id____2 = person
|
class PersonCollection(classtree.Base):
def __init__(self, collection_id=None, create_date=None, name=None):
pass
| 2 | 0 | 12 | 2 | 10 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 7 | 1 | 1 | 13 | 2 | 11 | 10 | 9 | 0 | 11 | 10 | 9 | 1 | 1 | 0 | 1 |
147,868 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/gadget/pytimer.py
|
angora.gadget.pytimer.Timer
|
class Timer(object):
"""Timer makes time measurement easy.
"""
def __init__(self):
self.elapse = 0.0
self.records = list()
@property
def total_elapse(self):
return sum(self.records)
# === single time, multiple time measurement ===
def start(self):
"""Start measuring.
"""
self.st = time.clock()
def stop(self):
"""Save last elapse time to self.records.
"""
self.elapse = time.clock() - self.st
self.records.append(self.elapse)
def timeup(self):
"""Print the last measurement elapse time, and return it.
"""
self.stop()
print("elapse %0.6f seconds" % self.elapse)
def click(self):
"""Record the last elapse time and start the next measurement.
"""
self.stop()
self.start()
def display(self):
"""Print the last elapse time.
"""
print("elapse %0.6f sec" % self.elapse)
def display_all(self):
"""Print detailed information.
"""
print( ("total elapse %0.6f seconds, last elapse %0.6f seconds, "
"took %s times measurement") % (
self.total_elapse, self.elapse, len(self.records)))
def reset(self):
"""Reset the timer.
"""
self.elapse = 0.0
self.records.clear()
# === function runtime measurement ===
@staticmethod
def test(func, howmany=1):
"""Run function speed test #howmany times, and display the: average, total, repeat times.
you can call this simply by Timer.test(func)
for more complicated cases, use the standard library
'timeit <https://docs.python.org/2/library/timeit.html>'_
"""
elapse = timeit.Timer(func).timeit(howmany)
print("average = %0.6f seconds, total = %0.6f seconds, repeat %s times." % (
elapse/howmany, elapse, howmany) )
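A minimal sketch of the measurement loop supported by the class above (note that ``time.clock()`` was removed in Python 3.8, so this only runs as written on older interpreters):

timer = Timer()

timer.start()
total = sum(range(1000000))   # work being measured
timer.timeup()                # stops and prints the elapsed time

# Several measurements in a row: click() records one lap and restarts.
timer.start()
for _ in range(3):
    sorted(range(100000), reverse=True)
    timer.click()
timer.display_all()

# One-off function benchmarking without an instance.
Timer.test(lambda: sum(range(100000)), howmany=10)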
|
class Timer(object):
'''Timer makes time measurement easy.
'''
def __init__(self):
pass
@property
def total_elapse(self):
pass
def start(self):
'''Start measuring.
'''
pass
def stop(self):
'''Save last elapse time to self.records.
'''
pass
def timeup(self):
'''Stop the timer and print the elapsed time of the last measurement.
'''
pass
def click(self):
'''Record the last elapse time and start the next measurement.
'''
pass
def display(self):
'''Print the last elapse time.
'''
pass
def display_all(self):
'''Print detailed information.
'''
pass
def reset(self):
'''Reset the timer.
'''
pass
@staticmethod
def test(func, howmany=1):
'''Run function speed test #howmany times, and display the: average, total, repeat times.
you can call this simply by Timer.test(func)
for more complicated cases, use the standard library
'timeit <https://docs.python.org/2/library/timeit.html>'_
'''
pass
| 13 | 9 | 5 | 0 | 3 | 2 | 1 | 0.72 | 1 | 2 | 0 | 0 | 9 | 3 | 10 | 10 | 65 | 10 | 32 | 17 | 19 | 23 | 27 | 15 | 16 | 1 | 1 | 0 | 10 |
147,869 |
MacHu-GWU/angora-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_angora-project/angora/timelib/timewrapper.py
|
angora.timelib.timewrapper.TimeWrapperUnittest
|
class TimeWrapperUnittest(unittest.TestCase):
def test_reformat(self):
self.assertEqual(timewrapper.reformat("2014-01-05", "%Y-%m-%d", "%d/%m/%Y"),
"05/01/2014")
self.assertEqual(timewrapper.reformat("2014-01-05 19:45:32", "%Y-%m-%d %H:%M:%S", "%Y/%m/%d"),
"2014/01/05")
def test_str2date(self):
self.assertEqual(timewrapper.std_datestr("September 20, 2014"),
"2014-09-20")
self.assertEqual(timewrapper.std_datestr("Sep 20, 2014"),
"2014-09-20")
self.assertRaises(NoMatchingTemplateError,
timewrapper.std_datestr, "[2014][05][01]")
def test_str2datetime(self):
self.assertEqual(
timewrapper.std_datetimestr("2014-07-13 8:12:34"),
"2014-07-13 08:12:34",
)
self.assertEqual(
timewrapper.std_datetimestr("2014-07-13 8:12:34 PM"),
"2014-07-13 20:12:34",
)
self.assertRaises(
NoMatchingTemplateError,
timewrapper.std_datetimestr, "[2014][07][13]",
)
def test_toordinal_fromordinal(self):
a_date = date(1920, 8, 23)
self.assertEqual(a_date.toordinal(),
timewrapper.toordinal(a_date))
self.assertEqual(date.fromordinal(701135),
timewrapper.fromordinal(701135))
def test_totimestamp_fromtimestamp(self):
"""test totimestamp and fromtimestamp method
"""
a_datetime = datetime(1969, 12, 31, 19, 0, 1)
if sys.version_info[0] == 3:
self.assertEqual(a_datetime.timestamp(),
timewrapper.totimestamp(a_datetime))
else:
self.assertEqual(1,
timewrapper.totimestamp(a_datetime))
self.assertEqual(datetime.fromtimestamp(1),
timewrapper.fromtimestamp(1))
a_datetime = datetime(1924, 2, 19, 12, 0, 0)
self.assertEqual(a_datetime,
timewrapper.fromtimestamp(timewrapper.totimestamp(a_datetime)))
def test_parser(self):
"""test universal parser.
"""
# parse_date
self.assertEqual(
timewrapper.parse_date("10-1-1949"), date(1949, 10, 1))
self.assertEqual(
timewrapper.parse_date(711766), date(1949, 10, 1))
self.assertEqual(
timewrapper.parse_date(datetime(1949, 10, 1, 8, 15, 0)),
date(1949, 10, 1))
# parse_datetime
self.assertEqual(
timewrapper.parse_datetime("1949-10-1 8:15:00"),
datetime(1949, 10, 1, 8, 15),
)
self.assertEqual(
timewrapper.parse_datetime(-1.0),
datetime(1969, 12, 31, 18, 59, 59),
)
self.assertEqual(
timewrapper.parse_datetime(1.0),
datetime(1969, 12, 31, 19, 0, 1),
)
self.assertEqual(
timewrapper.parse_datetime(date(1949, 10, 1)),
datetime(1949, 10, 1),
)
def test_dtime_range(self):
"""test dtime_range generator method
"""
# test start + end
self.assertListEqual(
[
datetime(2014, 1, 1, 3, 0, 0),
datetime(2014, 1, 1, 3, 5, 0),
datetime(2014, 1, 1, 3, 10, 0),
],
list(timewrapper.dtime_range(
start="2014-01-01 03:00:00",
end="2014-01-01 03:10:00",
freq="5min")),
)
# test start + periods
self.assertListEqual(
[
datetime(2014, 1, 1, 3, 0, 0),
datetime(2014, 1, 1, 3, 5, 0),
datetime(2014, 1, 1, 3, 10, 0),
],
list(timewrapper.dtime_range(
start="2014-01-01 03:00:00",
periods=3,
freq="5min")),
)
# test end + periods
self.assertListEqual(
[
datetime(2014, 1, 1, 3, 0, 0),
datetime(2014, 1, 1, 3, 5, 0),
datetime(2014, 1, 1, 3, 10, 0),
],
list(timewrapper.dtime_range(
end="2014-01-01 03:10:00",
periods=3,
freq="5min")),
)
# test take datetime as input
self.assertListEqual(
[
datetime(2014, 1, 1, 3, 0, 0),
datetime(2014, 1, 1, 3, 5, 0),
datetime(2014, 1, 1, 3, 10, 0),
],
list(timewrapper.dtime_range(
start=datetime(2014, 1, 1, 3, 0, 0),
end=datetime(2014, 1, 1, 3, 10, 0),
freq="5min")),
)
def test_weekday_series(self):
self.assertListEqual(
timewrapper.weekday_series(
"2014-01-01 06:30:25", "2014-02-01 06:30:25", weekday=2),
[
datetime(2014, 1, 7, 6, 30, 25),
datetime(2014, 1, 14, 6, 30, 25),
datetime(2014, 1, 21, 6, 30, 25),
datetime(2014, 1, 28, 6, 30, 25),
],
)
def test_day_month_year_interval(self):
# === day_interval ===
# with no mode argument
self.assertTupleEqual(
timewrapper.day_interval(2014, 3, 5, return_str=True),
("2014-03-05 00:00:00", "2014-03-05 23:59:59")
)
# datetime mode
self.assertTupleEqual(
timewrapper.day_interval(2014, 12, 31, return_str=False),
(datetime(2014, 12, 31, 0, 0, 0),
datetime(2014, 12, 31, 23, 59, 59))
)
# === month_interval ===
self.assertTupleEqual(
timewrapper.month_interval(2014, 3, return_str=True),
("2014-03-01 00:00:00", "2014-03-31 23:59:59")
)
self.assertTupleEqual(
timewrapper.month_interval(2014, 12, return_str=False),
(datetime(2014, 12, 1, 0, 0, 0), datetime(2014, 12, 31, 23, 59, 59))
)
# === year interval ===
self.assertTupleEqual(
timewrapper.year_interval(2014, return_str=True),
("2014-01-01 00:00:00", "2014-12-31 23:59:59")
)
self.assertTupleEqual(
timewrapper.year_interval(2014, return_str=False),
(datetime(2014, 1, 1, 0, 0, 0), datetime(2014, 12, 31, 23, 59, 59))
)
def test_randdate_randdatetime(self):
# test random date is between the boundary
a_date = timewrapper.randdate("2014-01-01", date(2014, 1, 31))
self.assertGreaterEqual(a_date, date(2014, 1, 1))
self.assertLessEqual(a_date, date(2014, 1, 31))
# test random datetime is between the boundary
a_datetime = timewrapper.randdatetime(
"2014-01-01", datetime(2014, 1, 31, 23, 59, 59))
self.assertGreaterEqual(a_datetime, datetime(2014, 1, 1, 0, 0, 0))
self.assertLessEqual(a_datetime, datetime(2014, 1, 31, 23, 59, 59))
def test_add_seconds_minutes_hours(self):
self.assertEqual(timewrapper.add_seconds("2014-01-01", 1),
datetime(2014, 1, 1, 0, 0, 1))
self.assertEqual(timewrapper.add_minutes("2014-01-01", 1),
datetime(2014, 1, 1, 0, 1, 0))
self.assertEqual(timewrapper.add_hours("2014-01-01", 1),
datetime(2014, 1, 1, 1, 0, 0))
self.assertEqual(timewrapper.add_days("2014-01-01 18:30:25", 1),
datetime(2014, 1, 2, 18, 30, 25))
def test_add_months(self):
self.assertEqual(timewrapper.add_months("2012-03-31", -1),
datetime(2012, 2, 29))
self.assertEqual(timewrapper.add_months("2012-03-30", -1),
datetime(2012, 2, 29))
self.assertEqual(timewrapper.add_months("2012-03-29", -1),
datetime(2012, 2, 29))
def test_add_years(self):
self.assertEqual(timewrapper.add_years("2012-02-29", -1),
datetime(2011, 2, 28))
self.assertEqual(timewrapper.add_years("2012-02-29", -1),
datetime(2011, 2, 28))
self.assertEqual(timewrapper.add_years("2012-02-29", -1),
datetime(2011, 2, 28))
def test_round_to_specified_time(self):
self.assertEqual(
timewrapper.round_to_specified_time(
datetime(2014, 6, 1, 12, 50, 23),
hour=0, minute=0, second=0, mode="lower"
),
datetime(2014, 6, 1),
)
self.assertEqual(
timewrapper.round_to_specified_time(
datetime(2014, 6, 1, 12, 50, 23),
hour=0, minute=0, second=0, mode="upper"
),
datetime(2014, 6, 2),
)
|
class TimeWrapperUnittest(unittest.TestCase):
def test_reformat(self):
pass
def test_str2date(self):
pass
def test_str2datetime(self):
pass
def test_toordinal_fromordinal(self):
pass
def test_totimestamp_fromtimestamp(self):
'''test totimestamp and fromtimestamp method
'''
pass
def test_parser(self):
'''test universal parser.
'''
pass
def test_dtime_range(self):
'''test dtime_range generator method
'''
pass
def test_weekday_series(self):
pass
def test_day_month_year_interval(self):
pass
def test_randdate_randdatetime(self):
pass
def test_add_seconds_minutes_hours(self):
pass
def test_add_months(self):
pass
def test_add_years(self):
pass
def test_round_to_specified_time(self):
pass
| 15 | 3 | 16 | 1 | 14 | 1 | 1 | 0.1 | 1 | 4 | 1 | 0 | 14 | 0 | 14 | 86 | 243 | 29 | 195 | 19 | 180 | 19 | 69 | 19 | 54 | 2 | 2 | 1 | 15 |
147,870 |
MacHu-GWU/angora-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_angora-project/angora/timelib/timewrapper.py
|
angora.timelib.timewrapper.TemplateUnittest
|
class TemplateUnittest(unittest.TestCase):
def test_all(self):
for pattern, example in _DATE_TEMPLATE.items():
datetime.strptime(example, pattern).date()
for pattern, example in _DATETIME_TEMPLATE.items():
datetime.strptime(example, pattern)
|
class TemplateUnittest(unittest.TestCase):
def test_all(self):
pass
| 2 | 0 | 6 | 1 | 5 | 0 | 3 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 73 | 7 | 1 | 6 | 3 | 4 | 0 | 6 | 3 | 4 | 3 | 2 | 1 | 3 |
147,871 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/filesystem/filesystem.py
|
angora.filesystem.filesystem.FileFilter
|
class FileFilter(object):
"""File filter container class.
"""
@staticmethod
def image(winfile):
if winfile.ext in [".jpg", ".jpeg", ".png", ".gif", ".tiff",
".bmp", ".ppm", ".pgm", ".pbm", ".pnm", ".svg"]:
return True
else:
return False
@staticmethod
def audio(winfile):
if winfile.ext in [".mp3", ".mp4", ".aac", ".m4a", ".wma",
".wav", ".ape", ".tak", ".tta",
".3gp", ".webm", ".ogg",]:
return True
else:
return False
@staticmethod
def video(winfile):
if winfile.ext in [".avi", ".wmv", ".mkv", ".mp4", ".flv",
".vob", ".mov", ".rm", ".rmvb", "3gp", ".3g2", ".nsv", ".webm",
".mpg", ".mpeg", ".m4v", ".iso",]:
return True
else:
return False
@staticmethod
def pdf(winfile):
if winfile.ext == ".pdf":
return True
else:
return False
@staticmethod
def word(winfile):
if winfile.ext == ".doc":
return True
else:
return False
@staticmethod
def excel(winfile):
if winfile.ext == ".xlsx":
return True
else:
return False
@staticmethod
def ppt(winfile):
if winfile.ext == ".ppt":
return True
else:
return False
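A small sketch showing how the predicates above plug into FileCollection.select; FileCollection comes from the same filesystem module and ``"media"`` is a placeholder path:

fc = FileCollection.from_path("media")

images = fc.select(FileFilter.image)                   # only image files
videos, rest = fc.select(FileFilter.video, keepboth=True)

print(images.howmany, "images,", videos.howmany, "videos")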
|
class FileFilter(object):
'''File filter container class.
'''
@staticmethod
def image(winfile):
pass
@staticmethod
def audio(winfile):
pass
@staticmethod
def video(winfile):
pass
@staticmethod
def pdf(winfile):
pass
@staticmethod
def word(winfile):
pass
@staticmethod
def excel(winfile):
pass
@staticmethod
def ppt(winfile):
pass
| 15 | 1 | 6 | 0 | 6 | 0 | 2 | 0.04 | 1 | 0 | 0 | 0 | 0 | 0 | 7 | 7 | 56 | 6 | 48 | 15 | 33 | 2 | 29 | 8 | 21 | 2 | 1 | 1 | 14 |
147,872 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/gadget/messenger.py
|
angora.gadget.messenger.Messenger
|
class Messenger(object):
"""Messenger is an utility to easily disable or enable all your ``print()``
function.
Sometime you may have a lots of ``print("something")`` in your script. But
how about if you want to disable them all or part of them? Usually we have
to comment them all.
Now you can call ``Messenger.off()`` to disable all ``print("your message")``.
Similarly, you can call ``Messenger.on()`` to enable print function.
Usage guide:
- create an instance: ``messenger = Messenger()``
- replace all your print("your message") with messenger.show("your message)
- call ``messenger.off()`` to disable all ``messenger.show()``
- call ``messenger.on()`` to enable all ``messenger.show()``
**中文文档**
在Python程序中为了调试方便, 我们通常会有大量的 ``print()``。但如果我们想要一次
性禁用大量的打印功能, 我们就需要很麻烦的注释掉许多print()。
Messenger解决这一问题的思路是:
每当我们想要用 ``print()`` 的时候, 我们可以使用Messenger.show("your message")
我们只需要调用Messenger.off()即可禁用之后所有的打印功能。同样如果需要恢复
打印功能, 我们只需要调用Messenger.on()即可。
"""
def __init__(self, enable_verbose=True):
"""echo=False to disable all Messenger.show()
"""
self.enable_verbose = enable_verbose
if self.enable_verbose:
self.show = self._print_screen
else:
self.show = self._not_print_screen
def _print_screen(self, text):
print(text)
def _not_print_screen(self, text):
pass
def on(self):
"""enable Messenger.show()"""
self.show = self._print_screen
def off(self):
"""disable Messenger.show()"""
self.show = self._not_print_screen
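A minimal sketch of the on/off switch described in the class docstring:

messenger = Messenger()

messenger.show("loading data ...")   # printed
messenger.off()
messenger.show("debug detail")       # silently ignored
messenger.on()
messenger.show("done")               # printed again

quiet = Messenger(enable_verbose=False)   # starts muted
quiet.show("never printed")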
|
class Messenger(object):
'''Messenger is a utility that lets you disable or enable all of your
``print()`` calls at once.
Sometimes you may have a lot of ``print("something")`` calls in your script.
What if you want to disable all or some of them? Usually you would have to
comment them all out.
Now you can call ``Messenger.off()`` to disable all ``print("your message")``.
Similarly, you can call ``Messenger.on()`` to enable printing again.
Usage guide:
- create an instance: ``messenger = Messenger()``
- replace all your print("your message") with messenger.show("your message")
- call ``messenger.off()`` to disable all ``messenger.show()``
- call ``messenger.on()`` to enable all ``messenger.show()``

In a Python program we often keep many ``print()`` calls around for
debugging, and disabling a lot of them at once normally means commenting out
every single print().
Messenger solves this as follows: wherever you would use ``print()``, call
Messenger.show("your message") instead. Calling Messenger.off() then disables
all subsequent printing, and calling Messenger.on() restores it.
'''
def __init__(self, enable_verbose=True):
'''Set enable_verbose=False to disable all Messenger.show() output.
'''
pass
def _print_screen(self, text):
pass
def _not_print_screen(self, text):
pass
def on(self):
'''enable Messenger.show()'''
pass
def off(self):
'''disable Messenger.show()'''
pass
| 6 | 4 | 4 | 0 | 3 | 1 | 1 | 1.6 | 1 | 0 | 0 | 0 | 5 | 2 | 5 | 5 | 53 | 14 | 15 | 8 | 9 | 24 | 14 | 8 | 8 | 2 | 1 | 1 | 6 |
147,873 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/baseclass/nameddict.py
|
angora.baseclass.nameddict.Base
|
class Base(object):
"""nameddict base class.
if you really care about performance, use collections.namedtuple.
"""
def __init__(self, **kwargs):
for attr, value in kwargs.items():
object.__setattr__(self, attr, value)
def __repr__(self):
kwargs = list()
for attr, value in self.items():
kwargs.append("%s=%r" % (attr, value))
return "%s(%s)" % (self.__class__.__name__, ", ".join(kwargs))
@classmethod
def _make(cls, d):
return cls(**d)
def keys(self):
return [key for key, value in self.items()]
def values(self):
return [value for key, value in self.items()]
def items(self):
return sorted(self.__dict__.items(), key=lambda x: x[0])
def to_dict(self):
return self.__dict__
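A small sketch of subclassing the base class above; the ``User`` class is a hypothetical example, not part of the module:

class User(Base):
    pass

user = User(user_id=1, name="Alice")
print(user)                  # User(name='Alice', user_id=1), attributes sorted by name
print(user.keys())           # ['name', 'user_id']
same_user = User._make({"user_id": 1, "name": "Alice"})
print(same_user.to_dict() == user.to_dict())   # True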
|
class Base(object):
'''nameddict base class.
if you really care about performance, use collections.namedtuple.
'''
def __init__(self, **kwargs):
pass
def __repr__(self):
pass
@classmethod
def _make(cls, d):
pass
def keys(self):
pass
def values(self):
pass
def items(self):
pass
def to_dict(self):
pass
| 9 | 1 | 3 | 0 | 3 | 0 | 1 | 0.15 | 1 | 1 | 0 | 2 | 6 | 0 | 7 | 7 | 31 | 8 | 20 | 14 | 11 | 3 | 19 | 11 | 11 | 2 | 1 | 1 | 9 |
147,874 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/baseclass/classtree.py
|
angora.baseclass.classtree.CodeGenerator
|
class CodeGenerator(object):
"""Worker class
"""
def __init__(self, tab=" ", indent=0):
self.Tab = tab
self.Tab2 = tab * 2
self.indent = indent
self.basename = "classtree.Base"
self._classes = dict()
self.classes = set()
self.lines = [
"#!/usr/bin/env python",
"# -*- coding: utf-8 -*-",
"",
"import datetime",
"from angora.baseclass import classtree",
]
def reset(self):
self._classes = dict()
self.classes = set()
self.lines = [
"#!/usr/bin/env python",
"# -*- coding: utf-8 -*-",
"",
"import datetime",
"from angora.baseclass import classtree",
]
def pre_process(self, class_data):
classname = self.formatted_classname(class_data["classname"])
self._classes[classname] = set()
for key in class_data.get("metadata", dict()):
self._classes[classname].add(key)
for subclass_data in class_data.get("subclass", list()):
self.pre_process(subclass_data)
def sort_metadata(self):
for k, v in self._classes.items():
self._classes[k] = list(v)
self._classes[k].sort()
@property
def code(self):
return "\n".join([self.Tab * self.indent + line for line in self.lines])
def formatted_classname(self, text):
return text[0].upper() + text[1:]
def formatted_instancename(self, text):
return text[0].lower() + text[1:]
def sorted_dict(self, d):
return sorted(d.items(), key=lambda x: x[0], reverse=False)
def repr_def_class(self, class_data):
"""Create code like this::
class Person(Base):
def __init__(self, person_id=None, name=None):
self.person_id = person_id
self.name = name
"""
classname = self.formatted_classname(class_data["classname"])
if classname not in self.classes:
self.lines.append("")
self.lines.append("class %s(%s):" % (classname, self.basename))
kwargs = list()
setattr_arguments = list()
for attr in self._classes[classname]:
kwargs.append("%s=None" % attr)
setattr_arguments.append(
self.Tab2 + "self.%s = %s" % (attr, attr))
if len(kwargs):
line = self.Tab + "def __init__(self, %s):" % ", ".join(kwargs)
else:
line = self.Tab + "def __init__(self):"
self.lines.append(line)
for setattr_argument in setattr_arguments:
self.lines.append(setattr_argument)
if len(setattr_arguments):
self.lines.append("")
self.classes.add(classname)
def repr_new_instance(self, class_data):
"""Create code like this::
person = Person(name='Jack', person_id=1)
"""
classname = self.formatted_classname(class_data["classname"])
instancename = self.formatted_instancename(class_data["classname"])
arguments = list()
for key, value in self.sorted_dict(class_data.get("metadata", dict())):
arguments.append("%s=%r" % (key, value))
return "%s = %s(%s)" % (
instancename, classname, ", ".join(arguments))
def repr_setattr(self, class_data):
"""Create code like this::
person = Person(name='Jack', person_id=1)
self.name____Jack = person
self.person_id____1 = person
person = Person(name='Paul', person_id=2)
self.name____Paul = person
self.person_id____2 = person
"""
def get_indexable_attributes(class_data):
def isvalid(text):
for char in r"""~`!#%^&*()+=[]{}|\:;"'/.,<> """:
if char in text:
return False
return True
indexable_attributes = list()
for key, value in class_data.get("metadata", dict()).items():
if isinstance(value, _int_type):
indexable_attributes.append(key)
elif isinstance(value, _str_type):
if isvalid(value):
indexable_attributes.append(key)
return indexable_attributes
if "subclass" in class_data:
for subclass_data in class_data["subclass"]:
instancename = self.formatted_instancename(subclass_data["classname"])
self.lines.append(self.Tab2 + self.repr_new_instance(subclass_data))
indexable_attributes = get_indexable_attributes(subclass_data)
for key, value in self.sorted_dict(subclass_data.get("metadata", dict())):
if key in indexable_attributes:
if isinstance(value, _int_type):
if value < 0:
self.lines.append(self.Tab2 + "self.%s____neg%s = %s" % (
key, -value, instancename))
else:
self.lines.append(self.Tab2 + "self.%s____%s = %s" % (
key, value, instancename))
else:
self.lines.append(self.Tab2 + "self.%s____%s = %s" % (
key, value, instancename))
self.lines.append(self.Tab2)
def repr_class_data(self, class_data):
"""Create code like this::
class Person(classtree.Base):
def __init__(self, name=None, person_id=None):
self.name = name
self.person_id = person_id
class PersonCollection(classtree.Base):
def __init__(self, collection_id=None, create_date=None, name=None):
self.collection_id = collection_id
self.create_date = create_date
self.name = name
person = Person(name='Jack', person_id=1)
self.name____Jack = person
self.person_id____1 = person
person = Person(name='Paul', person_id=2)
self.name____Paul = person
self.person_id____2 = person
"""
if "subclass" in class_data:
for subclass_data in class_data["subclass"]:
self.repr_class_data(subclass_data)
self.repr_def_class(class_data)
self.repr_setattr(class_data)
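A sketch of how the worker above could be driven, assuming a nested ``class_data`` dict of the shape its methods expect (``classname``, optional ``metadata`` and ``subclass`` keys); the concrete values here are illustrative only:

class_data = {
    "classname": "personCollection",
    "metadata": {"collection_id": 1, "name": "Person"},
    "subclass": [
        {"classname": "person", "metadata": {"person_id": 1, "name": "Jack"}},
        {"classname": "person", "metadata": {"person_id": 2, "name": "Paul"}},
    ],
}

gen = CodeGenerator()
gen.pre_process(class_data)      # collect attribute names per class
gen.sort_metadata()              # freeze them in sorted order
gen.repr_class_data(class_data)  # emit class definitions and indexed attributes
print(gen.code)                  # the generated module source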
|
class CodeGenerator(object):
'''Worker class
'''
def __init__(self, tab=" ", indent=0):
pass
def reset(self):
pass
def pre_process(self, class_data):
pass
def sort_metadata(self):
pass
@property
def code(self):
pass
def formatted_classname(self, text):
pass
def formatted_instancename(self, text):
pass
def sorted_dict(self, d):
pass
def repr_def_class(self, class_data):
'''Create code like this::
class Person(Base):
def __init__(self, person_id=None, name=None):
self.person_id = person_id
self.name = name
'''
pass
def repr_new_instance(self, class_data):
'''Create code like this::
person = Person(name='Jack', person_id=1)
'''
pass
def repr_setattr(self, class_data):
'''Create code like this::
person = Person(name='Jack', person_id=1)
self.name____Jack = person
self.person_id____1 = person
person = Person(name='Paul', person_id=2)
self.name____Paul = person
self.person_id____2 = person
'''
pass
def get_indexable_attributes(class_data):
pass
def isvalid(text):
pass
def repr_class_data(self, class_data):
'''Create code like this::
class Person(classtree.Base):
def __init__(self, name=None, person_id=None):
self.name = name
self.person_id = person_id
class PersonCollection(classtree.Base):
def __init__(self, collection_id=None, create_date=None, name=None):
self.collection_id = collection_id
self.create_date = create_date
self.name = name
person = Person(name='Jack', person_id=1)
self.name____Jack = person
self.person_id____1 = person
person = Person(name='Paul', person_id=2)
self.name____Paul = person
self.person_id____2 = person
'''
pass
| 16 | 5 | 13 | 1 | 9 | 3 | 3 | 0.36 | 1 | 3 | 0 | 0 | 12 | 7 | 12 | 12 | 173 | 24 | 113 | 45 | 97 | 41 | 91 | 44 | 76 | 7 | 1 | 6 | 37 |
147,875 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/bot/anjian.py
|
angora.bot.anjian.Command
|
class Command(object):
"""
"""
def __init__(self, name, code):
self.name = name
self.code = code
def __str__(self):
return self.code
|
class Command(object):
'''
'''
def __init__(self, name, code):
pass
def __str__(self):
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 1 | 0.33 | 1 | 0 | 0 | 0 | 2 | 2 | 2 | 2 | 9 | 1 | 6 | 5 | 3 | 2 | 6 | 5 | 3 | 1 | 1 | 0 | 2 |
147,876 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/bot/anjian.py
|
angora.bot.anjian.Script
|
class Script(object):
"""
"""
default_delay = 0
def __init__(self):
self.lines = list()
def add(self, command):
self.lines.append(command)
def to_script(self):
return "\n".join([str(command) for command in self.lines])
def to_file(self, abspath):
with open(abspath, "wb") as f:
f.write(self.to_script().encode("utf-8"))
def set_default_delay(self, ms):
self.default_delay = ms
def _delay(self, ms):
"""Implement default delay mechanism.
"""
if ms:
self.Delay(ms)
else:
if self.default_delay:
self.Delay(self.default_delay)
# Method
def Delay(self, ms):
"""Sleep for <ms> milliseconds.
:param ms: milliseconds
"""
cmd = Command("Delay", "Delay %s" % ms)
self.add(cmd)
def SayString(self, text, delay=0):
"""Enter some text.
:param text: the text you want to enter.
"""
self._delay(delay)
cmd = Command("SayString", 'SayString "%s"' % text)
self.add(cmd)
# Key press, down and up
def KeyPress(self, key, n=1, delay=0):
"""Press key for n times.
:param key:
:param n: press key for n times
"""
self._delay(delay)
cmd = Command("KeyPress", 'KeyPress "%s", %s' % (key, n))
self.add(cmd)
def KeyDown(self, key, n=1, delay=0):
"""Press down a key.
:param key:
:param n: press down key for n times
"""
self._delay(delay)
cmd = Command("KeyDown", 'KeyDown "%s", %s' % (key, n))
self.add(cmd)
def KeyUp(self, key, n=1, delay=0):
"""Release a key.
:param key:
:param n: release key for n times
"""
self._delay(delay)
cmd = Command("KeyUp", 'KeyUp "%s", %s' % (key, n))
self.add(cmd)
def Alt(self, n=1, delay=0):
"""
"""
self._delay(delay)
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.Alt, n)))
def Ctrl(self, n=1, delay=0):
"""
"""
self._delay(delay)
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.Ctrl, n)))
def Shift(self, n=1, delay=0):
"""
"""
self._delay(delay)
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.Shift, n)))
def Tab(self, n=1, delay=0):
"""
"""
self._delay(delay)
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.Tab, n)))
def Space(self, n=1, delay=0):
"""
"""
self._delay(delay)
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.Space, n)))
def Enter(self, n=1, delay=0):
"""
"""
self._delay(delay)
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.Enter, n)))
# Combo
def AltTab(self, n=1, delay=0):
"""Press down Alt, then press n times Tab, then release Alt.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Alt, 1)))
for i in range(n):
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.Tab, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Alt, 1)))
def Ctrl_C(self, delay=0):
"""Ctrl + C shortcut.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1)))
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.C, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1)))
def Ctrl_V(self, delay=0):
"""Ctrl + V shortcut.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1)))
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.V, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1)))
def Ctrl_W(self, delay=0):
"""Ctrl + W shortcut.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1)))
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.W, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1)))
|
class Script(object):
'''
'''
def __init__(self):
pass
def add(self, command):
pass
def to_script(self):
pass
def to_file(self, abspath):
pass
def set_default_delay(self, ms):
pass
def _delay(self, ms):
'''Implement default delay mechanism.
'''
pass
def Delay(self, ms):
'''Sleep for <ms> milliseconds.
:param ms: milliseconds
'''
pass
def SayString(self, text, delay=0):
'''Enter some text.
:param text: the text you want to enter.
'''
pass
def KeyPress(self, key, n=1, delay=0):
'''Press key for n times.
:param key:
:param n: press key for n times
'''
pass
def KeyDown(self, key, n=1, delay=0):
'''Press down a key.
:param key:
:param n: press down key for n times
'''
pass
def KeyUp(self, key, n=1, delay=0):
'''Release a key.
:param key:
:param n: release key for n times
'''
pass
def Alt(self, n=1, delay=0):
'''
'''
pass
def Ctrl(self, n=1, delay=0):
'''
'''
pass
def Shift(self, n=1, delay=0):
'''
'''
pass
def Tab(self, n=1, delay=0):
'''
'''
pass
def Space(self, n=1, delay=0):
'''
'''
pass
def Enter(self, n=1, delay=0):
'''
'''
pass
def AltTab(self, n=1, delay=0):
'''Press down Alt, then press n times Tab, then release Alt.
'''
pass
def Ctrl_C(self, delay=0):
'''Ctrl + C shortcut.
'''
pass
def Ctrl_V(self, delay=0):
'''Ctrl + V shortcut.
'''
pass
def Ctrl_W(self, delay=0):
'''Ctrl + W shortcut.
'''
pass
| 22 | 17 | 6 | 0 | 4 | 2 | 1 | 0.58 | 1 | 5 | 2 | 0 | 21 | 1 | 21 | 21 | 147 | 25 | 77 | 31 | 55 | 45 | 76 | 30 | 54 | 3 | 1 | 2 | 24 |
147,877 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/bot/macro.py
|
angora.bot.macro.Bot
|
class Bot():
def __init__(self):
self.mouse = PyMouse()
self.keyboard = PyKeyboard()
def Delay(self, n):
time.sleep(n)
""" ====== Mouse Macro ====== """
def Left_click(self, x, y, n = 1, dl = 0):
"""在屏幕某点左键点击若干次
"""
self.Delay(dl)
self.mouse.click(x, y, 1, n)
def Right_click(self, x, y, n = 1, dl = 0):
"""在屏幕某点右键点击若干次
"""
self.Delay(dl)
self.mouse.click(x, y, 2, n)
def Double_click(self, x, y, dl = 0):
"""在屏幕的某点双击
"""
self.Delay(dl)
self.mouse.click(x, y, 1, n = 2)
def Scroll_up(self, n, dl = 0):
"""鼠标滚轮向上n次
"""
self.Delay(dl)
self.mouse.scroll(vertical = n)
def Scroll_down(self, n, dl = 0):
"""鼠标滚轮向下n次
"""
self.Delay(dl)
self.mouse.scroll(vertical = -n)
def Move_to(self, x, y, dl = 0):
"""鼠标移动到x, y的坐标处
"""
self.Delay(dl)
self.mouse.move(x, y)
def Drag_and_release(self, start, end, dl = 0):
"""从start的坐标处鼠标左键单击拖曳到end的坐标处
start, end是tuple. 格式是(x, y)
"""
self.Delay(dl)
self.mouse.press(start[0], start[1], 1)
self.mouse.drag(end[0], end[1])
self.Delay(0.1)
self.mouse.release(end[0], end[1], 1)
def Screen_size(self):
width, height = self.mouse.screen_size()
return width, height
def WhereXY(self):
x_axis, y_axis = self.mouse.position()
return x_axis, y_axis
""" ====== Keyboard Macro ====== """
"""COMBINATION组合键"""
"""Ctrl系列"""
def Ctrl_c(self, dl = 0):
"""Ctrl + c 复制
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.control_key)
self.keyboard.tap_key("c")
self.keyboard.release_key(self.keyboard.control_key)
def Ctrl_x(self, dl = 0):
"""Ctrl + x 剪切
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.control_key)
self.keyboard.tap_key("x")
self.keyboard.release_key(self.keyboard.control_key)
def Ctrl_v(self, dl = 0):
"""Ctrl + v 粘贴
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.control_key)
self.keyboard.tap_key("v")
self.keyboard.release_key(self.keyboard.control_key)
def Ctrl_z(self, dl = 0):
"""Ctrl + z 撤销上一次操作
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.control_key)
self.keyboard.tap_key("z")
self.keyboard.release_key(self.keyboard.control_key)
def Ctrl_y(self, dl = 0):
"""Ctrl + y 重复上一次操作
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.control_key)
self.keyboard.tap_key("y")
self.keyboard.release_key(self.keyboard.control_key)
def Ctrl_a(self, dl = 0):
"""Ctrl + a 全选
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.control_key)
self.keyboard.tap_key("a")
self.keyboard.release_key(self.keyboard.control_key)
def Ctrl_Fn(self, n, dl = 0):
"""Ctrl + Fn1~12 组合键
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.control_key)
self.keyboard.tap_key(self.keyboard.function_keys[n])
self.keyboard.release_key(self.keyboard.control_key)
"""Alt系列"""
def Alt_Tab(self, dl = 0):
"""Alt + Tab 组合键
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.alt_key)
self.keyboard.tap_key(self.keyboard.tab_key)
self.keyboard.release_key(self.keyboard.alt_key)
def Alt_Fn(self, n, dl = 0):
"""Alt + Fn1~12 组合键
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.alt_key)
self.keyboard.tap_key(self.keyboard.function_keys[n])
self.keyboard.release_key(self.keyboard.alt_key)
"""SINGLE KEY单个键盘键"""
def Up(self, n = 1, dl = 0):
"""上方向键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.up_key, n)
def Down(self, n = 1, dl = 0):
"""下方向键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.down_key, n)
def Left(self, n = 1, dl = 0):
"""左方向键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.left_key, n)
def Right(self, n = 1, dl = 0):
"""右方向键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.right_key, n)
def Enter(self, n = 1, dl = 0):
"""回车键/换行键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.enter_key, n)
def Delete(self, n = 1, dl = 0):
"""删除键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.delete_key, n)
def Back(self, n = 1, dl = 0):
"""退格键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.backspace_key, n)
def Space(self, n = 1, dl = 0):
"""空格键n次
"""
self.Delay(dl)
self.keyboard.tap_key(" ", n)
def Fn(self, n, dl = 0):
"""功能键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.function_keys[n])
def Char(self, char, n = 1, dl = 0):
"""输入任意单字符n次,只要能在键盘上打出来的字符都可以
"""
if len(char) == 1:
self.Delay(dl)
self.keyboard.tap_key(char)
else:
raise Exception("""method "Char()" can only take one character.""")
def Type_string(self, text, interval = 0, dl = 0):
"""键盘输入字符串,interval是字符间输入时间间隔,单位"秒"
"""
self.Delay(dl)
self.keyboard.type_string(text, interval)
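A small sketch of driving the bot above; it needs PyMouse/PyKeyboard (PyUserInput) to be installed, and the coordinates are placeholders:

bot = Bot()

width, height = bot.Screen_size()
bot.Move_to(width // 2, height // 2, dl=0.5)   # move to the screen center after 0.5 s
bot.Double_click(width // 2, height // 2)
bot.Type_string("hello", interval=0.05)
bot.Enter()
bot.Ctrl_a(dl=0.2)
bot.Ctrl_c()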
|
class Bot():
def __init__(self):
pass
def Delay(self, n):
pass
def Left_click(self, x, y, n = 1, dl = 0):
'''Left-click at the given screen point n times.
'''
pass
def Right_click(self, x, y, n = 1, dl = 0):
'''Right-click at the given screen point n times.
'''
pass
def Double_click(self, x, y, dl = 0):
'''Double-click at the given screen point.
'''
pass
def Scroll_up(self, n, dl = 0):
'''Scroll the mouse wheel up n times.
'''
pass
def Scroll_down(self, n, dl = 0):
'''Scroll the mouse wheel down n times.
'''
pass
def Move_to(self, x, y, dl = 0):
'''Move the mouse to the coordinate (x, y).
'''
pass
def Drag_and_release(self, start, end, dl = 0):
'''Press the left mouse button at the start coordinate and drag to the end
coordinate; start and end are tuples of the form (x, y)
'''
pass
def Screen_size(self):
pass
def WhereXY(self):
pass
def Ctrl_c(self, dl = 0):
'''Ctrl + c: copy.
'''
pass
def Ctrl_x(self, dl = 0):
'''Ctrl + x: cut.
'''
pass
def Ctrl_v(self, dl = 0):
'''Ctrl + v: paste.
'''
pass
def Ctrl_z(self, dl = 0):
'''Ctrl + z: undo the last operation.
'''
pass
def Ctrl_y(self, dl = 0):
'''Ctrl + y: redo the last operation.
'''
pass
def Ctrl_a(self, dl = 0):
'''Ctrl + a: select all.
'''
pass
def Ctrl_Fn(self, n, dl = 0):
'''Ctrl + F1~F12 key combination.
'''
pass
def Alt_Tab(self, dl = 0):
'''Alt + Tab key combination.
'''
pass
def Alt_Fn(self, n, dl = 0):
'''Alt + F1~F12 key combination.
'''
pass
def Up(self, n = 1, dl = 0):
'''Press the up arrow key n times.
'''
pass
def Down(self, n = 1, dl = 0):
'''Press the down arrow key n times.
'''
pass
def Left(self, n = 1, dl = 0):
'''Press the left arrow key n times.
'''
pass
def Right(self, n = 1, dl = 0):
'''Press the right arrow key n times.
'''
pass
def Enter(self, n = 1, dl = 0):
'''Press the Enter/Return key n times.
'''
pass
def Delete(self, n = 1, dl = 0):
'''Press the Delete key n times.
'''
pass
def Back(self, n = 1, dl = 0):
'''Press the Backspace key n times.
'''
pass
def Space(self, n = 1, dl = 0):
'''Press the Space key n times.
'''
pass
def Fn(self, n, dl = 0):
'''Press the n-th function key.
'''
pass
def Char(self, char, n = 1, dl = 0):
'''Type any single character n times; any character that can be typed on the keyboard works.
'''
pass
def Type_string(self, text, interval = 0, dl = 0):
'''Type a string on the keyboard; interval is the pause between characters, in seconds.
'''
pass
| 32 | 27 | 6 | 0 | 4 | 2 | 1 | 0.52 | 0 | 1 | 0 | 0 | 31 | 2 | 31 | 31 | 210 | 32 | 117 | 36 | 85 | 61 | 116 | 36 | 84 | 2 | 0 | 1 | 32 |
147,878 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/crawler/simplecrawler.py
|
angora.crawler.simplecrawler.SimpleCrawler
|
class SimpleCrawler(object):
"""A basic web crawler class.
"""
def __init__(self, timeout=6, sleeptime=0):
self.auth = requests
self.default_timeout = timeout
self.default_sleeptime = sleeptime
self.default_header = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, sdch",
"Accept-Language": "en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4",
"Content-Type": "text/html; charset=UTF-8",
"Connection": "close",
"Referer": None,
}
self.decoder = SmartDecoder()
self.domain_encoding_map = dict()
def set_timeout(self, timeout):
"""Set default timeout limit in second.
"""
self.default_timeout = timeout
def set_sleeptime(self, sleeptime):
"""Change default_sleeptime.
"""
self.default_sleeptime = sleeptime
def set_referer(self, url):
"""Set a referer link. This is an Anti "anti-leech" technique
usually set the referer link to the website you are crawling.
"""
self.default_header["Referer"] = url
def login(self, url, payload):
"""Performe log in.
url is the login page url, for example:
https://login.secureserver.net/index.php?
payload includes the account and password for example:
``{"loginlist": "YourAccount", "password": "YourPassword"}``
"""
self.auth = requests.Session()
try:
self.auth.post(url, data=payload, timeout=self.default_timeout)
print("successfully logged in to %s" % url)
return True
except:
return False
def get_domain(self, url):
"""Return the domain of this url.
"""
return "/".join(url.split("/")[:3])
def get_response(self, url, timeout=None):
"""Return http request response.
"""
if not timeout:
timeout = self.default_timeout
if self.default_sleeptime:
time.sleep(self.default_sleeptime)
try:
return self.auth.get(url, headers=self.default_header, timeout=timeout)
except:
return None
def html_with_encoding(self, url, timeout=None, encoding="utf-8"):
"""Manually get html with user encoding setting.
"""
response = self.get_response(url, timeout=timeout)
if response:
return self.decoder.decode(response.content, encoding)[0]
else:
return None
def html(self, url, timeout=None):
"""High level method to get http request response in text.
smartly handle the encoding problem.
"""
response = self.get_response(url, timeout=timeout)
if response:
domain = self.get_domain(url)
if domain in self.domain_encoding_map: # domain have been visited
try: # apply extreme decoding
html = self.decoder.decode(response.content,
self.domain_encoding_map[domain])[0]
return html
except Exception as e:
print(e)
return None
else: # never visit this domain
try:
html, encoding = self.decoder.autodecode(response.content)
# save chardet analysis result
self.domain_encoding_map[domain] = encoding
return html
except Exception as e:
print(e)
return None
else:
return None
def binary(self, url, timeout=None):
"""High level method to get http request response in bytes.
"""
response = self.get_response(url, timeout=timeout)
if response:
return response.content
else:
return None
def download(self, url, dst, timeout=None):
"""Download the binary file at url to distination path.
"""
response = self.get_response(url, timeout=timeout)
if response:
with open(dst, "wb") as f:
for block in response.iter_content(1024):
if not block:
break
f.write(block)
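A short sketch of the crawler above; the URLs and output path are placeholders, and the login payload keys follow the ``login`` docstring:

spider = SimpleCrawler(timeout=10, sleeptime=1)
spider.set_referer("http://www.example.com")

html = spider.html("http://www.example.com/page.html")
if html is not None:
    print(len(html), "characters fetched")

# Binary content and file download use the same response handling.
spider.download("http://www.example.com/logo.png", "logo.png")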
|
class SimpleCrawler(object):
'''A basic web crawler class.
'''
def __init__(self, timeout=6, sleeptime=0):
pass
def set_timeout(self, timeout):
'''Set default timeout limit in second.
'''
pass
def set_sleeptime(self, sleeptime):
'''Change default_sleeptime.
'''
pass
def set_referer(self, url):
'''Set a referer link. This is an anti "anti-leech" technique;
usually you set the referer to the website you are crawling.
'''
pass
def login(self, url, payload):
'''Perform log in.
url is the login page url, for example:
https://login.secureserver.net/index.php?
payload includes the account and password for example:
``{"loginlist": "YourAccount", "password": "YourPassword"}``
'''
pass
def get_domain(self, url):
'''Return the domain of this url.
'''
pass
def get_response(self, url, timeout=None):
'''Return http request response.
'''
pass
def html_with_encoding(self, url, timeout=None, encoding="utf-8"):
'''Manually get html with user encoding setting.
'''
pass
def html(self, url, timeout=None):
'''High level method to get http request response in text.
smartly handle the encoding problem.
'''
pass
def binary(self, url, timeout=None):
'''High level method to get http request response in bytes.
'''
pass
def download(self, url, dst, timeout=None):
'''Download the binary file at url to destination path.
'''
pass
| 12 | 11 | 10 | 0 | 7 | 3 | 2 | 0.39 | 1 | 4 | 1 | 0 | 11 | 6 | 11 | 11 | 128 | 16 | 83 | 28 | 71 | 32 | 70 | 26 | 58 | 5 | 1 | 4 | 24 |
147,879 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/crawler/simplecrawler.py
|
angora.crawler.simplecrawler.SmartDecoder
|
class SmartDecoder(object):
"""A stable bytes decoder.
"""
def catch_position_in_UnicodeDecodeError_message(self, text):
for token in text.replace(":", "").split(" "):
try:
return int(token)
except:
pass
raise Exception("unable to find position from '%s'" % text)
def decode(self, a_bytes, encoding):
"""A 'try as much as we can' strategy decoding method.
'try as much as we can' feature:
Sometimes most of the bytes are encoded correctly, so chardet is able to
detect the encoding. But sometimes some bytes in the middle are not
encoded correctly, so it is still unable to apply
bytes.decode("encoding-method")
Example::
b"82347912350898143059043958290345" # 3059 is not right.
# [-----Good----][-Bad][---Good---]
What we do is to drop those badly encoded bytes and try to recover text
as much as possible. So this method recursively calls itself, tries to
decode the good byte chunks, and finally concatenates them together.
:param a_bytes: the bytes that encoding is unknown.
:type a_bytes: bytes
:param encoding: how you gonna decode a_bytes
:type encoding: str
"""
try:
return (a_bytes.decode(encoding), encoding)
except Exception as e:
ind = self.catch_position_in_UnicodeDecodeError_message(str(e))
return (a_bytes[:ind].decode(encoding) + self.decode(a_bytes[(ind + 2):], encoding)[0],
encoding)
def autodecode(self, a_bytes):
"""Automatically detect encoding, and decode bytes.
"""
try: # if chardet is installed
analysis = chardet.detect(a_bytes)
if analysis["confidence"] >= 0.75: # if the detection result is trustworthy
return (self.decode(a_bytes, analysis["encoding"])[0],
analysis["encoding"])
else: # if it is not trustworthy, raise an exception
raise Exception("Failed to detect encoding. (%s, %s)" % (
analysis["confidence"],
analysis["encoding"]))
except NameError: # if chardet is not installed
print(
"Warning! chardet not found. Use utf-8 as default encoding instead.")
return (a_bytes.decode("utf-8")[0],
"utf-8")
|
class SmartDecoder(object):
'''A stable bytes decoder.
'''
def catch_position_in_UnicodeDecodeError_message(self, text):
pass
def decode(self, a_bytes, encoding):
'''A 'try as much as we can' strategy decoding method.
'try as much as we can' feature:
Sometimes most of the bytes are encoded correctly, so chardet is able to
detect the encoding. But sometimes some bytes in the middle are not
encoded correctly, so it is still unable to apply
bytes.decode("encoding-method")
Example::
b"82347912350898143059043958290345" # 3059 is not right.
# [-----Good----][-Bad][---Good---]
What we do is to drop those badly encoded bytes and try to recover text
as much as possible. So this method recursively calls itself, tries to
decode the good byte chunks, and finally concatenates them together.
:param a_bytes: the bytes that encoding is unknown.
:type a_bytes: bytes
:param encoding: how you gonna decode a_bytes
:type encoding: str
'''
pass
def autodecode(self, a_bytes):
'''Automatically detect encoding, and decode bytes.
'''
pass
| 4 | 3 | 18 | 2 | 10 | 8 | 3 | 0.83 | 1 | 4 | 0 | 0 | 3 | 0 | 3 | 3 | 60 | 9 | 30 | 8 | 26 | 25 | 23 | 7 | 19 | 3 | 1 | 2 | 8 |
147,880 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/dtypes/dicttree.py
|
angora.dtypes.dicttree.DictTree
|
class DictTree(object):
"""dicttree methods' host class. All method are staticmethod, so we can keep
the namespace clean.
"""
@staticmethod
def initial(key, **kwarg):
"""Create an empty dicttree.
The root node has a special attribute "_rootname".
Because root node is the only dictionary doesn't have key.
So we assign the key as a special attribute.
Usage::
>>> from weatherlab.lib.dtypes.dicttree import DictTree as DT
>>> d = DT.initial("US)
>>> d
{'_meta': {'_rootname': 'US'}}
"""
d = dict()
DictTree.setattr(d, _rootname = key, **kwarg)
return d
@staticmethod
def setattr(d, **kwarg):
"""Set an attribute.
Setting an attribute actually adds a special key, value pair in this dict
under key = "_meta".
Usage::
>>> DT.setattr(d, population=27800000)
>>> d
{'_meta': {'population': 27800000, '_rootname': 'US'}}
"""
if _meta not in d:
d[_meta] = dict()
for k, v in kwarg.items():
d[_meta][k] = v
@staticmethod
def getattr(d, attribute_name):
"""Get attribute_value from the special ``attributes_dict``.
Usage::
>>> DT.getattr(d, "population")
27800000
"""
return d[_meta][attribute_name]
@staticmethod
def add_children(d, key, **kwarg):
"""Add a children with key and attributes. If children already EXISTS,
OVERWRITE it.
Usage::
>>> from pprint import pprint as ppt
>>> DT.add_children(d, "VA", name="virginia", population=100*1000)
>>> DT.add_children(d, "MD", name="maryland", population=200*1000)
>>> ppt(d)
{'_meta': {'population': 27800000, '_rootname': 'US'},
'MD': {'_meta': {'name': 'maryland', 'population': 200000}},
'VA': {'_meta': {'name': 'virginia', 'population': 100000}}}
>>> DT.add_children(d["VA"], "arlington",
name="arlington county", population=5000)
>>> DT.add_children(d["VA"], "vienna",
name="vienna county", population=5000)
>>> DT.add_children(d["MD"], "bethesta",
name="montgomery country", population=5800)
>>> DT.add_children(d["MD"], "germentown",
name="fredrick country", population=1400)
>>> DT.add_children(d["VA"]["arlington"], "riverhouse",
name="RiverHouse 1400", population=437)
>>> DT.add_children(d["VA"]["arlington"], "crystal plaza",
name="Crystal plaza South", population=681)
>>> DT.add_children(d["VA"]["arlington"], "loft",
name="loft hotel", population=216)
>>> ppt(d)
{'MD': {'_meta': {'name': 'maryland', 'population': 200000},
'bethesta': {'_meta': {'name': 'montgomery country',
'population': 5800}},
'germentown': {'_meta': {'name': 'fredrick country',
'population': 1400}}},
'VA': {'_meta': {'name': 'virginia', 'population': 100000},
'arlington': {'_meta': {'name': 'arlington county',
'population': 5000},
'crystal plaza': {'_meta': {'name': 'Crystal plaza South',
'population': 681}},
'loft': {'_meta': {'name': 'loft hotel',
'population': 216}},
'riverhouse': {'_meta': {'name': 'RiverHouse 1400',
'population': 437}}},
'vienna': {'_meta': {'name': 'vienna county', 'population': 1500}}},
'_meta': {'_rootname': 'US', 'population': 27800000.0}}
"""
if kwarg:
d[key] = {_meta: kwarg}
else:
d[key] = dict()
@staticmethod
def ac(d, key, **kwarg):
"""Alias of :meth:`self.add_children()<DictTree.add_children>`.
"""
if kwarg:
d[key] = {_meta: kwarg}
else:
d[key] = dict()
@staticmethod
def k(d):
"""Equivalent to dict.keys().
Usage reference see :meth:`DictTree.kv()<DictTree.kv>`
"""
return (key for key in iterkeys(d) if key != _meta)
@staticmethod
def v(d):
"""Equivalent to dict.values().
Usage reference see :meth:`DictTree.kv()<DictTree.kv>`
"""
return (value for key, value in iteritems(d) if key != _meta)
@staticmethod
def kv(d):
"""Equivalent to dict.items().
Usage::
>>> for key, node in DictTree.kv(d):
>>> print(key, DictTree.getattr(node, "population"))
MD 200000
VA 100000
"""
return ((key, value) for key, value in iteritems(d) if key != _meta)
@staticmethod
def k_depth(d, depth, _counter=1):
"""Iterate keys on specific depth.
depth has to be greater than or equal to 0.
Usage reference see :meth:`DictTree.kv_depth()<DictTree.kv_depth>`
"""
if depth == 0:
yield d[_meta]["_rootname"]
else:
if _counter == depth:
for key in DictTree.k(d):
yield key
else:
_counter += 1
for node in DictTree.v(d):
for key in DictTree.k_depth(node, depth, _counter):
yield key
@staticmethod
def v_depth(d, depth):
"""Iterate values on specific depth.
depth has to be greater than or equal to 0.
Usage reference see :meth:`DictTree.kv_depth()<DictTree.kv_depth>`
"""
if depth == 0:
yield d
else:
for node in DictTree.v(d):
for node1 in DictTree.v_depth(node, depth-1):
yield node1
@staticmethod
def kv_depth(d, depth, _counter=1):
"""Iterate items on specific depth.
depth has to be greater than or equal to 0.
Usage::
>>> for key, node in DictTree.kv_depth(d, 2):
>>> print(key, DictTree.getattr(node, "population"))
bethesta 5800
germentown 1400
vienna 1500
arlington 5000
"""
if depth == 0:
yield d[_meta]["_rootname"], d
else:
if _counter == depth:
for key, node in DictTree.kv(d):
yield key, node
else:
_counter += 1
for node in DictTree.v(d):
for key, node in DictTree.kv_depth(node, depth, _counter):
yield key, node
@staticmethod
def length(d):
"""Get the number of immediate child nodes.
"""
if _meta in d:
return len(d) - 1
else:
return len(d)
@staticmethod
def len_on_depth(d, depth):
"""Get the number of nodes on specific depth.
"""
counter = 0
for node in DictTree.v_depth(d, depth-1):
counter += DictTree.length(node)
return counter
@staticmethod
def copy(d):
"""Copy current dict.
Because members in this dicttree are also dicts, which are mutable,
we have to use deepcopy to avoid mistakes.
"""
return copy.deepcopy(d)
@staticmethod
def del_depth(d, depth):
"""Delete all the nodes on specific depth in this dict
"""
for node in DictTree.v_depth(d, depth-1):
for key in [key for key in DictTree.k(node)]:
del node[key]
@staticmethod
def prettyprint(d):
"""Print dicttree in Json-like format. keys are sorted
"""
print(json.dumps(d, sort_keys=True,
indent=4, separators=("," , ": ")))
@staticmethod
def stats_on_depth(d, depth):
"""Display the node stats info on specific depth in this dict
"""
root_nodes, leaf_nodes = 0, 0
for _, node in DictTree.kv_depth(d, depth):
if DictTree.length(node) == 0:
leaf_nodes += 1
else:
root_nodes += 1
total = root_nodes + leaf_nodes
print("On depth %s, having %s root nodes, %s leaf nodes. "
"%s nodes in total." % (depth, root_nodes, leaf_nodes, total))
|
class DictTree(object):
'''dicttree methods' host class. All methods are staticmethods, so we can keep
the namespace clean.
'''
@staticmethod
def initial(key, **kwarg):
'''Create an empty dicttree.
The root node has a special attribute "_rootname".
Because root node is the only dictionary doesn't have key.
So we assign the key as a special attribute.
Usage::
>>> from weatherlab.lib.dtypes.dicttree import DictTree as DT
>>> d = DT.initial("US)
>>> d
{'_meta': {'_rootname': 'US'}}
'''
pass
@staticmethod
def setattr(d, **kwarg):
'''Set an attribute.
Setting an attribute actually adds a special key, value pair in this dict
under key = "_meta".
Usage::
>>> DT.setattr(d, population=27800000)
>>> d
{'_meta': {'population': 27800000, '_rootname': 'US'}}
'''
pass
@staticmethod
def getattr(d, attribute_name):
'''Get attribute_value from the special ``attributes_dict``.
Usage::
>>> DT.getattr(d, "population")
27800000
'''
pass
@staticmethod
def add_children(d, key, **kwarg):
'''Add a child node with key and attributes. If the child already EXISTS,
OVERWRITE it.
Usage::
>>> from pprint import pprint as ppt
>>> DT.add_children(d, "VA", name="virginia", population=100*1000)
>>> DT.add_children(d, "MD", name="maryland", population=200*1000)
>>> ppt(d)
{'_meta': {'population': 27800000, '_rootname': 'US'},
'MD': {'_meta': {'name': 'maryland', 'population': 200000}},
'VA': {'_meta': {'name': 'virginia', 'population': 100000}}}
>>> DT.add_children(d["VA"], "arlington",
name="arlington county", population=5000)
>>> DT.add_children(d["VA"], "vienna",
name="vienna county", population=5000)
>>> DT.add_children(d["MD"], "bethesta",
name="montgomery country", population=5800)
>>> DT.add_children(d["MD"], "germentown",
name="fredrick country", population=1400)
>>> DT.add_children(d["VA"]["arlington"], "riverhouse",
name="RiverHouse 1400", population=437)
>>> DT.add_children(d["VA"]["arlington"], "crystal plaza",
name="Crystal plaza South", population=681)
>>> DT.add_children(d["VA"]["arlington"], "loft",
name="loft hotel", population=216)
>>> ppt(d)
{'MD': {'_meta': {'name': 'maryland', 'population': 200000},
'bethesta': {'_meta': {'name': 'montgomery country',
'population': 5800}},
'germentown': {'_meta': {'name': 'fredrick country',
'population': 1400}}},
'VA': {'_meta': {'name': 'virginia', 'population': 100000},
'arlington': {'_meta': {'name': 'arlington county',
'population': 5000},
'crystal plaza': {'_meta': {'name': 'Crystal plaza South',
'population': 681}},
'loft': {'_meta': {'name': 'loft hotel',
'population': 216}},
'riverhouse': {'_meta': {'name': 'RiverHouse 1400',
'population': 437}}},
'vienna': {'_meta': {'name': 'vienna county', 'population': 1500}}},
'_meta': {'_rootname': 'US', 'population': 27800000.0}}
'''
pass
@staticmethod
def ac(d, key, **kwarg):
'''Alias of :meth:`self.add_children()<DictTree.add_children>`.
'''
pass
@staticmethod
def k(d):
'''Equivalent to dict.keys().
Usage reference see :meth:`DictTree.kv()<DictTree.kv>`
'''
pass
@staticmethod
def v(d):
'''Equivalent to dict.values().
Usage reference see :meth:`DictTree.kv()<DictTree.kv>`
'''
pass
@staticmethod
def kv(d):
'''Equivalent to dict.items().
Usage::
>>> for key, node in DictTree.kv(d):
>>> print(key, DictTree.getattr(node, "population"))
MD 200000
VA 100000
'''
pass
@staticmethod
def k_depth(d, depth, _counter=1):
'''Iterate keys on specific depth.
depth has to be greater than or equal to 0.
Usage reference see :meth:`DictTree.kv_depth()<DictTree.kv_depth>`
'''
pass
@staticmethod
def v_depth(d, depth):
'''Iterate values on specific depth.
depth has to be greater than or equal to 0.
Usage reference see :meth:`DictTree.kv_depth()<DictTree.kv_depth>`
'''
pass
@staticmethod
def kv_depth(d, depth, _counter=1):
'''Iterate items on specific depth.
depth has to be greater than or equal to 0.
Usage::
>>> for key, node in DictTree.kv_depth(d, 2):
>>> print(key, DictTree.getattr(node, "population"))
bethesta 5800
germentown 1400
vienna 1500
arlington 5000
'''
pass
@staticmethod
def length(d):
'''Get the number of immediate child nodes.
'''
pass
@staticmethod
def len_on_depth(d, depth):
'''Get the number of nodes on specific depth.
'''
pass
@staticmethod
def copy(d):
'''Copy current dict.
Because members in this dicttree are also dicts, which are mutable,
we have to use deepcopy to avoid mistakes.
'''
pass
@staticmethod
def del_depth(d, depth):
'''Delete all the nodes on specific depth in this dict
'''
pass
@staticmethod
def prettyprint(d):
'''Print dicttree in Json-like format. keys are sorted
'''
pass
@staticmethod
def stats_on_depth(d, depth):
'''Display the node stats info on specific depth in this dict
'''
pass
| 35 | 18 | 13 | 1 | 5 | 7 | 2 | 1.1 | 1 | 1 | 0 | 0 | 0 | 0 | 17 | 17 | 253 | 33 | 105 | 50 | 70 | 115 | 77 | 32 | 59 | 6 | 1 | 4 | 40 |
147,881 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/dtypes/orderedset.py
|
angora.dtypes.orderedset.OrderedSet
|
class OrderedSet(collections.MutableSet):
"""A light weight OrderedSet data type pure Python implementation.
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
"""Add an item to the OrderedSet.
Usage::
>>> s = OrderedSet()
>>> s.add(1)
>>> s.add(2)
>>> s.add(3)
>>> s
OrderedSet([1, 2, 3])
**Chinese document**
Add an element; if the element already exists, this has no effect.
"""
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
"""Remove a item from its member if it is a member.
Usage::
>>> s = OrderedSet([1, 2, 3])
>>> s.discard(2)
>>> s
OrderedSet([1, 3])
**Chinese document**
Remove an element from the ordered set while keeping the remaining elements in order.
"""
if key in self.map:
key, prev, next_item = self.map.pop(key)
prev[2] = next_item
next_item[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
"""Remove and returns the last added item.
Usage::
>>> s = OrderedSet([1, 2, 3])
>>> s.pop()
3
>>> s
OrderedSet([1, 2])
**Chinese document**
Remove and return the last added element.
"""
if not self:
raise KeyError("set is empty")
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return "%s()" % (self.__class__.__name__,)
return "%s(%r)" % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
@staticmethod
def union(*argv):
"""Returns union of sets as a new set. basically it's
Items are ordered by set1, set2, ...
**中文文档**
求多个有序集合的并集, 按照第一个集合, 第二个, ..., 这样的顺序。
"""
res = OrderedSet()
for ods in argv:
res = res | ods
return res
@staticmethod
def intersection(*argv):
"""Returns the intersection of multiple sets.
Items are ordered by set1, set2, ...
**Chinese document**
Compute the intersection of multiple ordered sets; items are ordered by the first set, then the second, and so on.
"""
res = OrderedSet(argv[0])
for ods in argv:
res = ods & res
return res
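A usage sketch of the OrderedSet above; the import path is assumed from this record's file path.

# Hedged sketch: the import path is an assumption.
from angora.dtypes.orderedset import OrderedSet

s = OrderedSet("abracadabra")            # duplicates dropped, insertion order kept
print(s)                                 # OrderedSet(['a', 'b', 'r', 'c', 'd'])

s.add("z")
s.discard("b")
print(list(s))                           # ['a', 'r', 'c', 'd', 'z']
print(s.pop())                           # 'z', the last added item

# The static helpers keep items ordered by the first set, then the second, ...
print(OrderedSet.union(OrderedSet("ab"), OrderedSet("bc")))           # OrderedSet(['a', 'b', 'c'])
print(OrderedSet.intersection(OrderedSet("abc"), OrderedSet("cbd")))  # OrderedSet(['b', 'c'])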
|
class OrderedSet(collections.MutableSet):
'''A lightweight, pure-Python OrderedSet data type implementation.
'''
def __init__(self, iterable=None):
pass
def __len__(self):
pass
def __contains__(self, key):
pass
def add(self, key):
'''Add an item to the OrderedSet.
Usage::
>>> s = OrderedSet()
>>> s.add(1)
>>> s.add(2)
>>> s.add(3)
>>> s
OrderedSet([1, 2, 3])
**Chinese document**
Add an element; if the element already exists, this has no effect.
'''
pass
def discard(self, key):
'''Remove an item from the set if it is a member.
Usage::
>>> s = OrderedSet([1, 2, 3])
>>> s.discard(2)
>>> s
OrderedSet([1, 3])
**Chinese document**
Remove an element from the ordered set while keeping the remaining elements in order.
'''
pass
def __iter__(self):
pass
def __reversed__(self):
pass
def pop(self, last=True):
'''Remove and return the last added item.
Usage::
>>> s = OrderedSet([1, 2, 3])
>>> s.pop()
3
>>> s
OrderedSet([1, 2])
**Chinese document**
Remove and return the last added element.
'''
pass
def __repr__(self):
pass
def __eq__(self, other):
pass
@staticmethod
def union(*argv):
'''Returns the union of sets as a new set.
Items are ordered by set1, set2, ...
**Chinese document**
Compute the union of multiple ordered sets; items are ordered by the first set, then the second, and so on.
'''
pass
@staticmethod
def intersection(*argv):
'''Returns the intersection of multiple sets.
Items are ordered by set1, set2, ...
**Chinese document**
Compute the intersection of multiple ordered sets; items are ordered by the first set, then the second, and so on.
'''
pass
| 15 | 6 | 9 | 1 | 5 | 4 | 2 | 0.75 | 1 | 3 | 0 | 0 | 10 | 2 | 12 | 12 | 128 | 27 | 59 | 29 | 44 | 44 | 57 | 27 | 44 | 3 | 1 | 1 | 23 |
147,882 |
MacHu-GWU/angora-project
|
MacHu-GWU_angora-project/angora/bot/anjian.py
|
angora.bot.anjian.BoardKey
|
class BoardKey(object):
Alt = "Alt"
Ctrl = "Ctrl"
Shift = "Shift"
Tab = "Tab"
Space = "Space"
Enter = "Enter"
A = "A"
B = "B"
C = "C"
D = "D"
E = "E"
F = "F"
G = "G"
H = "H"
I = "I"
J = "J"
K = "K"
L = "L"
M = "M"
N = "N"
O = "O"
P = "P"
Q = "Q"
R = "R"
S = "S"
T = "T"
U = "U"
V = "V"
W = "W"
X = "X"
Y = "Y"
Z = "Z"
_1 = "1"
_2 = "2"
_3 = "3"
_4 = "4"
_5 = "5"
_6 = "6"
_7 = "7"
_8 = "8"
_9 = "9"
_0 = "0"
|
class BoardKey(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 2 | 43 | 43 | 42 | 0 | 43 | 43 | 42 | 0 | 1 | 0 | 0 |
147,883 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz.tzlocal
|
class tzlocal(_tzinfo):
"""
A :class:`tzinfo` subclass built around the ``time`` timezone functions.
"""
def __init__(self):
super(tzlocal, self).__init__()
self._std_offset = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
self._dst_offset = datetime.timedelta(seconds=-time.altzone)
else:
self._dst_offset = self._std_offset
self._dst_saved = self._dst_offset - self._std_offset
self._hasdst = bool(self._dst_saved)
def utcoffset(self, dt):
if dt is None and self._hasdst:
return None
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if dt is None and self._hasdst:
return None
if self._isdst(dt):
return self._dst_offset - self._std_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
naive_dst = self._naive_is_dst(dt)
return (not naive_dst and
(naive_dst != self._naive_is_dst(dt - self._dst_saved)))
def _naive_is_dst(self, dt):
timestamp = _datetime_to_timestamp(dt)
return time.localtime(timestamp + time.timezone).tm_isdst
def _isdst(self, dt, fold_naive=True):
# We can't use mktime here. It is unstable when deciding if
# the hour near to a change is DST or not.
#
# timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
# dt.minute, dt.second, dt.weekday(), 0, -1))
# return time.localtime(timestamp).tm_isdst
#
# The code above yields the following result:
#
# >>> import tz, datetime
# >>> t = tz.tzlocal()
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRDT'
# >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
# 'BRST'
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRST'
# >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
# 'BRDT'
# >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
# 'BRDT'
#
# Here is a more stable implementation:
#
if not self._hasdst:
return False
# Check for ambiguous times:
dstval = self._naive_is_dst(dt)
fold = getattr(dt, 'fold', None)
if self.is_ambiguous(dt):
if fold is not None:
return not self._fold(dt)
else:
return True
return dstval
def __eq__(self, other):
if not isinstance(other, tzlocal):
return NotImplemented
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset)
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
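A short sketch of the tzlocal interface above; the output depends on the machine's local timezone settings, so the commented values are only examples, and the import shown is the upstream dateutil path since this record's copy is vendored.

# Hedged sketch: results depend on the local timezone of the machine running it.
from datetime import datetime
from dateutil.tz import tzlocal   # upstream path; this record's copy is vendored

local = tzlocal()
now = datetime.now(tz=local)
print(now.tzname())               # e.g. 'EST' or 'EDT'
print(local.utcoffset(now))       # e.g. -1 day, 19:00:00 for UTC-5
print(local.dst(now))             # 0:00:00 outside daylight saving time
print(local.is_ambiguous(now))    # True only during the repeated fall-back hour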
|
class tzlocal(_tzinfo):
'''
A :class:`tzinfo` subclass built around the ``time`` timezone functions.
'''
def __init__(self):
pass
def utcoffset(self, dt):
pass
def dst(self, dt):
pass
@tzname_in_python2
def tzname(self, dt):
pass
def is_ambiguous(self, dt):
'''
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
'''
pass
def _naive_is_dst(self, dt):
pass
def _isdst(self, dt, fold_naive=True):
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
def __repr__(self):
pass
| 12 | 2 | 10 | 1 | 5 | 3 | 2 | 0.66 | 1 | 3 | 0 | 0 | 10 | 4 | 10 | 20 | 117 | 24 | 56 | 22 | 44 | 37 | 49 | 21 | 38 | 4 | 2 | 2 | 19 |
147,884 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_inherit.py
|
test_inherit.FileType.Video
|
class Video(File):
id = 3
ext = ".avi"
|
class Video(File):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,885 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/_constant2.py
|
constant2._constant2.Food.Fruit.Apple.GreenApple
|
class GreenApple:
id = 2
name = "green apple"
|
class GreenApple:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 0 | 0 | 0 |
147,886 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/_constant2.py
|
constant2._constant2.Food.Fruit.Apple.RedApple
|
class RedApple:
id = 1
name = "red apple"
|
class RedApple:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 0 | 0 | 0 |
147,887 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_inherit.py
|
test_inherit.FileType.Music
|
class Music(File):
id = 2
ext = ".mp3"
|
class Music(File):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,888 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/six.py
|
constant2.pkg.superjson.pkg.six.with_metaclass.metaclass
|
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
|
class metaclass(meta):
def __new__(cls, name, this_bases, d):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 4 | 1 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
147,889 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_get_all.py
|
test_get_all.Item.Weapon
|
class Weapon(Constant):
id = 1
name = "weapon"
weight = 10
|
class Weapon(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 3 | 0 | 0 |
147,890 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_basic.py
|
test_basic.Food.Fruit.Apple.GreenApple
|
class GreenApple(Constant):
id = 2
name = "green apple"
|
class GreenApple(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 3 | 0 | 0 |
147,891 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_basic.py
|
test_basic.Food.Fruit.Apple.RedApple
|
class RedApple(Constant):
id = 1
name = "red apple"
|
class RedApple(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 3 | 0 | 0 |
147,892 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_basic.py
|
test_basic.Food.Fruit.Banana.GreenBanana
|
class GreenBanana(Constant):
id = 2
name = "green banana"
|
class GreenBanana(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 3 | 0 | 0 |
147,893 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.TagEntity.T2_Senior
|
class T2_Senior(Tag):
id = 2
name = "Senior"
|
class T2_Senior(Tag):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,894 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.TagEntity.T1_Junior
|
class T1_Junior(Tag):
id = 1
name = "Junior"
|
class T1_Junior(Tag):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,895 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.EmployeeEntity.E3_Cathy
|
class E3_Cathy(Employee):
id = 3
name = "Cathy"
|
class E3_Cathy(Employee):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,896 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.EmployeeEntity.E2_Bob
|
class E2_Bob(Employee):
id = 2
name = "Bob"
|
class E2_Bob(Employee):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,897 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.EmployeeEntity.E1_Alice
|
class E1_Alice(Employee):
id = 1
name = "Alice"
|
class E1_Alice(Employee):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,898 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.DepartmentEntity.D2_IT
|
class D2_IT(Department):
id = 2
name = "IT"
|
class D2_IT(Department):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,899 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.DepartmentEntity.D1_HR
|
class D1_HR(Department):
id = 1
name = "HR"
|
class D1_HR(Department):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,900 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_deepcopy.py
|
test_deepcopy.Config.Setting
|
class Setting(Constant):
data = dict(a=1)
|
class Setting(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
147,901 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_basic.py
|
test_basic.Food.Meat.Pork
|
class Pork(Constant):
id = 1
name = "pork"
|
class Pork(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 3 | 0 | 0 |
147,902 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_basic.py
|
test_basic.Food.Meat.Beef
|
class Beef(Constant):
id = 2
name = "beef"
|
class Beef(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 3 | 0 | 0 |
147,903 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_basic.py
|
test_basic.Food.Fruit.Banana.YellowBanana
|
class YellowBanana(Constant):
id = 1
name = "yellow banana"
|
class YellowBanana(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 3 | 0 | 0 |
147,904 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/_constant2.py
|
constant2._constant2.Food.Fruit.Banana.GreenBanana
|
class GreenBanana:
id = 2
name = "green banana"
|
class GreenBanana:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 0 | 0 | 0 |
147,905 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/six.py
|
constant2.pkg.superjson.pkg.six.Iterator
|
class Iterator(object):
def next(self):
return type(self).__next__(self)
|
class Iterator(object):
def next(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 4 | 1 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
147,906 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/_common.py
|
constant2.pkg.superjson.pkg.dateutil.tz._common._DatetimeWithFold
|
class _DatetimeWithFold(datetime):
"""
This is a class designed to provide a PEP 495-compliant interface for
Python versions before 3.6. It is used only for dates in a fold, so
the ``fold`` attribute is fixed at ``1``.
.. versionadded:: 2.6.0
"""
__slots__ = ()
@property
def fold(self):
return 1
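A tiny sketch of the behaviour above: the subclass acts like a normal datetime but always reports fold=1; the constructor arguments below are illustrative only.

# Hedged sketch: the date is made up to represent a DST fall-back wall time.
ambiguous = _DatetimeWithFold(2017, 11, 5, 1, 30)
print(ambiguous.fold)                     # 1, i.e. the second occurrence of a repeated wall time
print(ambiguous.hour, ambiguous.minute)   # 1 30, regular datetime behaviour otherwise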
|
class _DatetimeWithFold(datetime):
'''
This is a class designed to provide a PEP 495-compliant interface for
Python versions before 3.6. It is used only for dates in a fold, so
the ``fold`` attribute is fixed at ``1``.
.. versionadded:: 2.6.0
'''
@property
def fold(self):
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 1.2 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 78 | 13 | 2 | 5 | 4 | 2 | 6 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
147,907 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/six.py
|
constant2.pkg.superjson.pkg.six.X
|
class X(object):
def __len__(self):
return 1 << 31
|
class X(object):
def __len__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 4 | 1 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
147,908 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/_constant2.py
|
constant2._constant2.Food.Meat.Pork
|
class Pork:
id = 1
name = "pork"
|
class Pork:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 0 | 0 | 0 |
147,909 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/_constant2.py
|
constant2._constant2._Constant
|
class _Constant(object):
"""Generic Constantant.
Inherit from this class to define a data container class.
all nested Constant class automatically inherit from :class:`Constant`.
"""
__creation_index__ = 0 # Used for sorting
def __init__(self):
"""
.. versionadded:: 0.0.3
"""
for attr, value in self.__class__.Items():
value = deepcopy(value)
setattr(self, attr, value)
for attr, Subclass in self.Subclasses():
value = Subclass()
setattr(self, attr, value)
self.__creation_index__ = Constant.__creation_index__
Constant.__creation_index__ += 1
def __repr__(self):
items_str = ", ".join([
"%s=%r" % (attr, value) for attr, value in self.items()
])
nested_str = ", ".join([
"%s=%r" % (attr, subclass) for attr, subclass in self.subclasses()
])
l = list()
if items_str:
l.append(items_str)
if nested_str:
l.append(nested_str)
return "{classname}({args})".format(
classname=self.__class__.__name__, args=", ".join(l))
@classmethod
def Items(cls):
"""non-class attributes ordered by alphabetical order.
::
>>> class MyClass(Constant):
... a = 1 # non-class attribute
... b = 2 # non-class attribute
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> MyClass.Items()
[("a", 1), ("b", 2)]
.. versionadded:: 0.0.5
"""
l = list()
for attr, value in get_all_attributes(cls):
# if it's not a class(Constant)
if not inspect.isclass(value):
l.append((attr, value))
return list(sorted(l, key=lambda x: x[0]))
def items(self):
"""non-class attributes ordered by alphabetical order.
::
>>> class MyClass(Constant):
... a = 1 # non-class attribute
... b = 2 # non-class attribute
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> my_class = MyClass()
>>> my_class.items()
[("a", 1), ("b", 2)]
.. versionchanged:: 0.0.5
"""
l = list()
# Why get_all_attributes(self.__class__) instead of
# get_all_attributes(self)? Because some instances do not support
# the get_all_attributes(instance) call and would raise an error.
# So we read all attribute names from the class, then look up the
# corresponding values on the instance.
for attr, value in get_all_attributes(self.__class__):
value = getattr(self, attr)
# if it is not a instance of class(Constant)
if not isinstance(value, Constant):
l.append((attr, value))
return list(sorted(l, key=lambda x: x[0]))
def __eq__(self, other):
return self.items() == other.items()
@classmethod
def Keys(cls):
"""All non-class attribute name list.
.. versionadded:: 0.0.5
"""
return [attr for attr, _ in cls.Items()]
def keys(self):
"""All non-class attribute name list.
.. versionchanged:: 0.0.5
"""
return [attr for attr, _ in self.items()]
@classmethod
def Values(cls):
"""All non-class attribute value list.
.. versionadded:: 0.0.5
"""
return [value for _, value in cls.Items()]
def values(self):
"""All non-class attribute value list.
.. versionchanged:: 0.0.5
"""
return [value for _, value in self.items()]
@classmethod
def ToDict(cls):
"""Return regular class variable and it's value as a dictionary data.
.. versionadded:: 0.0.5
"""
return dict(cls.Items())
def to_dict(self):
"""Return regular class variable and it's value as a dictionary data.
.. versionchanged:: 0.0.5
"""
return dict(self.items())
@classmethod
def Subclasses(cls, sort_by=None, reverse=False):
"""Get all nested Constant class and it's name pair.
:param sort_by: the attribute name used for sorting.
:param reverse: if True, return in descend order.
:returns: [(attr, value),...] pairs.
::
>>> class MyClass(Constant):
... a = 1 # non-class attribute
... b = 2 # non-class attribute
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> MyClass.Subclasses()
[("C", MyClass.C), ("D", MyClass.D)]
.. versionadded:: 0.0.3
"""
l = list()
for attr, value in get_all_attributes(cls):
try:
if issubclass(value, Constant):
l.append((attr, value))
except:
pass
if sort_by is None:
sort_by = "__creation_index__"
l = list(
sorted(l, key=lambda x: getattr(x[1], sort_by), reverse=reverse))
return l
def subclasses(self, sort_by=None, reverse=False):
"""Get all nested Constant class instance and it's name pair.
:param sort_by: the attribute name used for sorting.
:param reverse: if True, return in descend order.
:returns: [(attr, value),...] pairs.
::
>>> class MyClass(Constant):
... a = 1 # non-class attribute
... b = 2 # non-class attribute
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> my_class = MyClass()
>>> my_class.subclasses()
[("C", my_class.C), ("D", my_class.D)]
.. versionadded:: 0.0.4
"""
l = list()
for attr, _ in self.Subclasses(sort_by, reverse):
value = getattr(self, attr)
l.append((attr, value))
return l
@classmethod
@lrudecorator(size=64)
def GetFirst(cls, attr, value, e=0.000001, sort_by="__name__"):
"""Get the first nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionadded:: 0.0.5
"""
for _, klass in cls.Subclasses(sort_by=sort_by):
try:
if klass.__dict__[attr] == approx(value, e):
return klass
except:
pass
return None
def get_first(self, attr, value, e=0.000001,
sort_by="__name__", reverse=False):
"""Get the first nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionchanged:: 0.0.5
"""
for _, klass in self.subclasses(sort_by, reverse):
try:
if getattr(klass, attr) == approx(value, e):
return klass
except:
pass
return None
@classmethod
@lrudecorator(size=64)
def GetAll(cls, attr, value, e=0.000001, sort_by="__name__"):
"""Get all nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionadded:: 0.0.5
"""
matched = list()
for _, klass in cls.Subclasses(sort_by=sort_by):
try:
if klass.__dict__[attr] == approx(value, e):
matched.append(klass)
except: # pragma: no cover
pass
return matched
def get_all(self, attr, value, e=0.000001,
sort_by="__name__", reverse=False):
"""Get all nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionchanged:: 0.0.5
"""
matched = list()
for _, klass in self.subclasses(sort_by, reverse):
try:
if getattr(klass, attr) == approx(value, e):
matched.append(klass)
except: # pragma: no cover
pass
return matched
@classmethod
def ToIds(cls, klass_list, id_field="id"):
return [getattr(klass, id_field) for klass in klass_list]
def to_ids(self, instance_list, id_field="id"):
return [getattr(instance, id_field) for instance in instance_list]
@classmethod
def ToClasses(cls, klass_id_list, id_field="id"):
return [cls.GetFirst(id_field, klass_id) for klass_id in klass_id_list]
def to_instances(self, instance_id_list, id_field="id"):
return [self.get_first(id_field, instance_id) for instance_id in instance_id_list]
@classmethod
def SubIds(cls, id_field="id", sort_by=None, reverse=False):
return [
getattr(klass, id_field)
for _, klass in cls.Subclasses(sort_by=sort_by, reverse=reverse)
]
def sub_ids(self, id_field="id", sort_by=None, reverse=False):
return [
getattr(instance, id_field)
for _, instance in self.subclasses(sort_by=sort_by, reverse=reverse)
]
@classmethod
def BackAssign(cls,
other_entity_klass,
this_entity_backpopulate_field,
other_entity_backpopulate_field,
is_many_to_one=False):
"""
Assign defined one side mapping relationship to other side.
For example, each employee belongs to one department, then one department
includes many employees. If you defined each employee's department,
this method will assign employees to ``Department.employees`` field.
This is a one to many (department to employee) example.
Another example would be, each employee has multiple tags. If you defined
tags for each employee, this method will assign employees to
``Tag.employees`` field. This is a many to many (employee to tag) example.
Support:
- many to many mapping
- one to many mapping
:param other_entity_klass: a :class:`Constant` class.
:param this_entity_backpopulate_field: str
:param other_entity_backpopulate_field: str
:param is_many_to_one: bool
:return:
"""
data = dict()
for _, other_klass in other_entity_klass.Subclasses():
other_field_value = getattr(
other_klass, this_entity_backpopulate_field)
if isinstance(other_field_value, (tuple, list)):
for self_klass in other_field_value:
self_key = self_klass.__name__
try:
data[self_key].append(other_klass)
except KeyError:
data[self_key] = [other_klass, ]
else:
if other_field_value is not None:
self_klass = other_field_value
self_key = self_klass.__name__
try:
data[self_key].append(other_klass)
except KeyError:
data[self_key] = [other_klass, ]
if is_many_to_one:
new_data = dict()
for key, value in data.items():
try:
new_data[key] = value[0]
except: # pragma: no cover
pass
data = new_data
for self_key, other_klass_list in data.items():
setattr(getattr(cls, self_key),
other_entity_backpopulate_field, other_klass_list)
@classmethod
def dump(cls):
"""Dump data into a dict.
.. versionadded:: 0.0.2
"""
d = OrderedDict(cls.Items())
d["__classname__"] = cls.__name__
for attr, klass in cls.Subclasses():
d[attr] = klass.dump()
return OrderedDict([(cls.__name__, d)])
@classmethod
def load(cls, data):
"""Construct a Constant class from it's dict data.
.. versionadded:: 0.0.2
"""
if len(data) == 1:
for key, value in data.items():
if "__classname__" not in value: # pragma: no cover
raise ValueError
name = key
bases = (Constant,)
attrs = dict()
for k, v in value.items():
if isinstance(v, dict):
if "__classname__" in v:
attrs[k] = cls.load({k: v})
else:
attrs[k] = v
else:
attrs[k] = v
return type(name, bases, attrs)
else: # pragma: no cover
raise ValueError
@classmethod
def pprint(cls): # pragma: no cover
"""Pretty print it's data.
.. versionadded:: 0.0.2
"""
pprint(cls.dump())
@classmethod
def jprint(cls): # pragma: no cover
"""Json print it's data.
.. versionadded:: 0.0.2
"""
print(json.dumps(cls.dump(), pretty=4))
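A minimal sketch pieced together from the docstring examples above; it assumes Constant is the public subclass of _Constant exported by the constant2 package, as used in the test files elsewhere in this dataset.

# Hedged sketch: the import below is an assumption about the package's public API.
from constant2 import Constant

class Item(Constant):
    class Weapon(Constant):
        id = 1
        name = "weapon"

    class Armor(Constant):
        id = 2
        name = "armor"

print(Item.Weapon.Items())                                  # [('id', 1), ('name', 'weapon')]
print([name for name, _ in Item.Subclasses(sort_by="id")])  # ['Weapon', 'Armor']
print(Item.GetFirst("id", 2).name)                          # 'armor'
print(Item.SubIds(sort_by="id"))                            # [1, 2]
print(Item.Weapon.ToDict())                                 # {'id': 1, 'name': 'weapon'}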
|
class _Constant(object):
'''Generic Constant.
Inherit from this class to define a data container class.
All nested Constant classes automatically inherit from :class:`Constant`.
'''
def __init__(self):
'''
.. versionadded:: 0.0.3
'''
pass
def __repr__(self):
pass
@classmethod
def Items(cls):
'''non-class attributes ordered by alphabetical order.
::
>>> class MyClass(Constant):
... a = 1 # non-class attribute
... b = 2 # non-class attribute
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> MyClass.Items()
[("a", 1), ("b", 2)]
.. versionadded:: 0.0.5
'''
pass
def items(self):
'''non-class attributes ordered by alphabetical order.
::
>>> class MyClass(Constant):
... a = 1 # non-class attribute
... b = 2 # non-class attribute
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> my_class = MyClass()
>>> my_class.items()
[("a", 1), ("b", 2)]
.. versionchanged:: 0.0.5
'''
pass
def __eq__(self, other):
pass
@classmethod
def Keys(cls):
'''All non-class attribute name list.
.. versionadded:: 0.0.5
'''
pass
def keys(self):
'''All non-class attribute name list.
.. versionchanged:: 0.0.5
'''
pass
@classmethod
def Values(cls):
'''All non-class attribute value list.
.. versionadded:: 0.0.5
'''
pass
def values(self):
'''All non-class attribute value list.
.. versionchanged:: 0.0.5
'''
pass
@classmethod
def ToDict(cls):
'''Return regular class variables and their values as a dictionary.
.. versionadded:: 0.0.5
'''
pass
def to_dict(self):
'''Return regular class variables and their values as a dictionary.
.. versionchanged:: 0.0.5
'''
pass
@classmethod
def Subclasses(cls, sort_by=None, reverse=False):
'''Get all nested Constant classes and their name pairs.
:param sort_by: the attribute name used for sorting.
:param reverse: if True, return in descend order.
:returns: [(attr, value),...] pairs.
::
>>> class MyClass(Constant):
... a = 1 # non-class attribute
... b = 2 # non-class attribute
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> MyClass.Subclasses()
[("C", MyClass.C), ("D", MyClass.D)]
.. versionadded:: 0.0.3
'''
pass
def subclasses(self, sort_by=None, reverse=False):
'''Get all nested Constant class instances and their name pairs.
:param sort_by: the attribute name used for sorting.
:param reverse: if True, return in descend order.
:returns: [(attr, value),...] pairs.
::
>>> class MyClass(Constant):
... a = 1 # non-class attribute
... b = 2 # non-class attribute
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> my_class = MyClass()
>>> my_class.subclasses()
[("C", my_class.C), ("D", my_class.D)]
.. versionadded:: 0.0.4
'''
pass
@classmethod
@lrudecorator(size=64)
def GetFirst(cls, attr, value, e=0.000001, sort_by="__name__"):
'''Get the first nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionadded:: 0.0.5
'''
pass
def get_first(self, attr, value, e=0.000001,
sort_by="__name__", reverse=False):
'''Get the first nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionchanged:: 0.0.5
'''
pass
@classmethod
@lrudecorator(size=64)
def GetAll(cls, attr, value, e=0.000001, sort_by="__name__"):
'''Get all nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionadded:: 0.0.5
'''
pass
def get_all(self, attr, value, e=0.000001,
sort_by="__name__", reverse=False):
'''Get all nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
:param sort_by: nested class is ordered by <sort_by> attribute.
.. versionchanged:: 0.0.5
'''
pass
@classmethod
def ToIds(cls, klass_list, id_field="id"):
pass
def to_ids(self, instance_list, id_field="id"):
pass
@classmethod
def ToClasses(cls, klass_id_list, id_field="id"):
pass
def to_instances(self, instance_id_list, id_field="id"):
pass
@classmethod
def SubIds(cls, id_field="id", sort_by=None, reverse=False):
pass
def sub_ids(self, id_field="id", sort_by=None, reverse=False):
pass
@classmethod
def BackAssign(cls,
other_entity_klass,
this_entity_backpopulate_field,
other_entity_backpopulate_field,
is_many_to_one=False):
'''
Assign defined one side mapping relationship to other side.
For example, each employee belongs to one department, then one department
includes many employees. If you defined each employee's department,
this method will assign employees to ``Department.employees`` field.
This is a one to many (department to employee) example.
Another example would be, each employee has multiple tags. If you defined
tags for each employee, this method will assign employees to
``Tag.employees`` field. This is a many to many (employee to tag) example.
Support:
- many to many mapping
- one to many mapping
:param other_entity_klass: a :class:`Constant` class.
:param this_entity_backpopulate_field: str
:param other_entity_backpopulate_field: str
:param is_many_to_one: bool
:return:
'''
pass
@classmethod
def dump(cls):
'''Dump data into a dict.
.. versionadded:: 0.0.2
'''
pass
@classmethod
def load(cls, data):
'''Construct a Constant class from its dict data.
.. versionadded:: 0.0.2
'''
pass
@classmethod
def pprint(cls):
'''Pretty print its data.
.. versionadded:: 0.0.2
'''
pass
@classmethod
def jprint(cls):
'''Json print its data.
.. versionadded:: 0.0.2
'''
pass
| 46 | 21 | 14 | 2 | 7 | 6 | 3 | 0.81 | 1 | 9 | 2 | 1 | 13 | 0 | 28 | 28 | 453 | 89 | 206 | 92 | 154 | 166 | 165 | 65 | 136 | 11 | 1 | 5 | 70 |
147,910 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/parser.py
|
constant2.pkg.superjson.pkg.dateutil.parser._tzparser._result
|
class _result(_resultbase):
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
"start", "end"]
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
def __repr__(self):
return self._repr("")
def __init__(self):
_resultbase.__init__(self)
self.start = self._attr()
self.end = self._attr()
|
class _result(_resultbase):
class _attr(_resultbase):
def __repr__(self):
pass
def __init__(self):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 2 | 2 | 6 | 16 | 4 | 12 | 8 | 8 | 0 | 10 | 8 | 6 | 1 | 2 | 0 | 2 |
147,911 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/parser.py
|
constant2.pkg.superjson.pkg.dateutil.parser._tzparser._result._attr
|
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
|
class _attr(_resultbase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 3 | 0 | 3 | 2 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
147,912 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/parser.py
|
constant2.pkg.superjson.pkg.dateutil.parser.parser._result
|
class _result(_resultbase):
__slots__ = ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset", "ampm"]
|
class _result(_resultbase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 4 | 0 | 4 | 2 | 3 | 0 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
147,913 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/rrule.py
|
constant2.pkg.superjson.pkg.dateutil.rrule.rruleset._genitem
|
class _genitem(object):
def __init__(self, genlist, gen):
try:
self.dt = advance_iterator(gen)
genlist.append(self)
except StopIteration:
pass
self.genlist = genlist
self.gen = gen
def __next__(self):
try:
self.dt = advance_iterator(self.gen)
except StopIteration:
if self.genlist[0] is self:
heapq.heappop(self.genlist)
else:
self.genlist.remove(self)
heapq.heapify(self.genlist)
next = __next__
def __lt__(self, other):
return self.dt < other.dt
def __gt__(self, other):
return self.dt > other.dt
def __eq__(self, other):
return self.dt == other.dt
def __ne__(self, other):
return self.dt != other.dt
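A sketch of how rruleset drives the helper above: each generator is wrapped in a _genitem and kept in a heap so the smallest pending value is always at the root; plain integers stand in for datetimes here, purely for illustration.

# Hedged sketch: integers replace datetimes; assumes the _genitem class above is in scope.
import heapq

genlist = []
for gen in (iter([1, 4, 7]), iter([2, 3, 9])):
    _genitem(genlist, gen)                   # appends itself unless the generator is empty

heapq.heapify(genlist)
merged = []
while genlist:
    item = genlist[0]                        # wrapper holding the smallest .dt
    merged.append(item.dt)
    item.next()                              # advance; removes itself once exhausted
    if genlist and genlist[0] is item:       # still in the heap -> restore heap order
        heapq.heapreplace(genlist, item)

print(merged)                                # [1, 2, 3, 4, 7, 9]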
|
class _genitem(object):
def __init__(self, genlist, gen):
pass
def __next__(self):
pass
def __lt__(self, other):
pass
def __gt__(self, other):
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
| 7 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 6 | 3 | 6 | 6 | 33 | 6 | 27 | 10 | 20 | 0 | 26 | 10 | 19 | 3 | 1 | 2 | 9 |
147,914 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.TagEntity.T3_Python
|
class T3_Python(Tag):
id = 3
name = "Python"
|
class T3_Python(Tag):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,915 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.TagEntity.T4_Java
|
class T4_Java(Tag):
id = 4
name = "Java"
|
class T4_Java(Tag):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,916 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_get_all.py
|
test_get_all.Item.Armor
|
class Armor(Constant):
id = 2
name = "armor"
weight = 10
|
class Armor(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 3 | 0 | 0 |
147,917 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/tests/test_inherit.py
|
test_inherit.FileType.Image
|
class Image(File):
id = 1
ext = ".jpg"
|
class Image(File):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 4 | 0 | 0 |
147,918 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/_constant2.py
|
constant2._constant2.Food.Fruit.Banana.YellowBanana
|
class YellowBanana:
id = 1
name = "yellow banana"
|
class YellowBanana:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 0 | 0 | 0 |
147,919 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz.tzical
|
class tzical(object):
"""
This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
as set out in `RFC 2445`_ Section 4.6.5 into one or more `tzinfo` objects.
:param `fileobj`:
A file or stream in iCalendar format, which should be UTF-8 encoded
with CRLF endings.
.. _`RFC 2445`: https://www.ietf.org/rfc/rfc2445.txt
"""
def __init__(self, fileobj):
global rrule
from dateutil import rrule
if isinstance(fileobj, string_types):
self._s = fileobj
# ical should be encoded in UTF-8 with CRLF
fileobj = open(fileobj, 'r')
else:
self._s = getattr(fileobj, 'name', repr(fileobj))
fileobj = _ContextWrapper(fileobj)
self._vtz = {}
with fileobj as fobj:
self._parse_rfc(fobj.read())
def keys(self):
"""
Retrieves the available time zones as a list.
"""
return list(self._vtz.keys())
def get(self, tzid=None):
"""
Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.
:param tzid:
If there is exactly one time zone available, omitting ``tzid``
or passing :py:const:`None` value returns it. Otherwise a valid
key (which can be retrieved from :func:`keys`) is required.
:raises ValueError:
Raised if ``tzid`` is not specified but there are either more
or fewer than 1 zone defined.
:returns:
Returns either a :py:class:`datetime.tzinfo` object representing
the relevant time zone or :py:const:`None` if the ``tzid`` was
not found.
"""
if tzid is None:
if len(self._vtz) == 0:
raise ValueError("no timezones defined")
elif len(self._vtz) > 1:
raise ValueError("more than one timezone available")
tzid = next(iter(self._vtz))
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError("empty offset")
if s[0] in ('+', '-'):
signal = (-1, +1)[s[0] == '+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
elif len(s) == 6:
return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
else:
raise ValueError("invalid offset: " + s)
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError("empty string")
# Unfold
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i - 1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError("unknown component: " + value)
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError(
"component not closed: " + comptype)
if not tzid:
raise ValueError("mandatory TZID not found")
if not comps:
raise ValueError(
"at least one component is needed")
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError("mandatory DTSTART not found")
if tzoffsetfrom is None:
raise ValueError(
"mandatory TZOFFSETFROM not found")
if tzoffsetto is None:
raise ValueError(
"mandatory TZOFFSETFROM not found")
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError("invalid component end: " + value)
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError(
"unsupported %s parm: %s " % (name, parms[0]))
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError(
"unsupported TZOFFSETTO parm: " + parms[0])
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError(
"unsupported TZNAME parm: " + parms[0])
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError("unsupported property: " + name)
else:
if name == "TZID":
if parms:
raise ValueError(
"unsupported TZID parm: " + parms[0])
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError("unsupported property: " + name)
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
|
class tzical(object):
'''
This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
as set out in `RFC 2445`_ Section 4.6.5 into one or more `tzinfo` objects.
:param `fileobj`:
A file or stream in iCalendar format, which should be UTF-8 encoded
with CRLF endings.
.. _`RFC 2445`: https://www.ietf.org/rfc/rfc2445.txt
'''
def __init__(self, fileobj):
pass
def keys(self):
'''
Retrieves the available time zones as a list.
'''
pass
def get(self, tzid=None):
'''
Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.
:param tzid:
If there is exactly one time zone available, omitting ``tzid``
or passing :py:const:`None` value returns it. Otherwise a valid
key (which can be retrieved from :func:`keys`) is required.
:raises ValueError:
Raised if ``tzid`` is not specified but there are either more
or fewer than 1 zone defined.
:returns:
Returns either a :py:class:`datetime.tzinfo` object representing
the relevant time zone or :py:const:`None` if the ``tzid`` was
not found.
'''
pass
def _parse_offset(self, s):
pass
def _parse_rfc(self, s):
pass
def __repr__(self):
pass
| 7 | 3 | 30 | 2 | 25 | 4 | 8 | 0.2 | 1 | 6 | 3 | 0 | 6 | 2 | 6 | 6 | 199 | 17 | 152 | 29 | 143 | 30 | 117 | 28 | 108 | 35 | 1 | 5 | 48 |
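tzical accepts either a path or a file-like object, so the parser can be exercised without touching disk. The snippet below is an editor-added sketch that feeds a minimal single-component VTIMEZONE through io.StringIO; it assumes the released dateutil package (rather than this vendored copy), and the zone id Example/Zone is made up:

import io
from datetime import datetime
from dateutil.tz import tzical   # assumption: the released dateutil package

# A minimal iCalendar VTIMEZONE with one fixed STANDARD component.
vtimezone = "\r\n".join([
    "BEGIN:VTIMEZONE",
    "TZID:Example/Zone",
    "BEGIN:STANDARD",
    "DTSTART:19700101T000000",
    "TZOFFSETFROM:+0100",
    "TZOFFSETTO:+0100",
    "TZNAME:EXT",
    "END:STANDARD",
    "END:VTIMEZONE",
])

ical = tzical(io.StringIO(vtimezone))
print(ical.keys())                                   # ['Example/Zone']
tz = ical.get()                                      # only one zone, so tzid may be omitted
print(datetime(2020, 1, 1, tzinfo=tz).utcoffset())   # 1:00:00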
147,920 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/_constant2.py
|
constant2._constant2.Constant
|
class Constant(_Constant):
pass
|
class Constant(_Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 29 | 0 | 0 | 0 | 28 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
147,921 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/_superjson.py
|
constant2.pkg.superjson._superjson.Meta
|
class Meta(type):
def __new__(cls, name, bases, attrs):
klass = super(Meta, cls).__new__(cls, name, bases, attrs)
_dumpers = dict()
_loaders = dict()
for base in inspect.getmro(klass):
for attr, value in base.__dict__.items():
dumper_warning_message = WARN_MSG.format(
attr=attr,
method_type="dumper",
obj_or_dct="obj",
dump_or_load="dump",
)
loader_warning_message = WARN_MSG.format(
attr=attr,
method_type="loader",
obj_or_dct="dct",
dump_or_load="load",
)
# find dumper method,
if attr.startswith("dump_"):
try:
if is_dumper_method(value):
class_name = get_class_name_from_dumper_loader_method(
value)
_dumpers[class_name] = value
else:
logger.warning(dumper_warning_message)
except TypeError:
logger.warning(dumper_warning_message)
# find loader method
if attr.startswith("load_"):
try:
if is_loader_method(value):
class_name = get_class_name_from_dumper_loader_method(
value)
_loaders[class_name] = value
else:
logger.warning(loader_warning_message)
except TypeError:
logger.warning(loader_warning_message)
klass._dumpers = _dumpers
klass._loaders = _loaders
return klass
|
class Meta(type):
def __new__(cls, name, bases, attrs):
pass
| 2 | 0 | 49 | 6 | 41 | 2 | 9 | 0.05 | 1 | 3 | 0 | 0 | 1 | 0 | 1 | 14 | 51 | 7 | 42 | 10 | 40 | 2 | 28 | 10 | 26 | 9 | 2 | 5 | 9 |
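The metaclass above walks the MRO and registers every method whose name starts with dump_ or load_, so converters become available simply by defining them with the right prefix. The standalone sketch below (editor-added) reproduces that discovery pattern in miniature; keying the registries by the method-name suffix is a simplification, since the real Meta derives the key via get_class_name_from_dumper_loader_method, and RegistryMeta / Codec are invented names:

import inspect

class RegistryMeta(type):
    def __new__(mcs, name, bases, attrs):
        klass = super(RegistryMeta, mcs).__new__(mcs, name, bases, attrs)
        dumpers, loaders = {}, {}
        # Walk the MRO so converters defined on base classes are inherited.
        for base in inspect.getmro(klass):
            for attr, value in base.__dict__.items():
                if attr.startswith("dump_"):
                    dumpers[attr[len("dump_"):]] = value
                elif attr.startswith("load_"):
                    loaders[attr[len("load_"):]] = value
        klass._dumpers = dumpers
        klass._loaders = loaders
        return klass

class Codec(metaclass=RegistryMeta):
    def dump_complex(self, obj):
        return {"$complex": [obj.real, obj.imag]}

    def load_complex(self, dct):
        real, imag = dct["$complex"]
        return complex(real, imag)

codec = Codec()
print(sorted(codec._dumpers))                                         # ['complex']
print(codec._loaders["complex"](codec, codec.dump_complex(3 + 4j)))   # (3+4j)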
147,922 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/pytest.py
|
constant2.pkg.pytest.approx
|
class approx(object):
"""
Assert that two numbers (or two sets of numbers) are equal to each other
within some tolerance.
Due to the `intricacies of floating-point arithmetic`__, numbers that we
would intuitively expect to be equal are not always so::
>>> 0.1 + 0.2 == 0.3
False
__ https://docs.python.org/3/tutorial/floatingpoint.html
This problem is commonly encountered when writing tests, e.g. when making
sure that floating-point values are what you expect them to be. One way to
deal with this problem is to assert that two floating-point numbers are
equal to within some appropriate tolerance::
>>> abs((0.1 + 0.2) - 0.3) < 1e-6
True
However, comparisons like this are tedious to write and difficult to
understand. Furthermore, absolute comparisons like the one above are
usually discouraged because there's no tolerance that works well for all
situations. ``1e-6`` is good for numbers around ``1``, but too small for
very big numbers and too big for very small ones. It's better to express
the tolerance as a fraction of the expected value, but relative comparisons
like that are even more difficult to write correctly and concisely.
The ``approx`` class performs floating-point comparisons using a syntax
that's as intuitive as possible::
>>> from pytest import approx
>>> 0.1 + 0.2 == approx(0.3)
True
The same syntax also works on sequences of numbers::
>>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
True
By default, ``approx`` considers numbers within a relative tolerance of
``1e-6`` (i.e. one part in a million) of its expected value to be equal.
This treatment would lead to surprising results if the expected value was
``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
To handle this case less surprisingly, ``approx`` also considers numbers
within an absolute tolerance of ``1e-12`` of its expected value to be
equal. Infinite numbers are another special case. They are only
considered equal to themselves, regardless of the relative tolerance. Both
the relative and absolute tolerances can be changed by passing arguments to
the ``approx`` constructor::
>>> 1.0001 == approx(1)
False
>>> 1.0001 == approx(1, rel=1e-3)
True
>>> 1.0001 == approx(1, abs=1e-3)
True
If you specify ``abs`` but not ``rel``, the comparison will not consider
the relative tolerance at all. In other words, two numbers that are within
the default relative tolerance of ``1e-6`` will still be considered unequal
if they exceed the specified absolute tolerance. If you specify both
``abs`` and ``rel``, the numbers will be considered equal if either
tolerance is met::
>>> 1 + 1e-8 == approx(1)
True
>>> 1 + 1e-8 == approx(1, abs=1e-12)
False
>>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
True
If you're thinking about using ``approx``, then you might want to know how
it compares to other good ways of comparing floating-point numbers. All of
these algorithms are based on relative and absolute tolerances and should
agree for the most part, but they do have meaningful differences:
- ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
tolerance is met. Because the relative tolerance is calculated w.r.t.
both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
``b`` is a "reference value"). You have to specify an absolute tolerance
if you want to compare to ``0.0`` because there is no tolerance by
default. Only available in python>=3.5. `More information...`__
__ https://docs.python.org/3/library/math.html#math.isclose
- ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
    between ``a`` and ``b`` is less than the sum of the relative tolerance
w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
is only calculated w.r.t. ``b``, this test is asymmetric and you can
think of ``b`` as the reference value. Support for comparing sequences
is provided by ``numpy.allclose``. `More information...`__
__ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
- ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
are within an absolute tolerance of ``1e-7``. No relative tolerance is
considered and the absolute tolerance cannot be changed, so this function
is not appropriate for very large or very small numbers. Also, it's only
available in subclasses of ``unittest.TestCase`` and it's ugly because it
doesn't follow PEP8. `More information...`__
__ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
- ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
Because the relative tolerance is only calculated w.r.t. ``b``, this test
is asymmetric and you can think of ``b`` as the reference value. In the
special case that you explicitly specify an absolute tolerance but not a
relative tolerance, only the absolute tolerance is considered.
"""
def __init__(self, expected, rel=None, abs=None):
self.expected = expected
self.abs = abs
self.rel = rel
def __repr__(self):
return ', '.join(repr(x) for x in self.expected)
def __eq__(self, actual):
from collections import Iterable
if not isinstance(actual, Iterable):
actual = [actual]
if len(actual) != len(self.expected):
return False
return all(a == x for a, x in zip(actual, self.expected))
__hash__ = None
def __ne__(self, actual):
return not (actual == self)
@property
def expected(self):
# Regardless of whether the user-specified expected value is a number
# or a sequence of numbers, return a list of ApproxNotIterable objects
# that can be compared against.
from collections import Iterable
def approx_non_iter(x): return ApproxNonIterable(x, self.rel, self.abs)
if isinstance(self._expected, Iterable):
return [approx_non_iter(x) for x in self._expected]
else:
return [approx_non_iter(self._expected)]
@expected.setter
def expected(self, expected):
self._expected = expected
|
class approx(object):
'''
Assert that two numbers (or two sets of numbers) are equal to each other
within some tolerance.
Due to the `intricacies of floating-point arithmetic`__, numbers that we
would intuitively expect to be equal are not always so::
>>> 0.1 + 0.2 == 0.3
False
__ https://docs.python.org/3/tutorial/floatingpoint.html
This problem is commonly encountered when writing tests, e.g. when making
sure that floating-point values are what you expect them to be. One way to
deal with this problem is to assert that two floating-point numbers are
equal to within some appropriate tolerance::
>>> abs((0.1 + 0.2) - 0.3) < 1e-6
True
However, comparisons like this are tedious to write and difficult to
understand. Furthermore, absolute comparisons like the one above are
usually discouraged because there's no tolerance that works well for all
situations. ``1e-6`` is good for numbers around ``1``, but too small for
very big numbers and too big for very small ones. It's better to express
the tolerance as a fraction of the expected value, but relative comparisons
like that are even more difficult to write correctly and concisely.
The ``approx`` class performs floating-point comparisons using a syntax
that's as intuitive as possible::
>>> from pytest import approx
>>> 0.1 + 0.2 == approx(0.3)
True
The same syntax also works on sequences of numbers::
>>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
True
By default, ``approx`` considers numbers within a relative tolerance of
``1e-6`` (i.e. one part in a million) of its expected value to be equal.
This treatment would lead to surprising results if the expected value was
``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
To handle this case less surprisingly, ``approx`` also considers numbers
within an absolute tolerance of ``1e-12`` of its expected value to be
equal. Infinite numbers are another special case. They are only
considered equal to themselves, regardless of the relative tolerance. Both
the relative and absolute tolerances can be changed by passing arguments to
the ``approx`` constructor::
>>> 1.0001 == approx(1)
False
>>> 1.0001 == approx(1, rel=1e-3)
True
>>> 1.0001 == approx(1, abs=1e-3)
True
If you specify ``abs`` but not ``rel``, the comparison will not consider
the relative tolerance at all. In other words, two numbers that are within
the default relative tolerance of ``1e-6`` will still be considered unequal
if they exceed the specified absolute tolerance. If you specify both
``abs`` and ``rel``, the numbers will be considered equal if either
tolerance is met::
>>> 1 + 1e-8 == approx(1)
True
>>> 1 + 1e-8 == approx(1, abs=1e-12)
False
>>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
True
If you're thinking about using ``approx``, then you might want to know how
it compares to other good ways of comparing floating-point numbers. All of
these algorithms are based on relative and absolute tolerances and should
agree for the most part, but they do have meaningful differences:
- ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
tolerance is met. Because the relative tolerance is calculated w.r.t.
both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
``b`` is a "reference value"). You have to specify an absolute tolerance
if you want to compare to ``0.0`` because there is no tolerance by
default. Only available in python>=3.5. `More information...`__
__ https://docs.python.org/3/library/math.html#math.isclose
- ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
    between ``a`` and ``b`` is less than the sum of the relative tolerance
w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
is only calculated w.r.t. ``b``, this test is asymmetric and you can
think of ``b`` as the reference value. Support for comparing sequences
is provided by ``numpy.allclose``. `More information...`__
__ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
- ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
are within an absolute tolerance of ``1e-7``. No relative tolerance is
considered and the absolute tolerance cannot be changed, so this function
is not appropriate for very large or very small numbers. Also, it's only
available in subclasses of ``unittest.TestCase`` and it's ugly because it
doesn't follow PEP8. `More information...`__
__ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
- ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
Because the relative tolerance is only calculated w.r.t. ``b``, this test
is asymmetric and you can think of ``b`` as the reference value. In the
special case that you explicitly specify an absolute tolerance but not a
relative tolerance, only the absolute tolerance is considered.
'''
def __init__(self, expected, rel=None, abs=None):
pass
def __repr__(self):
pass
def __eq__(self, actual):
pass
def __ne__(self, actual):
pass
@property
def expected(self):
pass
def approx_non_iter(x):
pass
@expected.setter
def expected(self):
pass
| 10 | 1 | 4 | 0 | 4 | 0 | 1 | 3.32 | 1 | 2 | 1 | 0 | 6 | 3 | 6 | 6 | 151 | 30 | 28 | 16 | 17 | 93 | 26 | 14 | 16 | 3 | 1 | 1 | 10 |
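The element-wise comparison is delegated to ApproxNonIterable, which is not part of this record. Based on the rules spelled out in the docstring (a relative tolerance measured against the expected value, or-ed with an absolute tolerance, and infinities only equal to themselves), an editor-added sketch of that check looks like the following; approx_eq is an invented name and the defaults come from the docstring:

import math

def approx_eq(actual, expected, rel=1e-6, abs_tol=1e-12):
    # Equal when the difference is within the relative tolerance of the
    # expected (reference) value OR within the absolute tolerance.
    if math.isinf(expected):
        return actual == expected          # infinities only equal themselves
    tolerance = max(rel * abs(expected), abs_tol)
    return abs(actual - expected) <= tolerance

print(approx_eq(0.1 + 0.2, 0.3))           # True
print(approx_eq(1.0001, 1))                # False
print(approx_eq(1.0001, 1, rel=1e-3))      # True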
147,923 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/pylru.py
|
constant2.pkg.pylru.lrudecorator
|
class lrudecorator(object):
def __init__(self, size):
self.cache = lrucache(size)
def __call__(self, func):
def wrapper(*args, **kwargs):
kwtuple = tuple((key, kwargs[key])
for key in sorted(kwargs.keys()))
key = (args, kwtuple)
try:
return self.cache[key]
except KeyError:
pass
value = func(*args, **kwargs)
self.cache[key] = value
return value
wrapper.cache = self.cache
wrapper.size = self.cache.size
wrapper.clear = self.cache.clear
return functools.update_wrapper(wrapper, func)
|
class lrudecorator(object):
def __init__(self, size):
pass
def __call__(self, func):
pass
def wrapper(*args, **kwargs):
pass
| 4 | 0 | 11 | 1 | 10 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 2 | 1 | 2 | 2 | 22 | 3 | 19 | 7 | 15 | 0 | 18 | 7 | 14 | 2 | 1 | 1 | 4 |
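lrudecorator memoizes a function through the lrucache defined in the same module, keying each call on its positional arguments plus sorted keyword arguments. A short usage sketch (editor-added; it assumes the standalone pylru package from which this module is vendored):

from pylru import lrudecorator   # assumption: standalone pylru package

calls = {"n": 0}

@lrudecorator(100)               # cache up to 100 distinct argument tuples
def slow_square(x):
    calls["n"] += 1
    return x * x

print(slow_square(12))           # 144, computed
print(slow_square(12))           # 144, served from the cache
print(calls["n"])                # 1 -- the body ran only once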
147,924 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/pylru.py
|
constant2.pkg.pylru.lrucache
|
class lrucache(object):
def __init__(self, size, callback=None):
self.callback = callback
# Create an empty hash table.
self.table = {}
# Initialize the doubly linked list with one empty node. This is an
# invariant. The cache size must always be greater than zero. Each
# node has a 'prev' and 'next' variable to hold the node that comes
# before it and after it respectively. Initially the two variables
# each point to the head node itself, creating a circular doubly
# linked list of size one. Then the size() method is used to adjust
# the list to the desired size.
self.head = _dlnode()
self.head.next = self.head
self.head.prev = self.head
self.listSize = 1
# Adjust the size
self.size(size)
def __len__(self):
return len(self.table)
def clear(self):
for node in self.dli():
node.empty = True
node.key = None
node.value = None
self.table.clear()
def __contains__(self, key):
return key in self.table
# Looks up a value in the cache without affecting cache order.
def peek(self, key):
# Look up the node
node = self.table[key]
return node.value
def __getitem__(self, key):
# Look up the node
node = self.table[key]
        # Update the list ordering. Move this node so that it directly
        # precedes the head node. Then set the 'head' variable to it. This
# makes it the new head of the list.
self.mtf(node)
self.head = node
# Return the value.
return node.value
def get(self, key, default=None):
"""Get an item - return default (None) if not present"""
try:
return self[key]
except KeyError:
return default
def __setitem__(self, key, value):
# First, see if any value is stored under 'key' in the cache already.
# If so we are going to replace that value with the new one.
if key in self.table:
# Lookup the node
node = self.table[key]
# Replace the value.
node.value = value
# Update the list ordering.
self.mtf(node)
self.head = node
return
# Ok, no value is currently stored under 'key' in the cache. We need
# to choose a node to place the new item in. There are two cases. If
# the cache is full some item will have to be pushed out of the
# cache. We want to choose the node with the least recently used
# item. This is the node at the tail of the list. If the cache is not
# full we want to choose a node that is empty. Because of the way the
# list is managed, the empty nodes are always together at the tail
        # end of the list. Thus, in either case, by choosing the node at the
# tail of the list our conditions are satisfied.
        # Since the list is circular, the tail node directly precedes the
# 'head' node.
node = self.head.prev
# If the node already contains something we need to remove the old
# key from the dictionary.
if not node.empty:
if self.callback is not None:
self.callback(node.key, node.value)
del self.table[node.key]
# Place the new key and value in the node
node.empty = False
node.key = key
node.value = value
# Add the node to the dictionary under the new key.
self.table[key] = node
# We need to move the node to the head of the list. The node is the
        # tail node, so it directly precedes the head node due to the list
# being circular. Therefore, the ordering is already correct, we just
# need to adjust the 'head' variable.
self.head = node
def __delitem__(self, key):
# Lookup the node, then remove it from the hash table.
node = self.table[key]
del self.table[key]
node.empty = True
# Not strictly necessary.
node.key = None
node.value = None
# Because this node is now empty we want to reuse it before any
# non-empty node. To do that we want to move it to the tail of the
        # list. We move it so that it directly precedes the 'head' node. This
# makes it the tail node. The 'head' is then adjusted. This
# adjustment ensures correctness even for the case where the 'node'
# is the 'head' node.
self.mtf(node)
self.head = node.next
def __iter__(self):
# Return an iterator that returns the keys in the cache in order from
# the most recently to least recently used. Does not modify the cache
# order.
for node in self.dli():
yield node.key
def items(self):
# Return an iterator that returns the (key, value) pairs in the cache
# in order from the most recently to least recently used. Does not
# modify the cache order.
for node in self.dli():
yield (node.key, node.value)
def keys(self):
# Return an iterator that returns the keys in the cache in order from
# the most recently to least recently used. Does not modify the cache
# order.
for node in self.dli():
yield node.key
def values(self):
# Return an iterator that returns the values in the cache in order
# from the most recently to least recently used. Does not modify the
# cache order.
for node in self.dli():
yield node.value
def size(self, size=None):
if size is not None:
assert size > 0
if size > self.listSize:
self.addTailNode(size - self.listSize)
elif size < self.listSize:
self.removeTailNode(self.listSize - size)
return self.listSize
# Increases the size of the cache by inserting n empty nodes at the tail
# of the list.
def addTailNode(self, n):
for i in range(n):
node = _dlnode()
node.next = self.head
node.prev = self.head.prev
self.head.prev.next = node
self.head.prev = node
self.listSize += n
# Decreases the size of the list by removing n nodes from the tail of the
# list.
def removeTailNode(self, n):
assert self.listSize > n
for i in range(n):
node = self.head.prev
if not node.empty:
if self.callback is not None:
self.callback(node.key, node.value)
del self.table[node.key]
# Splice the tail node out of the list
self.head.prev = node.prev
node.prev.next = self.head
# The next four lines are not strictly necessary.
node.prev = None
node.next = None
node.key = None
node.value = None
self.listSize -= n
# This method adjusts the ordering of the doubly linked list so that
# 'node' directly precedes the 'head' node. Because of the order of
# operations, if 'node' already directly precedes the 'head' node or if
# 'node' is the 'head' node the order of the list will be unchanged.
def mtf(self, node):
node.prev.next = node.next
node.next.prev = node.prev
node.prev = self.head.prev
node.next = self.head.prev.next
node.next.prev = node
node.prev.next = node
# This method returns an iterator that iterates over the non-empty nodes
# in the doubly linked list in order from the most recently to the least
# recently used.
def dli(self):
node = self.head
for i in range(len(self.table)):
yield node
node = node.next
|
class lrucache(object):
def __init__(self, size, callback=None):
pass
def __len__(self):
pass
def clear(self):
pass
def __contains__(self, key):
pass
def peek(self, key):
pass
def __getitem__(self, key):
pass
def get(self, key, default=None):
'''Get an item - return default (None) if not present'''
pass
def __setitem__(self, key, value):
pass
def __delitem__(self, key):
pass
def __iter__(self):
pass
def items(self):
pass
def keys(self):
pass
def values(self):
pass
def size(self, size=None):
pass
def addTailNode(self, n):
pass
def removeTailNode(self, n):
pass
def mtf(self, node):
pass
def dli(self):
pass
| 19 | 1 | 12 | 2 | 6 | 3 | 2 | 0.66 | 1 | 3 | 1 | 0 | 18 | 4 | 18 | 18 | 241 | 55 | 112 | 38 | 93 | 74 | 111 | 38 | 92 | 4 | 1 | 3 | 35 |
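lrucache behaves like a size-bounded dict: lookups and stores move the key to the head of the circular list, and inserting into a full cache ejects the tail (least recently used) entry, notifying the callback if one was given. A usage sketch (editor-added; assumes the standalone pylru package):

from pylru import lrucache       # assumption: standalone pylru package

evicted = []
cache = lrucache(2, callback=lambda key, value: evicted.append(key))

cache["a"] = 1
cache["b"] = 2
cache["a"]                       # touch "a": "b" becomes the least recently used
cache["c"] = 3                   # capacity is 2, so "b" is ejected via the callback
print("b" in cache)              # False
print(evicted)                   # ['b']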
147,925 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/tests/test_inherit.py
|
test_inherit.TestFileType
|
class TestFileType(object):
def test(self):
assert FileType.Image.Items() == [
("ext", ".jpg"), ("id", 1), ("type", "File")
]
|
class TestFileType(object):
def test(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 2 | 2 | 0 | 1 | 0 | 1 | 1 | 5 | 0 | 5 | 2 | 3 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
147,926 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/tests/test_inherit.py
|
test_inherit.File
|
class File(Constant):
id = None
ext = None
type = "File"
|
class File(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 3 | 0 | 0 | 0 | 28 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 3 | 0 | 0 |
147,927 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/tests/test_get_all.py
|
test_get_all.TestItem
|
class TestItem(object):
"""
**中文文档**
测试GetAll的排序功能是否有效。
"""
def test_GetAll(self):
assert Item.GetAll("weight", 10, sort_by="name") == [
Item.Armor, Item.Weapon,
]
def test_get_all(self):
assert item.get_all("weight", 10, sort_by="name") == [
item.Armor, item.Weapon,
]
|
class TestItem(object):
'''
**中文文档**
测试GetAll的排序功能是否有效。
'''
def test_GetAll(self):
pass
def test_get_all(self):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 1 | 0.44 | 1 | 3 | 3 | 0 | 2 | 0 | 2 | 2 | 17 | 4 | 9 | 3 | 6 | 4 | 5 | 3 | 2 | 1 | 1 | 0 | 2 |
147,928 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.TestDesignPattern
|
class TestDesignPattern(object):
def test_BackAssign(self):
# Employee vs Department
assert EmployeeEntity.E1_Alice.department == DepartmentEntity.D1_HR
assert EmployeeEntity.E2_Bob.department == None
assert EmployeeEntity.E3_Cathy.department == DepartmentEntity.D2_IT
assert DepartmentEntity.D1_HR.employees == [EmployeeEntity.E1_Alice, ]
assert DepartmentEntity.D2_IT.employees == [EmployeeEntity.E3_Cathy]
# Employee vs Tag
assert EmployeeEntity.E1_Alice.tags == [TagEntity.T2_Senior, ]
assert EmployeeEntity.E2_Bob.tags == [
TagEntity.T1_Junior, TagEntity.T3_Python]
assert EmployeeEntity.E3_Cathy.tags == [
TagEntity.T2_Senior, TagEntity.T3_Python, TagEntity.T4_Java]
assert TagEntity.T1_Junior.employees == [EmployeeEntity.E2_Bob, ]
assert TagEntity.T2_Senior.employees == [
EmployeeEntity.E1_Alice, EmployeeEntity.E3_Cathy]
assert TagEntity.T3_Python.employees == [
EmployeeEntity.E2_Bob, EmployeeEntity.E3_Cathy]
assert TagEntity.T4_Java.employees == [EmployeeEntity.E3_Cathy, ]
|
class TestDesignPattern(object):
def test_BackAssign(self):
pass
| 2 | 0 | 22 | 3 | 17 | 2 | 1 | 0.11 | 1 | 12 | 12 | 0 | 1 | 0 | 1 | 1 | 23 | 3 | 18 | 2 | 16 | 2 | 14 | 2 | 12 | 1 | 1 | 0 | 1 |
147,929 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.Tag
|
class Tag(Constant):
id = None
name = None
employees = list()
|
class Tag(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 4 | 0 | 0 | 0 | 28 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 3 | 0 | 0 |
147,930 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.Employee
|
class Employee(Constant):
id = None
name = None
department = None
tags = list()
|
class Employee(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 3 | 0 | 0 | 0 | 28 | 5 | 0 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 3 | 0 | 0 |
147,931 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/_superjson.py
|
constant2.pkg.superjson._superjson.SuperJson
|
class SuperJson(object):
"""A extensable json encoder/decoder. You can easily custom converter for
any types.
"""
_dumpers = dict()
_loaders = dict()
def _dump(self, obj):
"""Dump single object to json serializable value.
"""
class_name = get_class_name(obj)
if class_name in self._dumpers:
return self._dumpers[class_name](self, obj)
raise TypeError("%r is not JSON serializable" % obj)
def _json_convert(self, obj):
"""Recursive helper method that converts dict types to standard library
json serializable types, so they can be converted into json.
"""
# OrderedDict
if isinstance(obj, OrderedDict):
try:
return self._dump(obj)
except TypeError:
return {k: self._json_convert(v) for k, v in iteritems(obj)}
# nested dict
elif isinstance(obj, dict):
return {k: self._json_convert(v) for k, v in iteritems(obj)}
# list or tuple
elif isinstance(obj, (list, tuple)):
return list((self._json_convert(v) for v in obj))
# float
elif isinstance(obj, float):
return float(json.encoder.FLOAT_REPR(obj))
# single object
try:
return self._dump(obj)
except TypeError:
return obj
def _object_hook1(self, dct):
"""A function can convert dict data into object.
it's an O(1) implementation.
"""
# {"$class_name": obj_data}
if len(dct) == 1:
for key, value in iteritems(dct):
class_name = key[1:]
if class_name in self._loaders:
return self._loaders[class_name](self, dct)
return dct
return dct
def _object_hook2(self, dct): # pragma: no cover
"""Another object hook implementation.
it's an O(N) implementation.
"""
for class_name, loader in self._loaders.items():
if ("$" + class_name) in dct:
return loader(self, dct)
return dct
def dumps(self, obj,
indent=None,
sort_keys=None,
pretty=False,
float_precision=None,
ensure_ascii=True,
compress=False,
**kwargs):
"""Dump any object into json string.
:param pretty: if True, dump json into pretty indent and sorted key
format.
:type pretty: bool
:param float_precision: default ``None``, limit floats to
N-decimal points.
:type float_precision: integer
:param compress: default ``False. If True, then compress encoded string.
:type compress: bool
"""
if pretty:
indent = 4
sort_keys = True
if float_precision is None:
json.encoder.FLOAT_REPR = repr
else:
json.encoder.FLOAT_REPR = lambda x: format(
x, ".%sf" % float_precision)
s = json.dumps(
self._json_convert(obj),
indent=indent,
sort_keys=sort_keys,
ensure_ascii=ensure_ascii,
**kwargs
)
if compress:
s = compresslib.compress(s, return_type="str")
return s
def loads(self, s,
object_hook=None,
decompress=False,
ignore_comments=False,
**kwargs):
"""load object from json encoded string.
:param decompress: default ``False. If True, then decompress string.
:type decompress: bool
:param ignore_comments: default ``False. If True, then ignore comments.
:type ignore_comments: bool
"""
if decompress:
s = compresslib.decompress(s, return_type="str")
if ignore_comments:
s = strip_comments(s)
if object_hook is None:
object_hook = self._object_hook1
if "object_pairs_hook" in kwargs:
del kwargs["object_pairs_hook"]
obj = json.loads(
s,
object_hook=object_hook,
object_pairs_hook=None,
**kwargs
)
return obj
def dump(self, obj,
abspath,
indent=None,
sort_keys=None,
pretty=False,
float_precision=None,
ensure_ascii=True,
overwrite=False,
verbose=True,
**kwargs):
"""Dump any object into file.
:param abspath: if ``*.json, *.js** then do regular dump. if ``*.gz``,
then perform compression.
:type abspath: str
:param pretty: if True, dump json into pretty indent and sorted key
format.
:type pretty: bool
:param float_precision: default ``None``, limit floats to
N-decimal points.
:type float_precision: integer
:param overwrite: default ``False``, If ``True``, when you dump to
existing file, it silently overwrite it. If ``False``, an alert
message is shown. Default setting ``False`` is to prevent overwrite
file by mistake.
:type overwrite: boolean
:param verbose: default True, help-message-display trigger.
:type verbose: boolean
"""
prt_console("\nDump to '%s' ..." % abspath, verbose)
is_compressed = is_compressed_json_file(abspath)
if not overwrite:
if os.path.exists(abspath): # pragma: no cover
prt_console(
" Stop! File exists and overwrite is not allowed",
verbose,
)
return
st = time.clock()
s = self.dumps(
obj,
indent=indent,
sort_keys=sort_keys,
pretty=pretty,
float_precision=float_precision,
ensure_ascii=ensure_ascii,
compress=False, # use uncompressed string, and directly write to file
**kwargs
)
with atomic_write(abspath, mode="wb", overwrite=True) as f:
if is_compressed:
f.write(compresslib.compress(s, return_type="bytes"))
else:
f.write(s.encode("utf-8"))
prt_console(
" Complete! Elapse %.6f sec." % (time.clock() - st),
verbose,
)
return s
def load(self, abspath,
object_hook=None,
ignore_comments=False,
verbose=True,
**kwargs):
"""load object from json file.
:param abspath: if ``*.json, *.js** then do regular dump. if ``*.gz``,
then perform decompression.
:type abspath: str
:param ignore_comments: default ``False. If True, then ignore comments.
:type ignore_comments: bool
:param verbose: default True, help-message-display trigger.
:type verbose: boolean
"""
prt_console("\nLoad from '%s' ..." % abspath, verbose)
is_compressed = is_compressed_json_file(abspath)
if not os.path.exists(abspath):
raise EnvironmentError("'%s' doesn't exist." % abspath)
st = time.clock()
with open(abspath, "rb") as f:
if is_compressed:
s = compresslib.decompress(f.read(), return_type="str")
else:
s = f.read().decode("utf-8")
obj = self.loads(
s,
object_hook=object_hook,
decompress=False,
ignore_comments=ignore_comments,
)
prt_console(" Complete! Elapse %.6f sec." % (time.clock() - st),
verbose)
return obj
def dump_bytes(self, obj, class_name=bytes_class_name):
"""
        ``bytes`` dumper.
"""
return {"$" + class_name: b64encode(obj).decode()}
def load_bytes(self, dct, class_name=bytes_class_name):
"""
        ``bytes`` loader.
"""
return b64decode(dct["$" + class_name].encode())
def dump_datetime(self, obj, class_name="datetime.datetime"):
"""
``datetime.datetime`` dumper.
"""
return {"$" + class_name: obj.isoformat()}
def load_datetime(self, dct, class_name="datetime.datetime"):
"""
``datetime.datetime`` loader.
"""
return parse(dct["$" + class_name])
def dump_date(self, obj, class_name="datetime.date"):
"""
``datetime.date`` dumper.
"""
return {"$" + class_name: str(obj)}
def load_date(self, dct, class_name="datetime.date"):
"""
``datetime.date`` loader.
"""
return datetime.strptime(dct["$" + class_name], "%Y-%m-%d").date()
def dump_set(self, obj, class_name=set_class_name):
"""
``set`` dumper.
"""
return {"$" + class_name: [self._json_convert(item) for item in obj]}
def load_set(self, dct, class_name=set_class_name):
"""
``set`` loader.
"""
return set(dct["$" + class_name])
def dump_deque(self, obj, class_name="collections.deque"):
"""
``collections.deque`` dumper.
"""
return {"$" + class_name: [self._json_convert(item) for item in obj]}
def load_deque(self, dct, class_name="collections.deque"):
"""
``collections.deque`` loader.
"""
return deque(dct["$" + class_name])
def dump_OrderedDict(self, obj, class_name="collections.OrderedDict"):
"""
``collections.OrderedDict`` dumper.
"""
return {
"$" + class_name: [
(key, self._json_convert(value)) for key, value in iteritems(obj)
]
}
def load_OrderedDict(self, dct, class_name="collections.OrderedDict"):
"""
``collections.OrderedDict`` loader.
"""
return OrderedDict(dct["$" + class_name])
def dump_nparray(self, obj, class_name="numpy.ndarray"):
"""
``numpy.ndarray`` dumper.
"""
return {"$" + class_name: self._json_convert(obj.tolist())}
def load_nparray(self, dct, class_name="numpy.ndarray"):
"""
``numpy.ndarray`` loader.
"""
return np.array(dct["$" + class_name])
|
class SuperJson(object):
    '''An extensible json encoder/decoder. You can easily add custom converters for
    any type.
'''
def _dump(self, obj):
'''Dump single object to json serializable value.
'''
pass
def _json_convert(self, obj):
'''Recursive helper method that converts dict types to standard library
json serializable types, so they can be converted into json.
'''
pass
def _object_hook1(self, dct):
'''A function can convert dict data into object.
it's an O(1) implementation.
'''
pass
def _object_hook2(self, dct):
'''Another object hook implementation.
it's an O(N) implementation.
'''
pass
def dumps(self, obj,
indent=None,
sort_keys=None,
pretty=False,
float_precision=None,
ensure_ascii=True,
compress=False,
**kwargs):
'''Dump any object into json string.
:param pretty: if True, dump json into pretty indent and sorted key
format.
:type pretty: bool
:param float_precision: default ``None``, limit floats to
N-decimal points.
:type float_precision: integer
:param compress: default ``False. If True, then compress encoded string.
:type compress: bool
'''
pass
def loads(self, s,
object_hook=None,
decompress=False,
ignore_comments=False,
**kwargs):
'''load object from json encoded string.
:param decompress: default ``False. If True, then decompress string.
:type decompress: bool
:param ignore_comments: default ``False. If True, then ignore comments.
:type ignore_comments: bool
'''
pass
    def dump(self, obj,
             abspath,
             indent=None,
             sort_keys=None,
             pretty=False,
             float_precision=None,
             ensure_ascii=True,
             overwrite=False,
             verbose=True,
             **kwargs):
'''Dump any object into file.
:param abspath: if ``*.json, *.js** then do regular dump. if ``*.gz``,
then perform compression.
:type abspath: str
:param pretty: if True, dump json into pretty indent and sorted key
format.
:type pretty: bool
:param float_precision: default ``None``, limit floats to
N-decimal points.
:type float_precision: integer
:param overwrite: default ``False``, If ``True``, when you dump to
existing file, it silently overwrite it. If ``False``, an alert
message is shown. Default setting ``False`` is to prevent overwrite
file by mistake.
:type overwrite: boolean
:param verbose: default True, help-message-display trigger.
:type verbose: boolean
'''
pass
    def load(self, abspath,
             object_hook=None,
             ignore_comments=False,
             verbose=True,
             **kwargs):
'''load object from json file.
:param abspath: if ``*.json, *.js** then do regular dump. if ``*.gz``,
then perform decompression.
:type abspath: str
:param ignore_comments: default ``False. If True, then ignore comments.
:type ignore_comments: bool
:param verbose: default True, help-message-display trigger.
:type verbose: boolean
'''
pass
def dump_bytes(self, obj, class_name=bytes_class_name):
'''
        ``bytes`` dumper.
'''
pass
def load_bytes(self, dct, class_name=bytes_class_name):
'''
        ``bytes`` loader.
'''
pass
def dump_datetime(self, obj, class_name="datetime.datetime"):
'''
``datetime.datetime`` dumper.
'''
pass
def load_datetime(self, dct, class_name="datetime.datetime"):
'''
``datetime.datetime`` loader.
'''
pass
    def dump_date(self, obj, class_name="datetime.date"):
'''
``datetime.date`` dumper.
'''
pass
    def load_date(self, dct, class_name="datetime.date"):
'''
``datetime.date`` loader.
'''
pass
def dump_set(self, obj, class_name=set_class_name):
'''
``set`` dumper.
'''
pass
def load_set(self, dct, class_name=set_class_name):
'''
``set`` loader.
'''
pass
def dump_deque(self, obj, class_name="collections.deque"):
'''
``collections.deque`` dumper.
'''
pass
def load_deque(self, dct, class_name="collections.deque"):
'''
``collections.deque`` loader.
'''
pass
def dump_OrderedDict(self, obj, class_name="collections.OrderedDict"):
'''
``collections.OrderedDict`` dumper.
'''
pass
def load_OrderedDict(self, dct, class_name="collections.OrderedDict"):
'''
``collections.OrderedDict`` loader.
'''
pass
def dump_nparray(self, obj, class_name="numpy.ndarray"):
'''
``numpy.ndarray`` dumper.
'''
pass
def load_nparray(self, dct, class_name="numpy.ndarray"):
'''
``numpy.ndarray`` loader.
'''
pass
| 23 | 23 | 15 | 2 | 8 | 5 | 2 | 0.6 | 1 | 9 | 0 | 0 | 22 | 0 | 22 | 22 | 347 | 63 | 179 | 64 | 132 | 108 | 112 | 38 | 89 | 7 | 1 | 3 | 46 |
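With the converters registered by the Meta metaclass shown earlier, a SuperJson instance round-trips values that the standard json module rejects (bytes, set, deque, datetime/date, OrderedDict, numpy arrays) by encoding each one as a {"$<class name>": ...} dictionary. A hedged usage sketch follows; it assumes the published superjson package, which exposes a ready-made instance named json:

from datetime import datetime
from superjson import json as sjson   # assumption: published superjson package

data = {
    "when": datetime(2018, 1, 1, 12, 30),
    "tags": {"python", "json"},        # a set: plain json.dumps would raise TypeError
    "raw": b"\x00\x01",
}

s = sjson.dumps(data, pretty=True)     # special types become {"$<class name>": ...}
restored = sjson.loads(s)
print(type(restored["when"]), restored["tags"] == data["tags"])   # datetime, True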
147,932 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/tests/test_entity_relationship.py
|
test_entity_relationship.Department
|
class Department(Constant):
id = None
name = None
head = None
employees = list()
|
class Department(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 | 0 | 0 | 0 | 28 | 5 | 0 | 5 | 4 | 4 | 0 | 5 | 4 | 4 | 0 | 3 | 0 | 0 |
147,933 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/tests/test_assign_class.py
|
test_assign_class.User
|
class User(Constant):
id = None
name = None
|
class User(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 3 | 0 | 0 |
147,934 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/tests/test_assign_class.py
|
test_assign_class.AddressBook
|
class AddressBook(Constant):
alice = User
bob = User
|
class AddressBook(Constant):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 3 | 0 | 0 |
147,935 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/zoneinfo/__init__.py
|
constant2.pkg.superjson.pkg.dateutil.zoneinfo.ZoneInfoFile
|
class ZoneInfoFile(object):
def __init__(self, zonefile_stream=None):
if zonefile_stream is not None:
with tar_open(fileobj=zonefile_stream, mode='r') as tf:
# dict comprehension does not work on python2.6
# TODO: get back to the nicer syntax when we ditch python2.6
# self.zones = {zf.name: tzfile(tf.extractfile(zf),
# filename = zf.name)
# for zf in tf.getmembers() if zf.isfile()}
self.zones = dict((zf.name, tzfile(tf.extractfile(zf),
filename=zf.name))
for zf in tf.getmembers()
if zf.isfile() and zf.name != METADATA_FN)
# deal with links: They'll point to their parent object. Less
# waste of memory
# links = {zl.name: self.zones[zl.linkname]
# for zl in tf.getmembers() if zl.islnk() or zl.issym()}
links = dict((zl.name, self.zones[zl.linkname])
for zl in tf.getmembers() if
zl.islnk() or zl.issym())
self.zones.update(links)
try:
metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
metadata_str = metadata_json.read().decode('UTF-8')
self.metadata = json.loads(metadata_str)
except KeyError:
# no metadata in tar file
self.metadata = None
else:
self.zones = dict()
self.metadata = None
def get(self, name, default=None):
"""
Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
for retrieving zones from the zone dictionary.
:param name:
The name of the zone to retrieve. (Generally IANA zone names)
:param default:
The value to return in the event of a missing key.
.. versionadded:: 2.6.0
"""
return self.zones.get(name, default)
|
class ZoneInfoFile(object):
def __init__(self, zonefile_stream=None):
pass
def get(self, name, default=None):
'''
Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
for retrieving zones from the zone dictionary.
:param name:
The name of the zone to retrieve. (Generally IANA zone names)
:param default:
The value to return in the event of a missing key.
.. versionadded:: 2.6.0
'''
pass
| 3 | 1 | 23 | 2 | 11 | 10 | 2 | 0.83 | 1 | 3 | 1 | 0 | 2 | 2 | 2 | 2 | 47 | 5 | 23 | 9 | 20 | 19 | 17 | 8 | 14 | 3 | 1 | 3 | 4 |
147,936 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/win.py
|
constant2.pkg.superjson.pkg.dateutil.tz.win.tzwinlocal
|
class tzwinlocal(tzwinbase):
def __init__(self):
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
keydict = valuestodict(tzlocalkey)
self._std_abbr = keydict["StandardName"]
self._dst_abbr = keydict["DaylightName"]
try:
tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
sn=self._std_abbr)
with winreg.OpenKey(handle, tzkeyname) as tzkey:
_keydict = valuestodict(tzkey)
self._display = _keydict["Display"]
except OSError:
self._display = None
stdoffset = -keydict["Bias"] - keydict["StandardBias"]
dstoffset = stdoffset - keydict["DaylightBias"]
self._std_offset = datetime.timedelta(minutes=stdoffset)
self._dst_offset = datetime.timedelta(minutes=dstoffset)
# For reasons unclear, in this particular key, the day of week has been
# moved to the END of the SYSTEMTIME structure.
tup = struct.unpack("=8h", keydict["StandardStart"])
(self._stdmonth,
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[1:5]
self._stddayofweek = tup[7]
tup = struct.unpack("=8h", keydict["DaylightStart"])
(self._dstmonth,
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[1:5]
self._dstdayofweek = tup[7]
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = self._get_hasdst()
def __repr__(self):
return "tzwinlocal()"
def __str__(self):
# str will return the standard name, not the daylight name.
return "tzwinlocal(%s)" % repr(self._std_abbr)
def __reduce__(self):
return (self.__class__, ())
|
class tzwinlocal(tzwinbase):
def __init__(self):
pass
def __repr__(self):
pass
def __str__(self):
pass
def __reduce__(self):
pass
| 5 | 0 | 13 | 3 | 10 | 1 | 1 | 0.13 | 1 | 2 | 0 | 0 | 4 | 17 | 4 | 32 | 56 | 14 | 39 | 31 | 34 | 5 | 32 | 22 | 27 | 2 | 4 | 3 | 5 |
147,937 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/win.py
|
constant2.pkg.superjson.pkg.dateutil.tz.win.tzwin
|
class tzwin(tzwinbase):
def __init__(self, name):
self._name = name
# multiple contexts only possible in 2.7 and 3.1, we still support 2.6
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
tzkeyname = text_type("{kn}\\{name}").format(
kn=TZKEYNAME, name=name)
with winreg.OpenKey(handle, tzkeyname) as tzkey:
keydict = valuestodict(tzkey)
self._std_abbr = keydict["Std"]
self._dst_abbr = keydict["Dlt"]
self._display = keydict["Display"]
# See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=3l16h", keydict["TZI"])
stdoffset = -tup[0] - tup[1] # Bias + StandardBias * -1
dstoffset = stdoffset - tup[2] # + DaylightBias * -1
self._std_offset = datetime.timedelta(minutes=stdoffset)
self._dst_offset = datetime.timedelta(minutes=dstoffset)
# for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[4:9]
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[12:17]
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = self._get_hasdst()
def __repr__(self):
return "tzwin(%s)" % repr(self._name)
def __reduce__(self):
return (self.__class__, (self._name,))
|
class tzwin(tzwinbase):
def __init__(self, name):
pass
def __repr__(self):
pass
def __reduce__(self):
pass
| 4 | 0 | 14 | 2 | 10 | 3 | 1 | 0.31 | 1 | 1 | 0 | 0 | 3 | 18 | 3 | 31 | 46 | 10 | 32 | 29 | 28 | 10 | 23 | 19 | 19 | 1 | 4 | 2 | 3 |
147,938 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/win.py
|
constant2.pkg.superjson.pkg.dateutil.tz.win.tzres
|
class tzres(object):
"""
Class for accessing `tzres.dll`, which contains timezone name related
resources.
.. versionadded:: 2.5.0
"""
p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char
def __init__(self, tzres_loc='tzres.dll'):
# Load the user32 DLL so we can load strings from tzres
user32 = ctypes.WinDLL('user32')
# Specify the LoadStringW function
user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
wintypes.UINT,
wintypes.LPWSTR,
ctypes.c_int)
self.LoadStringW = user32.LoadStringW
self._tzres = ctypes.WinDLL(tzres_loc)
self.tzres_loc = tzres_loc
def load_name(self, offset):
"""
Load a timezone name from a DLL offset (integer).
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.load_name(112))
'Eastern Standard Time'
:param offset:
A positive integer value referring to a string from the tzres dll.
..note:
Offsets found in the registry are generally of the form
            `@tzres.dll,-114`. The offset in this case is 114, not -114.
"""
resource = self.p_wchar()
lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
return resource[:nchar]
def name_from_string(self, tzname_str):
"""
Parse strings as returned from the Windows registry into the time zone
name as defined in the registry.
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.name_from_string('@tzres.dll,-251'))
'Dateline Daylight Time'
>>> print(tzr.name_from_string('Eastern Standard Time'))
'Eastern Standard Time'
:param tzname_str:
A timezone name string as returned from a Windows registry key.
:return:
Returns the localized timezone string from tzres.dll if the string
is of the form `@tzres.dll,-offset`, else returns the input string.
"""
if not tzname_str.startswith('@'):
return tzname_str
name_splt = tzname_str.split(',-')
try:
offset = int(name_splt[1])
except:
raise ValueError("Malformed timezone string.")
return self.load_name(offset)
|
class tzres(object):
'''
Class for accessing `tzres.dll`, which contains timezone name related
resources.
.. versionadded:: 2.5.0
'''
def __init__(self, tzres_loc='tzres.dll'):
pass
def load_name(self, offset):
'''
Load a timezone name from a DLL offset (integer).
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.load_name(112))
'Eastern Standard Time'
:param offset:
A positive integer value referring to a string from the tzres dll.
..note:
Offsets found in the registry are generally of the form
            `@tzres.dll,-114`. The offset in this case is 114, not -114.
'''
pass
def name_from_string(self, tzname_str):
'''
Parse strings as returned from the Windows registry into the time zone
name as defined in the registry.
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.name_from_string('@tzres.dll,-251'))
'Dateline Daylight Time'
>>> print(tzr.name_from_string('Eastern Standard Time'))
'Eastern Standard Time'
:param tzname_str:
A timezone name string as returned from a Windows registry key.
:return:
Returns the localized timezone string from tzres.dll if the string
is of the form `@tzres.dll,-offset`, else returns the input string.
'''
pass
| 4 | 3 | 21 | 4 | 8 | 10 | 2 | 1.4 | 1 | 3 | 0 | 0 | 3 | 3 | 3 | 3 | 74 | 15 | 25 | 14 | 21 | 35 | 22 | 14 | 18 | 3 | 1 | 1 | 5 |
147,939 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/pylru.py
|
constant2.pkg.pylru.WriteBackCacheManager
|
class WriteBackCacheManager(object):
def __init__(self, store, size):
self.store = store
# Create a set to hold the dirty keys.
self.dirty = set()
# Define a callback function to be called by the cache when a
# key/value pair is about to be ejected. This callback will check to
# see if the key is in the dirty set. If so, then it will update the
# store object and remove the key from the dirty set.
def callback(key, value):
if key in self.dirty:
self.store[key] = value
self.dirty.remove(key)
# Create a cache and give it the callback function.
self.cache = lrucache(size, callback)
# Returns/sets the size of the managed cache.
def size(self, size=None):
return self.cache.size(size)
def clear(self):
self.cache.clear()
self.dirty.clear()
self.store.clear()
def __contains__(self, key):
# Check the cache first, since if it is there we can return quickly.
if key in self.cache:
return True
# Not in the cache. Might be in the underlying store.
if key in self.store:
return True
return False
def __getitem__(self, key):
# First we try the cache. If successful we just return the value. If
# not we catch KeyError and ignore it since that just means the key
# was not in the cache.
try:
return self.cache[key]
except KeyError:
pass
# It wasn't in the cache. Look it up in the store, add the entry to
# the cache, and return the value.
value = self.store[key]
self.cache[key] = value
return value
def get(self, key, default=None):
"""Get an item - return default (None) if not present"""
try:
return self[key]
except KeyError:
return default
def __setitem__(self, key, value):
# Add the key/value pair to the cache.
self.cache[key] = value
self.dirty.add(key)
def __delitem__(self, key):
found = False
try:
del self.cache[key]
found = True
self.dirty.remove(key)
except KeyError:
pass
try:
del self.store[key]
found = True
except KeyError:
pass
if not found: # If not found in cache or store, raise error.
raise KeyError
def __iter__(self):
return self.keys()
def keys(self):
for key in self.store.keys():
if key not in self.dirty:
yield key
for key in self.dirty:
yield key
def values(self):
for key, value in self.items():
yield value
def items(self):
for key, value in self.store.items():
if key not in self.dirty:
yield (key, value)
for key in self.dirty:
value = self.cache.peek(key)
yield (key, value)
def sync(self):
# For each dirty key, peek at its value in the cache and update the
# store. Doesn't change the cache's order.
for key in self.dirty:
self.store[key] = self.cache.peek(key)
# There are no dirty keys now.
self.dirty.clear()
def flush(self):
self.sync()
self.cache.clear()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.sync()
return False
|
class WriteBackCacheManager(object):
def __init__(self, store, size):
pass
def callback(key, value):
pass
def size(self, size=None):
pass
def clear(self):
pass
def __contains__(self, key):
pass
def __getitem__(self, key):
pass
def get(self, key, default=None):
'''Get an item - return default (None) if not present'''
pass
def __setitem__(self, key, value):
pass
def __delitem__(self, key):
pass
def __iter__(self):
pass
def keys(self):
pass
def values(self):
pass
def items(self):
pass
def sync(self):
pass
def flush(self):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
| 18 | 1 | 7 | 1 | 5 | 1 | 2 | 0.24 | 1 | 3 | 1 | 0 | 16 | 3 | 16 | 16 | 127 | 26 | 82 | 27 | 64 | 20 | 82 | 27 | 64 | 4 | 1 | 2 | 33 |
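WriteBackCacheManager layers an lrucache over a slower store and defers writes: __setitem__ only marks the key dirty, and the store is updated when a dirty entry is ejected, when sync() is called, or when a with-block exits. An editor-added sketch that uses a plain dict as the backing store (a real store would typically be a shelve, database wrapper, or similar); it assumes the standalone pylru package:

from pylru import WriteBackCacheManager   # assumption: standalone pylru package

store = {}                                 # stands in for a slow backing store
with WriteBackCacheManager(store, 100) as cached:
    cached["alpha"] = 1
    cached["beta"] = 2
    print(store)                           # {} -- writes are still only in the cache
# leaving the with-block calls sync(), pushing the dirty keys to the store
print(sorted(store.items()))               # [('alpha', 1), ('beta', 2)]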
147,940 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/_constant2.py
|
constant2._constant2.Meta
|
class Meta(type):
"""Meta class for :class:`Constant`.
"""
def __new__(cls, name, bases, attrs):
klass = super(Meta, cls).__new__(cls, name, bases, attrs)
for attr in attrs:
# Make sure reserved attributes are not been overridden
if attr in _reserved_attrs:
if (is_class_method(klass, attr) or is_regular_method(klass, attr)):
# raise exception if it is been overridden
if not (getattr(klass, attr) == getattr(_Constant, attr)):
msg = "%s is a reserved attribute / method name" % attr
raise AttributeError(msg)
else:
# raise exception if it is just a value
raise AttributeError(
"%r is not a valid attribute name" % attr
)
return klass
|
class Meta(type):
'''Meta class for :class:`Constant`.
'''
def __new__(cls, name, bases, attrs):
pass
| 2 | 1 | 17 | 1 | 13 | 3 | 5 | 0.36 | 1 | 3 | 1 | 0 | 1 | 0 | 1 | 14 | 21 | 2 | 14 | 5 | 12 | 5 | 11 | 5 | 9 | 5 | 2 | 4 | 5 |
147,941 |
MacHu-GWU/constant2-project
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/MacHu-GWU_constant2-project/constant2/_constant2.py
|
constant2._constant2.Food.Meat.Beef
|
class Beef:
id = 2
name = "beef"
|
class Beef:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 2 | 2 | 0 | 3 | 2 | 2 | 0 | 0 | 0 | 0 |
147,942 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz.tzutc
|
class tzutc(datetime.tzinfo):
"""
This is a tzinfo object that represents the UTC time zone.
"""
def utcoffset(self, dt):
return ZERO
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return "UTC"
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
return False
@_validate_fromutc_inputs
def fromutc(self, dt):
"""
Fast track version of fromutc() returns the original ``dt`` object for
any valid :py:class:`datetime.datetime` object.
"""
return dt
def __eq__(self, other):
if not isinstance(other, (tzutc, tzoffset)):
return NotImplemented
return (isinstance(other, tzutc) or
(isinstance(other, tzoffset) and other._offset == ZERO))
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
|
class tzutc(datetime.tzinfo):
'''
This is a tzinfo object that represents the UTC time zone.
'''
def utcoffset(self, dt):
pass
def dst(self, dt):
pass
@tzname_in_python2
def tzname(self, dt):
pass
def is_ambiguous(self, dt):
'''
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
'''
pass
@_validate_fromutc_inputs
def fromutc(self, dt):
'''
Fast track version of fromutc() returns the original ``dt`` object for
any valid :py:class:`datetime.datetime` object.
'''
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
def __repr__(self):
pass
| 11 | 3 | 5 | 1 | 2 | 2 | 1 | 0.67 | 1 | 1 | 1 | 0 | 8 | 0 | 8 | 13 | 55 | 15 | 24 | 13 | 13 | 16 | 21 | 11 | 12 | 2 | 1 | 1 | 9 |
147,943 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/tests/test_basic.py
|
test_basic.TestFood
|
class TestFood(object):
def test_Items(self):
assert Food.Items() == []
assert Food.Fruit.Items() == [("id", 1), ("name", "fruit")]
assert Food.Fruit.Apple.Items() == [("id", 1), ("name", "apple")]
assert Food.Fruit.Apple.RedApple.Items() == [
("id", 1), ("name", "red apple"),
]
assert Food.Keys() == []
assert Food.Fruit.Keys() == ["id", "name"]
assert Food.Fruit.Apple.Keys() == ["id", "name"]
assert Food.Fruit.Apple.RedApple.Keys() == ["id", "name"]
assert Food.Values() == []
assert Food.Fruit.Values() == [1, "fruit"]
assert Food.Fruit.Apple.Values() == [1, "apple"]
assert Food.Fruit.Apple.RedApple.Values() == [1, "red apple"]
def test_items(self):
assert food.items() == []
assert food.Fruit.items() == [("id", 1), ("name", "fruit")]
assert food.Fruit.Apple.items() == [("id", 1), ("name", "apple")]
assert food.Fruit.Apple.RedApple.items() == [
("id", 1), ("name", "red apple"),
]
assert food.keys() == []
assert food.Fruit.keys() == ["id", "name"]
assert food.Fruit.Apple.keys() == ["id", "name"]
assert food.Fruit.Apple.RedApple.keys() == ["id", "name"]
assert food.values() == []
assert food.Fruit.values() == [1, "fruit"]
assert food.Fruit.Apple.values() == [1, "apple"]
assert food.Fruit.Apple.RedApple.values() == [1, "red apple"]
def test_Subclass(self):
assert Food.Subclasses() == [
("Fruit", Food.Fruit),
("Meat", Food.Meat),
]
assert Food.Fruit.Subclasses() == [
("Apple", Food.Fruit.Apple),
("Banana", Food.Fruit.Banana),
]
assert Food.Fruit.Apple.Subclasses(sort_by="__name__") == [
("GreenApple", Food.Fruit.Apple.GreenApple),
("RedApple", Food.Fruit.Apple.RedApple),
]
assert Food.Fruit.Apple.Subclasses(sort_by="id") == [
("RedApple", Food.Fruit.Apple.RedApple),
("GreenApple", Food.Fruit.Apple.GreenApple),
]
assert Food.Fruit.Apple.RedApple.Subclasses() == []
def test_subclass(self):
assert food.subclasses() == [
("Fruit", food.Fruit), ("Meat", food.Meat)]
assert food.Fruit.subclasses() == [
("Apple", food.Fruit.Apple), ("Banana", food.Fruit.Banana),
]
assert food.Fruit.Apple.subclasses(sort_by="__name__") == [
("GreenApple", food.Fruit.Apple.GreenApple),
("RedApple", food.Fruit.Apple.RedApple),
]
assert food.Fruit.Apple.subclasses(sort_by="id") == [
("RedApple", food.Fruit.Apple.RedApple),
("GreenApple", food.Fruit.Apple.GreenApple),
]
def test_GetFirst(self):
assert Food.GetFirst("id", 1) == Food.Fruit
assert Food.GetFirst("name", "meat") == Food.Meat
assert Food.GetFirst("value", "Hello World") is None
def test_get_first(self):
assert food.get_first("id", 1) == food.Fruit
assert food.get_first("name", "meat") == food.Meat
assert food.get_first("value", "Hello World") is None
def test_GetFirst_performance(self):
st = time.clock()
for i in range(1000):
Food.GetFirst("id", 2)
elapsed = time.clock() - st
# print("with lfu_cache elapsed %.6f second." % elapsed)
def test_get_first_performance(self):
st = time.clock()
for i in range(1000):
food.get_first("id", 2)
elapsed = time.clock() - st
# print("without lfu_cache elapsed %.6f second." % elapsed)
def test_ToIds(self):
assert Food.ToIds([Food.Fruit, Food.Meat]) == [1, 2]
def test_to_ids(self):
assert food.to_ids([food.Fruit, food.Meat]) == [1, 2]
def test_ToClasses(self):
assert Food.ToClasses([1, 2]) == [Food.Fruit, Food.Meat]
def test_to_instances(self):
assert food.to_instances([1, 2]) == [food.Fruit, food.Meat]
def test_SubIds(self):
assert Food.SubIds() == [1, 2]
def test_sub_ids(self):
assert food.sub_ids() == [1, 2]
def test_ToDict(self):
assert Food.ToDict() == {}
assert Food.Fruit.ToDict() == {"id": 1, "name": "fruit"}
def test_to_dict(self):
assert food.to_dict() == {}
assert food.Fruit.to_dict() == {"id": 1, "name": "fruit"}
def test_dump_load(self):
data = Food.dump()
Food1 = Constant.load(data)
data1 = Food1.dump()
is_same_dict(data, data1)
|
class TestFood(object):
def test_Items(self):
pass
def test_items(self):
pass
def test_Subclass(self):
pass
def test_subclass(self):
pass
def test_GetFirst(self):
pass
def test_get_first(self):
pass
def test_GetFirst_performance(self):
pass
def test_get_first_performance(self):
pass
def test_ToIds(self):
pass
def test_to_ids(self):
pass
def test_ToClasses(self):
pass
def test_to_instances(self):
pass
def test_SubIds(self):
pass
def test_sub_ids(self):
pass
def test_ToDict(self):
pass
def test_to_dict(self):
pass
def test_dump_load(self):
pass
| 18 | 0 | 7 | 1 | 6 | 0 | 1 | 0.02 | 1 | 9 | 8 | 0 | 17 | 0 | 17 | 17 | 133 | 27 | 104 | 27 | 86 | 2 | 79 | 27 | 61 | 2 | 1 | 1 | 19 |
147,944 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/atomicwrites.py
|
constant2.pkg.superjson.pkg.atomicwrites.AtomicWriter
|
class AtomicWriter(object):
'''
A helper class for performing atomic writes. Usage::
with AtomicWriter(path).open() as f:
f.write(...)
:param path: The destination filepath. May or may not exist.
:param mode: The filemode for the temporary file. This defaults to `wb` in
Python 2 and `w` in Python 3.
:param overwrite: If set to false, an error is raised if ``path`` exists.
Errors are only raised after the file has been written to. Either way,
the operation is atomic.
If you need further control over the exact behavior, you are encouraged to
subclass.
'''
def __init__(self, path, mode=DEFAULT_MODE, overwrite=False,
**open_kwargs):
if 'a' in mode:
raise ValueError(
'Appending to an existing file is not supported, because that '
'would involve an expensive `copy`-operation to a temporary '
'file. Open the file in normal `w`-mode and copy explicitly '
'if that\'s what you\'re after.'
)
if 'x' in mode:
raise ValueError('Use the `overwrite`-parameter instead.')
if 'w' not in mode:
raise ValueError('AtomicWriters can only be written to.')
self._path = path
self._mode = mode
self._overwrite = overwrite
self._open_kwargs = open_kwargs
def open(self):
'''
Open the temporary file.
'''
return self._open(self.get_fileobject)
@contextlib.contextmanager
def _open(self, get_fileobject):
f = None # make sure f exists even if get_fileobject() fails
try:
success = False
with get_fileobject(**self._open_kwargs) as f:
yield f
self.sync(f)
self.commit(f)
success = True
finally:
if not success:
try:
self.rollback(f)
except Exception:
pass
def get_fileobject(self, dir=None, **kwargs):
'''Return the temporary file to use.'''
if dir is None:
dir = os.path.normpath(os.path.dirname(self._path))
descriptor, name = tempfile.mkstemp(dir=dir)
# io.open() will take either the descriptor or the name, but we need
# the name later for commit()/replace_atomic() and couldn't find a way
# to get the filename from the descriptor.
os.close(descriptor)
kwargs['mode'] = self._mode
kwargs['file'] = name
return io.open(**kwargs)
def sync(self, f):
'''responsible for clearing as many file caches as possible before
commit'''
f.flush()
_proper_fsync(f.fileno())
def commit(self, f):
'''Move the temporary file to the target location.'''
if self._overwrite:
replace_atomic(f.name, self._path)
else:
move_atomic(f.name, self._path)
def rollback(self, f):
'''Clean up all temporary resources.'''
os.unlink(f.name)
|
class AtomicWriter(object):
'''
A helper class for performing atomic writes. Usage::
with AtomicWriter(path).open() as f:
f.write(...)
:param path: The destination filepath. May or may not exist.
:param mode: The filemode for the temporary file. This defaults to `wb` in
Python 2 and `w` in Python 3.
:param overwrite: If set to false, an error is raised if ``path`` exists.
Errors are only raised after the file has been written to. Either way,
the operation is atomic.
If you need further control over the exact behavior, you are encouraged to
subclass.
'''
def __init__(self, path, mode=DEFAULT_MODE, overwrite=False,
**open_kwargs):
pass
def open(self):
'''
Open the temporary file.
'''
pass
@contextlib.contextmanager
def _open(self, get_fileobject):
pass
def get_fileobject(self, dir=None, **kwargs):
'''Return the temporary file to use.'''
pass
def sync(self, f):
'''responsible for clearing as many file caches as possible before
commit'''
pass
def commit(self, f):
'''Move the temporary file to the target location.'''
pass
def rollback(self, f):
'''Clean up all temporary resources.'''
pass
| 9 | 6 | 9 | 0 | 7 | 2 | 2 | 0.46 | 1 | 2 | 0 | 0 | 7 | 4 | 7 | 7 | 89 | 11 | 54 | 17 | 44 | 25 | 45 | 15 | 37 | 4 | 1 | 3 | 14 |
147,945 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/pytest.py
|
constant2.pkg.pytest.ApproxNonIterable
|
class ApproxNonIterable(object):
"""
Perform approximate comparisons for single numbers only.
In other words, the ``expected`` attribute for objects of this class must
be some sort of number. This is in contrast to the ``approx`` class, where
the ``expected`` attribute can either be a number of a sequence of numbers.
This class is responsible for making comparisons, while ``approx`` is
responsible for abstracting the difference between numbers and sequences of
numbers. Although this class can stand on its own, it's only meant to be
used within ``approx``.
"""
def __init__(self, expected, rel=None, abs=None):
self.expected = expected
self.abs = abs
self.rel = rel
def __repr__(self):
if isinstance(self.expected, complex):
return str(self.expected)
# Infinities aren't compared using tolerances, so don't show a
# tolerance.
if math.isinf(self.expected):
return str(self.expected)
# If a sensible tolerance can't be calculated, self.tolerance will
# raise a ValueError. In this case, display '???'.
try:
vetted_tolerance = '{:.1e}'.format(self.tolerance)
except ValueError:
vetted_tolerance = '???'
if sys.version_info[0] == 2:
return '{0} +- {1}'.format(self.expected, vetted_tolerance)
else:
return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance)
def __eq__(self, actual):
# Short-circuit exact equality.
if actual == self.expected:
return True
# Infinity shouldn't be approximately equal to anything but itself, but
# if there's a relative tolerance, it will be infinite and infinity
# will seem approximately equal to everything. The equal-to-itself
# case would have been short circuited above, so here we can just
# return false if the expected value is infinite. The abs() call is
# for compatibility with complex numbers.
if math.isinf(abs(self.expected)):
return False
# Return true if the two numbers are within the tolerance.
return abs(self.expected - actual) <= self.tolerance
__hash__ = None
def __ne__(self, actual):
return not (actual == self)
@property
def tolerance(self):
def set_default(x, default): return x if x is not None else default
# Figure out what the absolute tolerance should be. ``self.abs`` is
# either None or a value specified by the user.
absolute_tolerance = set_default(self.abs, 1e-12)
if absolute_tolerance < 0:
raise ValueError(
"absolute tolerance can't be negative: {}".format(absolute_tolerance))
if math.isnan(absolute_tolerance):
raise ValueError("absolute tolerance can't be NaN.")
# If the user specified an absolute tolerance but not a relative one,
# just return the absolute tolerance.
if self.rel is None:
if self.abs is not None:
return absolute_tolerance
# Figure out what the relative tolerance should be. ``self.rel`` is
# either None or a value specified by the user. This is done after
# we've made sure the user didn't ask for an absolute tolerance only,
# because we don't want to raise errors about the relative tolerance if
# we aren't even going to use it.
relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected)
if relative_tolerance < 0:
raise ValueError(
"relative tolerance can't be negative: {}".format(absolute_tolerance))
if math.isnan(relative_tolerance):
raise ValueError("relative tolerance can't be NaN.")
# Return the larger of the relative and absolute tolerances.
return max(relative_tolerance, absolute_tolerance)
|
class ApproxNonIterable(object):
'''
Perform approximate comparisons for single numbers only.
In other words, the ``expected`` attribute for objects of this class must
be some sort of number. This is in contrast to the ``approx`` class, where
the ``expected`` attribute can either be a number of a sequence of numbers.
This class is responsible for making comparisons, while ``approx`` is
responsible for abstracting the difference between numbers and sequences of
numbers. Although this class can stand on its own, it's only meant to be
used within ``approx``.
'''
def __init__(self, expected, rel=None, abs=None):
pass
def __repr__(self):
pass
def __eq__(self, actual):
pass
def __ne__(self, actual):
pass
@property
def tolerance(self):
pass
def set_default(x, default):
pass
| 8 | 1 | 13 | 2 | 7 | 4 | 3 | 0.7 | 1 | 3 | 0 | 0 | 5 | 3 | 5 | 5 | 96 | 18 | 46 | 15 | 39 | 32 | 43 | 14 | 36 | 7 | 1 | 2 | 19 |
147,946 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/parser.py
|
constant2.pkg.superjson.pkg.dateutil.parser._resultbase
|
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __len__(self):
return (sum(getattr(self, attr) is not None
for attr in self.__slots__))
def __repr__(self):
return self._repr(self.__class__.__name__)
|
class _resultbase(object):
def __init__(self):
pass
def _repr(self, classname):
pass
def __len__(self):
pass
def __repr__(self):
pass
| 5 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 0 | 0 | 3 | 4 | 0 | 4 | 4 | 20 | 4 | 16 | 9 | 11 | 0 | 15 | 9 | 10 | 3 | 1 | 2 | 7 |
147,947 |
MacHu-GWU/constant2-project
|
MacHu-GWU_constant2-project/constant2/pkg/superjson/pkg/dateutil/tz/tz.py
|
constant2.pkg.superjson.pkg.dateutil.tz.tz.tzoffset
|
class tzoffset(datetime.tzinfo):
"""
A simple class for representing a fixed offset from UTC.
:param name:
The timezone name, to be returned when ``tzname()`` is called.
:param offset:
The time zone offset in seconds, or (since version 2.6.0, represented
as a :py:class:`datetime.timedelta` object.
"""
def __init__(self, name, offset):
self._name = name
try:
# Allow a timedelta
offset = _total_seconds(offset)
except (TypeError, AttributeError):
pass
self._offset = datetime.timedelta(seconds=offset)
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._name
@_validate_fromutc_inputs
def fromutc(self, dt):
return dt + self._offset
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
return False
def __eq__(self, other):
if not isinstance(other, tzoffset):
return NotImplemented
return self._offset == other._offset
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._name),
int(_total_seconds(self._offset)))
__reduce__ = object.__reduce__
|
class tzoffset(datetime.tzinfo):
'''
A simple class for representing a fixed offset from UTC.
:param name:
The timezone name, to be returned when ``tzname()`` is called.
:param offset:
The time zone offset in seconds, or (since version 2.6.0, represented
as a :py:class:`datetime.timedelta` object.
'''
def __init__(self, name, offset):
pass
def utcoffset(self, dt):
pass
def dst(self, dt):
pass
@tzname_in_python2
def tzname(self, dt):
pass
@_validate_fromutc_inputs
def fromutc(self, dt):
pass
def is_ambiguous(self, dt):
'''
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
'''
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
def __repr__(self):
pass
| 12 | 2 | 5 | 1 | 3 | 1 | 1 | 0.56 | 1 | 4 | 0 | 0 | 9 | 2 | 9 | 14 | 69 | 19 | 32 | 16 | 20 | 18 | 28 | 14 | 18 | 2 | 1 | 1 | 11 |