| field | value |
|---|---|
| hexsha | 4a212cd846ad07e2bd4850b890c2752ed5981518 |
| size | 116,442 |
| ext | py |
| lang | Python |
| max_stars_repo_path | python/ccxt/async_support/okex3.py |
| max_stars_repo_name | quadency/ccxt |
| max_stars_repo_head_hexsha | 12d45145825e48cff0dde577a8048afe96ede233 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2020-09-29T05:46:16.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-09-29T05:46:16.000Z |
| max_issues_repo_path | python/ccxt/async_support/okex3.py |
| max_issues_repo_name | quadency/ccxt |
| max_issues_repo_head_hexsha | 12d45145825e48cff0dde577a8048afe96ede233 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | python/ccxt/async_support/okex3.py |
| max_forks_repo_name | quadency/ccxt |
| max_forks_repo_head_hexsha | 12d45145825e48cff0dde577a8048afe96ede233 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2020-06-09T04:29:45.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-06-09T04:29:45.000Z |
| content | (the Python source below) |
| avg_line_length / max_line_length / alphanum_fraction | (follow the content at the end of the record) |

# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 2: basestring exists, the lookup succeeds
except NameError:
basestring = str # Python 3: basestring was removed, fall back to str
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
class okex3(Exchange):
def describe(self):
return self.deep_extend(super(okex3, self).describe(), {
'id': 'okex3',
'name': 'OKEX',
'countries': ['CN', 'US'],
'version': 'v3',
'rateLimit': 1000, # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
'has': {
'CORS': False,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOrders': False,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchCurrencies': False, # see below
'fetchDeposits': True,
'fetchWithdrawals': True,
'fetchTime': True,
'fetchTransactions': False,
'fetchMyTrades': False, # they don't have it
'fetchDepositAddress': True,
'fetchOrderTrades': True,
'fetchTickers': True,
'fetchLedger': True,
'withdraw': True,
'futures': True,
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '604800',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/32552768-0d6dd3c6-c4a6-11e7-90f8-c043b64756a7.jpg',
'api': 'https://www.okex.com',
'www': 'https://www.okex.com',
'doc': 'https://www.okex.com/docs/en/',
'fees': 'https://www.okex.com/pages/products/fees.html',
},
'api': {
'general': {
'get': [
'time',
],
},
'account': {
'get': [
'currencies',
'wallet',
'wallet/{currency}',
'withdrawal/fee',
'withdrawal/history',
'withdrawal/history/{currency}',
'ledger',
'deposit/address',
'deposit/history',
'deposit/history/{currency}',
],
'post': [
'transfer',
'withdrawal',
],
},
'spot': {
'get': [
'accounts',
'accounts/{currency}',
'accounts/{currency}/ledger',
'orders',
'orders_pending',
'orders/{order_id}',
'orders/{client_oid}',
'fills',
'algo',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
],
'post': [
'order_algo',
'orders',
'batch_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_algos',
'cancel_batch_orders',
],
},
'margin': {
'get': [
'accounts',
'accounts/{instrument_id}',
'accounts/{instrument_id}/ledger',
'accounts/availability',
'accounts/{instrument_id}/availability',
'accounts/borrowed',
'accounts/{instrument_id}/borrowed',
'orders',
'orders/{order_id}',
'orders/{client_oid}',
'orders_pending',
'fills',
],
'post': [
'accounts/borrow',
'accounts/repayment',
'orders',
'batch_orders',
'cancel_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_orders',
],
},
'futures': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'accounts/{currency}',
'accounts/{currency}/leverage',
'accounts/{currency}/ledger',
'order_algo/{instrument_id}',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'accounts/{instrument_id}/holds',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/estimated_price',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/liquidation',
'instruments/{instrument_id}/mark_price',
],
'post': [
'accounts/{currency}/leverage',
'accounts/margin_mode',
'order',
'orders',
'order_algo',
'cancel_algos',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'close_position',
'cancel_all',
],
},
'swap': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'{instrument_id}/accounts',
'accounts/{instrument_id}/settings',
'accounts/{instrument_id}/ledger',
'accounts/{instrument_id}/holds',
'order_algo/{instrument_id}',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
# public
'instruments',
'instruments/{instrument_id}/depth',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/liquidation',
'instruments/{instrument_id}/funding_time',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/historical_funding_rate',
],
'post': [
'accounts/{instrument_id}/leverage',
'order',
'order_algo',
'orders',
'cancel_algos',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
],
},
# they have removed this part from public
'ett': {
'get': [
'accounts',
'accounts/{currency}',
'accounts/{currency}/ledger',
'orders', # fetchOrder, fetchOrders
# public
'constituents/{ett}',
'define-price/{ett}',
],
'post': [
'orders',
'orders/{order_id}',
],
},
},
'fees': {
'trading': {
'taker': 0.0015,
'maker': 0.0010,
},
'spot': {
'taker': 0.0015,
'maker': 0.0010,
},
'futures': {
'taker': 0.0005,
'maker': 0.0002,
},
'swap': {
'taker': 0.00075,
'maker': 0.00020,
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'exceptions': {
# http error codes
# 400 Bad Request — Invalid request format
# 401 Unauthorized — Invalid API Key
# 403 Forbidden — You do not have access to the requested resource
# 404 Not Found
# 500 Internal Server Error — We had a problem with our server
'exact': {
'1': ExchangeError, # {"code": 1, "message": "System error"}
# undocumented
'failure to get a peer from the ring-balancer': ExchangeNotAvailable, # {"message": "failure to get a peer from the ring-balancer"}
'4010': PermissionDenied, # {"code": 4010, "message": "For the security of your funds, withdrawals are not permitted within 24 hours after changing fund password / mobile number / Google Authenticator settings "}
# common
'30001': AuthenticationError, # {"code": 30001, "message": 'request header "OK_ACCESS_KEY" cannot be blank'}
'30002': AuthenticationError, # {"code": 30002, "message": 'request header "OK_ACCESS_SIGN" cannot be blank'}
'30003': AuthenticationError, # {"code": 30003, "message": 'request header "OK_ACCESS_TIMESTAMP" cannot be blank'}
'30004': AuthenticationError, # {"code": 30004, "message": 'request header "OK_ACCESS_PASSPHRASE" cannot be blank'}
'30005': InvalidNonce, # {"code": 30005, "message": "invalid OK_ACCESS_TIMESTAMP"}
'30006': AuthenticationError, # {"code": 30006, "message": "invalid OK_ACCESS_KEY"}
'30007': BadRequest, # {"code": 30007, "message": 'invalid Content_Type, please use "application/json" format'}
'30008': RequestTimeout, # {"code": 30008, "message": "timestamp request expired"}
'30009': ExchangeError, # {"code": 30009, "message": "system error"}
'30010': AuthenticationError, # {"code": 30010, "message": "API validation failed"}
'30011': PermissionDenied, # {"code": 30011, "message": "invalid IP"}
'30012': AuthenticationError, # {"code": 30012, "message": "invalid authorization"}
'30013': AuthenticationError, # {"code": 30013, "message": "invalid sign"}
'30014': DDoSProtection, # {"code": 30014, "message": "request too frequent"}
'30015': AuthenticationError, # {"code": 30015, "message": 'request header "OK_ACCESS_PASSPHRASE" incorrect'}
'30016': ExchangeError, # {"code": 30016, "message": "you are using v1 apiKey, please use v1 endpoint. If you would like to use v3 endpoint, please subscribe to v3 apiKey"}
'30017': ExchangeError, # {"code": 30017, "message": "apikey's broker id does not match"}
'30018': ExchangeError, # {"code": 30018, "message": "apikey's domain does not match"}
'30019': ExchangeNotAvailable, # {"code": 30019, "message": "Api is offline or unavailable"}
'30020': BadRequest, # {"code": 30020, "message": "body cannot be blank"}
'30021': BadRequest, # {"code": 30021, "message": "Json data format error"}, {"code": 30021, "message": "json data format error"}
'30022': PermissionDenied, # {"code": 30022, "message": "Api has been frozen"}
'30023': BadRequest, # {"code": 30023, "message": "{0} parameter cannot be blank"}
'30024': BadSymbol, # {"code":30024,"message":"\"instrument_id\" is an invalid parameter"}
'30025': BadRequest, # {"code": 30025, "message": "{0} parameter category error"}
'30026': DDoSProtection, # {"code": 30026, "message": "requested too frequent"}
'30027': AuthenticationError, # {"code": 30027, "message": "login failure"}
'30028': PermissionDenied, # {"code": 30028, "message": "unauthorized execution"}
'30029': AccountSuspended, # {"code": 30029, "message": "account suspended"}
'30030': ExchangeError, # {"code": 30030, "message": "endpoint request failed. Please try again"}
'30031': BadRequest, # {"code": 30031, "message": "token does not exist"}
'30032': BadSymbol, # {"code": 30032, "message": "pair does not exist"}
'30033': BadRequest, # {"code": 30033, "message": "exchange domain does not exist"}
'30034': ExchangeError, # {"code": 30034, "message": "exchange ID does not exist"}
'30035': ExchangeError, # {"code": 30035, "message": "trading is not supported in this website"}
'30036': ExchangeError, # {"code": 30036, "message": "no relevant data"}
'30038': AuthenticationError, # {"code": 30038, "message": "user does not exist"}
'30037': ExchangeNotAvailable, # {"code": 30037, "message": "endpoint is offline or unavailable"}
# futures
'32001': AccountSuspended, # {"code": 32001, "message": "futures account suspended"}
'32002': PermissionDenied, # {"code": 32002, "message": "futures account does not exist"}
'32003': CancelPending, # {"code": 32003, "message": "canceling, please wait"}
'32004': ExchangeError, # {"code": 32004, "message": "you have no unfilled orders"}
'32005': InvalidOrder, # {"code": 32005, "message": "max order quantity"}
'32006': InvalidOrder, # {"code": 32006, "message": "the order price or trigger price exceeds USD 1 million"}
'32007': InvalidOrder, # {"code": 32007, "message": "leverage level must be the same for orders on the same side of the contract"}
'32008': InvalidOrder, # {"code": 32008, "message": "Max. positions to open(cross margin)"}
'32009': InvalidOrder, # {"code": 32009, "message": "Max. positions to open(fixed margin)"}
'32010': ExchangeError, # {"code": 32010, "message": "leverage cannot be changed with open positions"}
'32011': ExchangeError, # {"code": 32011, "message": "futures status error"}
'32012': ExchangeError, # {"code": 32012, "message": "futures order update error"}
'32013': ExchangeError, # {"code": 32013, "message": "token type is blank"}
'32014': ExchangeError, # {"code": 32014, "message": "your number of contracts closing is larger than the number of contracts available"}
'32015': ExchangeError, # {"code": 32015, "message": "margin ratio is lower than 100% before opening positions"}
'32016': ExchangeError, # {"code": 32016, "message": "margin ratio is lower than 100% after opening position"}
'32017': ExchangeError, # {"code": 32017, "message": "no BBO"}
'32018': ExchangeError, # {"code": 32018, "message": "the order quantity is less than 1, please try again"}
'32019': ExchangeError, # {"code": 32019, "message": "the order price deviates from the price of the previous minute by more than 3%"}
'32020': ExchangeError, # {"code": 32020, "message": "the price is not in the range of the price limit"}
'32021': ExchangeError, # {"code": 32021, "message": "leverage error"}
'32022': ExchangeError, # {"code": 32022, "message": "this function is not supported in your country or region according to the regulations"}
'32023': ExchangeError, # {"code": 32023, "message": "this account has outstanding loan"}
'32024': ExchangeError, # {"code": 32024, "message": "order cannot be placed during delivery"}
'32025': ExchangeError, # {"code": 32025, "message": "order cannot be placed during settlement"}
'32026': ExchangeError, # {"code": 32026, "message": "your account is restricted from opening positions"}
'32029': ExchangeError, # {"code": 32029, "message": "order info does not exist"}
'32028': ExchangeError, # {"code": 32028, "message": "account is suspended and liquidated"}
'32027': ExchangeError, # {"code": 32027, "message": "cancelled over 20 orders"}
'32044': ExchangeError, # {"code": 32044, "message": "The margin ratio after submitting this order is lower than the minimum requirement({0}) for your tier."}
# token and margin trading
'33001': PermissionDenied, # {"code": 33001, "message": "margin account for this pair is not enabled yet"}
'33002': AccountSuspended, # {"code": 33002, "message": "margin account for this pair is suspended"}
'33003': InsufficientFunds, # {"code": 33003, "message": "no loan balance"}
'33004': ExchangeError, # {"code": 33004, "message": "loan amount cannot be smaller than the minimum limit"}
'33005': ExchangeError, # {"code": 33005, "message": "repayment amount must exceed 0"}
'33006': ExchangeError, # {"code": 33006, "message": "loan order not found"}
'33007': ExchangeError, # {"code": 33007, "message": "status not found"}
'33008': ExchangeError, # {"code": 33008, "message": "loan amount cannot exceed the maximum limit"}
'33009': ExchangeError, # {"code": 33009, "message": "user ID is blank"}
'33010': ExchangeError, # {"code": 33010, "message": "you cannot cancel an order during session 2 of call auction"}
'33011': ExchangeError, # {"code": 33011, "message": "no new market data"}
'33012': ExchangeError, # {"code": 33012, "message": "order cancellation failed"}
'33013': InvalidOrder, # {"code": 33013, "message": "order placement failed"}
'33014': OrderNotFound, # {"code": 33014, "message": "order does not exist"}
'33015': InvalidOrder, # {"code": 33015, "message": "exceeded maximum limit"}
'33016': ExchangeError, # {"code": 33016, "message": "margin trading is not open for this token"}
'33017': InsufficientFunds, # {"code": 33017, "message": "insufficient balance"}
'33018': ExchangeError, # {"code": 33018, "message": "this parameter must be smaller than 1"}
'33020': ExchangeError, # {"code": 33020, "message": "request not supported"}
'33021': BadRequest, # {"code": 33021, "message": "token and the pair do not match"}
'33022': InvalidOrder, # {"code": 33022, "message": "pair and the order do not match"}
'33023': ExchangeError, # {"code": 33023, "message": "you can only place market orders during call auction"}
'33024': InvalidOrder, # {"code": 33024, "message": "trading amount too small"}
'33025': InvalidOrder, # {"code": 33025, "message": "base token amount is blank"}
'33026': ExchangeError, # {"code": 33026, "message": "transaction completed"}
'33027': InvalidOrder, # {"code": 33027, "message": "cancelled order or order cancelling"}
'33028': InvalidOrder, # {"code": 33028, "message": "the decimal places of the trading price exceeded the limit"}
'33029': InvalidOrder, # {"code": 33029, "message": "the decimal places of the trading size exceeded the limit"}
'33034': ExchangeError, # {"code": 33034, "message": "You can only place limit order after Call Auction has started"}
'33059': BadRequest, # {"code": 33059, "message": "client_oid or order_id is required"}
'33060': BadRequest, # {"code": 33060, "message": "Only fill in either parameter client_oid or order_id"}
# account
'34001': PermissionDenied, # {"code": 34001, "message": "withdrawal suspended"}
'34002': InvalidAddress, # {"code": 34002, "message": "please add a withdrawal address"}
'34003': ExchangeError, # {"code": 34003, "message": "sorry, this token cannot be withdrawn to xx at the moment"}
'34004': ExchangeError, # {"code": 34004, "message": "withdrawal fee is smaller than minimum limit"}
'34005': ExchangeError, # {"code": 34005, "message": "withdrawal fee exceeds the maximum limit"}
'34006': ExchangeError, # {"code": 34006, "message": "withdrawal amount is lower than the minimum limit"}
'34007': ExchangeError, # {"code": 34007, "message": "withdrawal amount exceeds the maximum limit"}
'34008': InsufficientFunds, # {"code": 34008, "message": "insufficient balance"}
'34009': ExchangeError, # {"code": 34009, "message": "your withdrawal amount exceeds the daily limit"}
'34010': ExchangeError, # {"code": 34010, "message": "transfer amount must be larger than 0"}
'34011': ExchangeError, # {"code": 34011, "message": "conditions not met"}
'34012': ExchangeError, # {"code": 34012, "message": "the minimum withdrawal amount for NEO is 1, and the amount must be an integer"}
'34013': ExchangeError, # {"code": 34013, "message": "please transfer"}
'34014': ExchangeError, # {"code": 34014, "message": "transfer limited"}
'34015': ExchangeError, # {"code": 34015, "message": "subaccount does not exist"}
'34016': PermissionDenied, # {"code": 34016, "message": "transfer suspended"}
'34017': AccountSuspended, # {"code": 34017, "message": "account suspended"}
'34018': AuthenticationError, # {"code": 34018, "message": "incorrect trades password"}
'34019': PermissionDenied, # {"code": 34019, "message": "please bind your email before withdrawal"}
'34020': PermissionDenied, # {"code": 34020, "message": "please bind your funds password before withdrawal"}
'34021': InvalidAddress, # {"code": 34021, "message": "Not verified address"}
'34022': ExchangeError, # {"code": 34022, "message": "Withdrawals are not available for sub accounts"}
'34023': PermissionDenied, # {"code": 34023, "message": "Please enable futures trading before transferring your funds"}
# swap
'35001': ExchangeError, # {"code": 35001, "message": "Contract does not exist"}
'35002': ExchangeError, # {"code": 35002, "message": "Contract settling"}
'35003': ExchangeError, # {"code": 35003, "message": "Contract paused"}
'35004': ExchangeError, # {"code": 35004, "message": "Contract pending settlement"}
'35005': AuthenticationError, # {"code": 35005, "message": "User does not exist"}
'35008': InvalidOrder, # {"code": 35008, "message": "Risk ratio too high"}
'35010': InvalidOrder, # {"code": 35010, "message": "Position closing too large"}
'35012': InvalidOrder, # {"code": 35012, "message": "Incorrect order size"}
'35014': InvalidOrder, # {"code": 35014, "message": "Order price is not within limit"}
'35015': InvalidOrder, # {"code": 35015, "message": "Invalid leverage level"}
'35017': ExchangeError, # {"code": 35017, "message": "Open orders exist"}
'35019': InvalidOrder, # {"code": 35019, "message": "Order size too large"}
'35020': InvalidOrder, # {"code": 35020, "message": "Order price too high"}
'35021': InvalidOrder, # {"code": 35021, "message": "Order size exceeded current tier limit"}
'35022': ExchangeError, # {"code": 35022, "message": "Contract status error"}
'35024': ExchangeError, # {"code": 35024, "message": "Contract not initialized"}
'35025': InsufficientFunds, # {"code": 35025, "message": "No account balance"}
'35026': ExchangeError, # {"code": 35026, "message": "Contract settings not initialized"}
'35029': OrderNotFound, # {"code": 35029, "message": "Order does not exist"}
'35030': InvalidOrder, # {"code": 35030, "message": "Order size too large"}
'35031': InvalidOrder, # {"code": 35031, "message": "Cancel order size too large"}
'35032': ExchangeError, # {"code": 35032, "message": "Invalid user status"}
'35039': ExchangeError, # {"code": 35039, "message": "Open order quantity exceeds limit"}
'35040': InvalidOrder, # {"error_message":"Invalid order type","result":"true","error_code":"35040","order_id":"-1"}
'35044': ExchangeError, # {"code": 35044, "message": "Invalid order status"}
'35046': InsufficientFunds, # {"code": 35046, "message": "Negative account balance"}
'35047': InsufficientFunds, # {"code": 35047, "message": "Insufficient account balance"}
'35048': ExchangeError, # {"code": 35048, "message": "User contract is frozen and liquidating"}
'35049': InvalidOrder, # {"code": 35049, "message": "Invalid order type"}
'35050': InvalidOrder, # {"code": 35050, "message": "Position settings are blank"}
'35052': InsufficientFunds, # {"code": 35052, "message": "Insufficient cross margin"}
'35053': ExchangeError, # {"code": 35053, "message": "Account risk too high"}
'35055': InsufficientFunds, # {"code": 35055, "message": "Insufficient account balance"}
'35057': ExchangeError, # {"code": 35057, "message": "No last traded price"}
'35058': ExchangeError, # {"code": 35058, "message": "No limit"}
'35059': BadRequest, # {"code": 35059, "message": "client_oid or order_id is required"}
'35060': BadRequest, # {"code": 35060, "message": "Only fill in either parameter client_oid or order_id"}
'35061': BadRequest, # {"code": 35061, "message": "Invalid instrument_id"}
'35062': InvalidOrder, # {"code": 35062, "message": "Invalid match_price"}
'35063': InvalidOrder, # {"code": 35063, "message": "Invalid order_size"}
'35064': InvalidOrder, # {"code": 35064, "message": "Invalid client_oid"}
},
'broad': {
},
},
'options': {
'createMarketBuyOrderRequiresPrice': True,
'fetchMarkets': ['spot', 'futures', 'swap'],
'defaultType': 'spot', # 'account', 'spot', 'margin', 'futures', 'swap'
'auth': {
'time': 'public',
'currencies': 'private',
'instruments': 'public',
'rate': 'public',
'constituents/{ett}': 'public',
'define-price/{ett}': 'public',
},
},
'commonCurrencies': {
# OKEX refers to ERC20 version of Aeternity(AEToken)
'AE': 'AET', # https://github.com/ccxt/ccxt/issues/4981
'HOT': 'Hydro Protocol',
'HSR': 'HC',
'MAG': 'Maggie',
'YOYO': 'YOYOW',
'WIN': 'WinToken', # https://github.com/ccxt/ccxt/issues/5701
},
})
async def fetch_time(self, params={}):
response = await self.generalGetTime(params)
#
# {
# "iso": "2015-01-07T23:47:25.201Z",
# "epoch": 1420674445.201
# }
#
return self.parse8601(self.safe_string(response, 'iso'))
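# note: parse8601 converts the "iso" field of the documented response above into a millisecond timestamp,
# e.g. "2015-01-07T23:47:25.201Z" (epoch 1420674445.201 seconds) becomes 1420674445201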
async def fetch_markets(self, params={}):
types = self.safe_value(self.options, 'fetchMarkets')
result = []
for i in range(0, len(types)):
markets = await self.fetch_markets_by_type(types[i], params)
result = self.array_concat(result, markets)
return result
def parse_markets(self, markets):
result = []
for i in range(0, len(markets)):
result.append(self.parse_market(markets[i]))
return result
def parse_market(self, market):
#
# spot markets
#
# [{ base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# product_id: "EOS-OKB",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001" },
#
# ..., # the spot endpoint also returns ETT instruments
#
# { base_currency: "OK06ETT",
# base_increment: "0.00000001",
# base_min_size: "0.01",
# instrument_id: "OK06ETT-USDT",
# min_size: "0.01",
# product_id: "OK06ETT-USDT",
# quote_currency: "USDT",
# quote_increment: "0.0001",
# size_increment: "0.00000001",
# tick_size: "0.0001" }]
#
# futures markets
#
# [{ instrument_id: "BTG-USD-190329",
# underlying_index: "BTG",
# quote_currency: "USD",
# tick_size: "0.01",
# contract_val: "10",
# listing: "2018-12-14",
# delivery: "2019-03-29",
# trade_increment: "1" } ]
#
# swap markets
#
# [{ instrument_id: "BTC-USD-SWAP",
# underlying_index: "BTC",
# quote_currency: "USD",
# coin: "BTC",
# contract_val: "100",
# listing: "2018-10-23T20:11:00.443Z",
# delivery: "2018-10-24T20:11:00.443Z",
# size_increment: "4",
# tick_size: "4" } ]
#
id = self.safe_string(market, 'instrument_id')
marketType = 'spot'
spot = True
future = False
swap = False
baseId = self.safe_string(market, 'base_currency')
contractVal = self.safe_float(market, 'contract_val')
if contractVal is not None:
marketType = 'swap'
spot = False
swap = True
baseId = self.safe_string(market, 'coin')
futuresAlias = self.safe_string(market, 'alias')
if futuresAlias is not None:
swap = False
future = True
marketType = 'futures'
baseId = self.safe_string(market, 'underlying_index')
quoteId = self.safe_string(market, 'quote_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = (base + '/' + quote) if spot else id
amountPrecision = self.safe_string(market, 'size_increment')
if amountPrecision is not None:
amountPrecision = self.precision_from_string(amountPrecision)
pricePrecision = self.safe_string(market, 'tick_size')
if pricePrecision is not None:
pricePrecision = self.precision_from_string(pricePrecision)
precision = {
'amount': amountPrecision,
'price': pricePrecision,
}
minAmount = self.safe_float_2(market, 'min_size', 'base_min_size')
minPrice = self.safe_float(market, 'tick_size')
if precision['price'] is not None:
minPrice = math.pow(10, -precision['price'])
minCost = None
if minAmount is not None and minPrice is not None:
minCost = minAmount * minPrice
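# a worked example using the documented EOS-OKB spot market above (an illustration, not live values):
# tick_size "0.0001" -> price precision 4 -> minPrice = 10 ** -4 = 0.0001,
# min_size "0.01" -> minAmount = 0.01, so minCost = 0.01 * 0.0001 = 0.000001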
active = True
fees = self.safe_value_2(self.fees, marketType, 'trading', {})
return self.extend(fees, {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'type': marketType,
'spot': spot,
'futures': future,
'swap': swap,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': minPrice,
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
},
})
async def fetch_markets_by_type(self, type, params={}):
method = type + 'GetInstruments'
response = await getattr(self, method)(params)
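# the implicit API method is resolved by name from the 'api' definition in describe() above,
# e.g. type 'spot' -> self.spotGetInstruments(), 'futures' -> self.futuresGetInstruments(), 'swap' -> self.swapGetInstruments()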
#
# spot markets
#
# [{ base_currency: "EOS",
# base_increment: "0.000001",
# base_min_size: "0.01",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# product_id: "EOS-OKB",
# quote_currency: "OKB",
# quote_increment: "0.0001",
# size_increment: "0.000001",
# tick_size: "0.0001" } ]
#
# futures markets
#
# [{ instrument_id: "BTG-USD-190329",
# underlying_index: "BTG",
# quote_currency: "USD",
# tick_size: "0.01",
# contract_val: "10",
# listing: "2018-12-14",
# delivery: "2019-03-29",
# trade_increment: "1" } ]
#
# swap markets
#
# [{ instrument_id: "BTC-USD-SWAP",
# underlying_index: "BTC",
# quote_currency: "USD",
# coin: "BTC",
# contract_val: "100",
# listing: "2018-10-23T20:11:00.443Z",
# delivery: "2018-10-24T20:11:00.443Z",
# size_increment: "4",
# tick_size: "4" } ]
#
return self.parse_markets(response)
async def fetch_currencies(self, params={}):
# has['fetchCurrencies'] is currently set to False
# despite that their docs say these endpoints are public:
# https://www.okex.com/api/account/v3/withdrawal/fee
# https://www.okex.com/api/account/v3/currencies
# it will still reply with {"code":30001, "message": "OK-ACCESS-KEY header is required"}
# if you attempt to access it without authentication
response = await self.accountGetCurrencies(params)
#
# [
# {
# name: '',
# currency: 'BTC',
# can_withdraw: '1',
# can_deposit: '1',
# min_withdrawal: '0.0100000000000000'
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
precision = 8 # default precision, todo: fix "magic constants"
name = self.safe_string(currency, 'name')
canDeposit = self.safe_integer(currency, 'can_deposit')
canWithdraw = self.safe_integer(currency, 'can_withdraw')
active = canDeposit and canWithdraw
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': None,
'name': name,
'active': active,
'fee': None, # todo: redesign
'precision': precision,
'limits': {
'amount': {'min': None, 'max': None},
'price': {'min': None, 'max': None},
'cost': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_float(currency, 'min_withdrawal'),
'max': None,
},
},
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentId'
method += 'Depth' if (market['type'] == 'swap') else 'Book'
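# e.g. a spot market resolves to self.spotGetInstrumentsInstrumentIdBook() and a swap market to
# self.swapGetInstrumentsInstrumentIdDepth(), matching the 'instruments/{instrument_id}/book'
# and 'instruments/{instrument_id}/depth' endpoints declared above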
request = {
'instrument_id': market['id'],
}
if limit is not None:
request['size'] = limit # max 200
response = await getattr(self, method)(self.extend(request, params))
#
# { asks: [["0.02685268", "0.242571", "1"],
# ["0.02685493", "0.164085", "1"],
# ...
# ["0.02779", "1.039", "1"],
# ["0.027813", "0.0876", "1"] ],
# bids: [["0.02684052", "10.371849", "1"],
# ["0.02684051", "3.707", "4"],
# ...
# ["0.02634963", "0.132934", "1"],
# ["0.02634962", "0.264838", "2"] ],
# timestamp: "2018-12-17T20:24:16.159Z" }
#
timestamp = self.parse8601(self.safe_string(response, 'timestamp'))
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472", # missing in the docs
# bid: "0.02665221", # not mentioned in the docs
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
symbol = None
marketId = self.safe_string(ticker, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
open = self.safe_float(ticker, 'open_24h')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high_24h'),
'low': self.safe_float(ticker, 'low_24h'),
'bid': self.safe_float(ticker, 'best_bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'best_ask'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'base_volume_24h'),
'quoteVolume': self.safe_float(ticker, 'quote_volume_24h'),
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTicker'
request = {
'instrument_id': market['id'],
}
response = await getattr(self, method)(self.extend(request, params))
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472",
# bid: "0.02665221",
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
return self.parse_ticker(response)
async def fetch_tickers_by_type(self, type, symbols=None, params={}):
await self.load_markets()
method = type + 'GetInstrumentsTicker'
response = await getattr(self, method)(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return result
async def fetch_tickers(self, symbols=None, params={}):
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
return await self.fetch_tickers_by_type(type, symbols, self.omit(params, 'type'))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# spot trades
#
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
#
# futures trades, swap trades
#
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
#
# fetchOrderTrades(private)
#
# spot trades, margin trades
#
# {
# "created_at":"2019-03-15T02:52:56.000Z",
# "exec_type":"T", # whether the order is taker or maker
# "fee":"0.00000082",
# "instrument_id":"BTC-USDT",
# "ledger_id":"3963052721",
# "liquidity":"T", # whether the order is taker or maker
# "order_id":"2482659399697408",
# "price":"3888.6",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.00055306",
# "timestamp":"2019-03-15T02:52:56.000Z"
# },
#
# futures trades, swap trades
#
# {
# "trade_id":"197429674631450625",
# "instrument_id":"EOS-USD-SWAP",
# "order_id":"6a-7-54d663a28-0",
# "price":"3.633",
# "order_qty":"1.0000",
# "fee":"-0.000551",
# "created_at":"2019-03-21T04:41:58.0Z", # missing in swap trades
# "timestamp":"2019-03-25T05:56:31.287Z", # missing in futures trades
# "exec_type":"M", # whether the order is taker or maker
# "side":"short", # "buy" in futures trades
# }
#
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string_2(trade, 'timestamp', 'created_at'))
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'size', 'qty')
amount = self.safe_float(trade, 'order_qty', amount)
takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
side = self.safe_string(trade, 'side')
cost = None
if amount is not None:
if price is not None:
cost = amount * price
feeCost = self.safe_float(trade, 'fee')
fee = None
if feeCost is not None:
feeCurrency = None
if market is not None:
feeCurrency = market['base'] if (side == 'buy') else market['quote']
fee = {
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
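# e.g. the documented "fee":"-0.000551" above is reported here as a positive cost of 0.000551
# charged to the trader, while a positive rebate would be reported as a negative cost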
'cost': -feeCost,
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'order_id')
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string_2(trade, 'trade_id', 'ledger_id'),
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTrades'
if (limit is None) or (limit > 100):
limit = 100 # maximum = default = 100
request = {
'instrument_id': market['id'],
'limit': limit,
# from: 'id',
# to: 'id',
}
response = await getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
# ]
#
# futures markets, swap markets
#
# [
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
#
# spot markets
#
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
#
# futures markets
#
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331,
# ]
#
if isinstance(ohlcv, list):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
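# e.g. the documented futures candle above has 7 elements, so volumeIndex is 6 and
# 725179.26172331 (the base volume in the 7th element) is used rather than 22886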
timestamp = ohlcv[0]
if isinstance(timestamp, basestring):
timestamp = self.parse8601(timestamp)
return [
timestamp, # timestamp
float(ohlcv[1]), # Open
float(ohlcv[2]), # High
float(ohlcv[3]), # Low
float(ohlcv[4]), # Close
# float(ohlcv[5]), # Quote Volume
# float(ohlcv[6]), # Base Volume
float(ohlcv[volumeIndex]), # Volume, okex will return base volume in the 7th element for future markets
]
else:
return [
self.parse8601(self.safe_string(ohlcv, 'time')),
self.safe_float(ohlcv, 'open'), # Open
self.safe_float(ohlcv, 'high'), # High
self.safe_float(ohlcv, 'low'), # Low
self.safe_float(ohlcv, 'close'), # Close
self.safe_float(ohlcv, 'volume'), # Base Volume
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdCandles'
request = {
'instrument_id': market['id'],
'granularity': self.timeframes[timeframe],
}
if since is not None:
request['start'] = self.iso8601(since)
response = await getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [{ close: "0.02683401",
# high: "0.02683401",
# low: "0.02683401",
# open: "0.02683401",
# time: "2018-12-17T23:47:00.000Z",
# volume: "0" },
# ...
# { close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222" } ]
#
# futures
#
# [[1545090660000,
# 0.3171,
# 0.3174,
# 0.3171,
# 0.3173,
# 1648,
# 51930.38579450868],
# ...
# [1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331] ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_account_balance(self, response):
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(balance, 'balance')
account['used'] = self.safe_float(balance, 'hold')
account['free'] = self.safe_float(balance, 'available')
result[code] = account
return self.parse_balance(result)
def parse_margin_balance(self, response):
#
# [
# {
# "currency:BTC": {
# "available":"0",
# "balance":"0",
# "borrowed":"0",
# "can_withdraw":"0",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "currency:USDT": {
# "available":"100",
# "balance":"100",
# "borrowed":"0",
# "can_withdraw":"100",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "instrument_id":"BTC-USDT",
# "liquidation_price":"0",
# "product_id":"BTC-USDT",
# "risk_rate":""
# },
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
marketId = self.safe_string(balance, 'instrument_id')
market = self.safe_value(self.markets_by_id, marketId)
symbol = None
if market is None:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = market['symbol']
omittedBalance = self.omit(balance, [
'instrument_id',
'liquidation_price',
'product_id',
'risk_rate',
'margin_ratio',
'maint_margin_ratio',
'tiers',
])
keys = list(omittedBalance.keys())
accounts = {}
for k in range(0, len(keys)):
key = keys[k]
marketBalance = balance[key]
if key.find(':') >= 0:
parts = key.split(':')
currencyId = parts[1]
code = self.safe_currency_code(currencyId)
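# e.g. the documented key "currency:BTC" above splits into ['currency', 'BTC'] and maps to the code 'BTC'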
account = self.account()
account['total'] = self.safe_float(marketBalance, 'balance')
account['used'] = self.safe_float(marketBalance, 'hold')
account['free'] = self.safe_float(marketBalance, 'available')
accounts[code] = account
else:
raise NotSupported(self.id + ' margin balance response format has changed!')
result[symbol] = self.parse_balance(accounts)
return result
def parse_futures_balance(self, response):
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
info = self.safe_value(response, 'info', {})
ids = list(info.keys())
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
balance = self.safe_value(info, id, {})
account = self.account()
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_float(balance, 'equity')
account['free'] = self.safe_float(balance, 'total_avail_balance')
result[code] = account
return self.parse_balance(result)
def parse_swap_balance(self, response):
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
info = self.safe_value(response, 'info', [])
for i in range(0, len(info)):
balance = info[i]
marketId = self.safe_string(balance, 'instrument_id')
symbol = marketId
if marketId in self.markets_by_id:
symbol = self.markets_by_id[marketId]['symbol']
account = self.account()
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_float(balance, 'equity')
account['free'] = self.safe_float(balance, 'total_avail_balance')
result[symbol] = account
return self.parse_balance(result)
async def fetch_balance(self, params={}):
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchBalance requires a type parameter(one of 'account', 'spot', 'margin', 'futures', 'swap')")
suffix = 'Wallet' if (type == 'account') else 'Accounts'
method = type + 'Get' + suffix
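# e.g. type 'account' -> self.accountGetWallet(), 'spot' -> self.spotGetAccounts(),
# 'margin' -> self.marginGetAccounts(), 'futures' -> self.futuresGetAccounts(), 'swap' -> self.swapGetAccounts()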
query = self.omit(params, 'type')
response = await getattr(self, method)(query)
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
# margin
#
# [
# {
# "currency:BTC": {
# "available":"0",
# "balance":"0",
# "borrowed":"0",
# "can_withdraw":"0",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "currency:USDT": {
# "available":"100",
# "balance":"100",
# "borrowed":"0",
# "can_withdraw":"100",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "instrument_id":"BTC-USDT",
# "liquidation_price":"0",
# "product_id":"BTC-USDT",
# "risk_rate":""
# },
# ]
#
# futures
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# swap
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
if (type == 'account') or (type == 'spot'):
return self.parse_account_balance(response)
elif type == 'margin':
return self.parse_margin_balance(response)
elif type == 'futures':
return self.parse_futures_balance(response)
elif type == 'swap':
return self.parse_swap_balance(response)
raise NotSupported(self.id + " fetchBalance does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef1234567890', # [a-z0-9]{1,32}
# 'order_type': '0', # 0: Normal limit order(Unfilled and 0 represent normal limit order) 1: Post only 2: Fill Or Kill 3: Immediate Or Cancel
}
method = None
if market['futures'] or market['swap']:
size = self.number_to_string(amount) if market['futures'] else self.amount_to_precision(symbol, amount)
request = self.extend(request, {
'type': type, # 1:open long 2:open short 3:close long 4:close short for futures
'size': size,
'price': self.price_to_precision(symbol, price),
# 'match_price': '0', # Order at best counter party price?(0:no 1:yes). The default is 0. If it is set as 1, the price parameter will be ignored. When posting orders at best bid price, order_type can only be 0(regular order).
})
if market['futures']:
request['leverage'] = '10' # or '20'
method = market['type'] + 'PostOrder'
else:
marginTrading = self.safe_string(params, 'margin_trading', '1') # 1 = spot, 2 = margin
request = self.extend(request, {
'side': side,
'type': type, # limit/market
'margin_trading': marginTrading, # 1 = spot, 2 = margin
})
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['size'] = self.amount_to_precision(symbol, amount)
elif type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
notional = self.safe_float(params, 'notional')
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
if notional is None:
notional = amount * price
elif notional is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'notional' extra parameter(the exchange-specific behaviour)")
request['notional'] = self.cost_to_precision(symbol, notional)
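# a sketch with hypothetical numbers: a market buy of amount 0.001 at price 4000 computes
# notional = 0.001 * 4000 = 4 and sends it through cost_to_precision; alternatively set
# self.options['createMarketBuyOrderRequiresPrice'] = False and pass the total cost via params={'notional': 4}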
else:
request['size'] = self.amount_to_precision(symbol, amount)
method = 'marginPostOrders' if (marginTrading == '2') else 'spotPostOrders'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
timestamp = self.milliseconds()
id = self.safe_string(response, 'order_id')
return {
'info': response,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
}
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
type = market['type']
method = type + 'PostCancelOrder'
request = {
'instrument_id': market['id'],
}
if market['futures'] or market['swap']:
method += 'InstrumentId'
else:
method += 's'
clientOid = self.safe_string(params, 'client_oid')
if clientOid is not None:
method += 'ClientOid'
request['client_oid'] = clientOid
else:
method += 'OrderId'
request['order_id'] = id
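# the resulting implicit method is, e.g., self.spotPostCancelOrdersOrderId() for spot orders
# or self.futuresPostCancelOrderInstrumentIdOrderId() for futures, matching the
# 'cancel_orders/{order_id}' and 'cancel_order/{instrument_id}/{order_id}' endpoints declared above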
query = self.omit(params, 'type')
response = await getattr(self, method)(self.extend(request, query))
result = response if ('result' in response) else self.safe_value(response, market['id'], {})
#
# spot, margin
#
# {
# "btc-usdt": [
# {
# "result":true,
# "client_oid":"a123",
# "order_id": "2510832677225473"
# }
# ]
# }
#
# futures, swap
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# "instrument_id": "EOS-USD-190628"
# }
#
return self.parse_order(result, market)
def parse_order_status(self, status):
statuses = {
'-2': 'failed',
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_side(self, side):
sides = {
'1': 'buy', # open long
'2': 'sell', # open short
'3': 'sell', # close long
'4': 'buy', # close short
}
return self.safe_string(sides, side, side)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
# cancelOrder
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# # instrument_id is missing for spot/margin orders
# # available in futures and swap orders only
# "instrument_id": "EOS-USD-190628",
# }
#
# fetchOrder, fetchOrdersByState, fetchOpenOrders, fetchClosedOrders
#
# # spot and margin orders
#
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001", # filled_qty in futures and swap orders
# "funds":"", # this is most likely the same as notional
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT", # missing in futures and swap orders
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# }
#
# # futures and swap orders
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10", # filled_size in spot and margin orders
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567", # missing in spot and margin orders
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap, spot and margin orders
# "client_oid":"",
# "pnl":"1.09510794", # missing in swap, spot and margin orders
# "order_type":"0"
# }
#
id = self.safe_string(order, 'order_id')
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'type')
if (side != 'buy') and (side != 'sell'):
side = self.parse_order_side(type)
if (type != 'limit') and (type != 'market'):
if 'pnl' in order:
type = 'futures'
else:
type = 'swap'
symbol = None
marketId = self.safe_string(order, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = marketId
if market is not None:
if symbol is None:
symbol = market['symbol']
amount = self.safe_float(order, 'size')
filled = self.safe_float_2(order, 'filled_size', 'filled_qty')
remaining = None
if amount is not None:
if filled is not None:
amount = max(amount, filled)
remaining = max(0, amount - filled)
if type == 'market':
remaining = 0
cost = self.safe_float_2(order, 'filled_notional', 'funds')
price = self.safe_float(order, 'price')
average = self.safe_float(order, 'price_avg')
if cost is None:
if filled is not None and average is not None:
cost = average * filled
else:
if (average is None) and (filled is not None) and (filled > 0):
average = cost / filled
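# e.g. for the documented spot order above, filled_notional 3.9734 / filled_size 0.001
# gives an average fill price of 3973.4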
status = self.parse_order_status(self.safe_string(order, 'state'))
feeCost = self.safe_float(order, 'fee')
fee = None
if feeCost is not None:
feeCurrency = None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
return {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
}
async def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrder requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
instrumentId = 'InstrumentId' if (market['futures'] or market['swap']) else ''
method = type + 'GetOrders' + instrumentId
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef12345', # optional, [a-z0-9]{1,32}
# 'order_id': id,
}
clientOid = self.safe_string(params, 'client_oid')
if clientOid is not None:
method += 'ClientOid'
request['client_oid'] = clientOid
else:
method += 'OrderId'
request['order_id'] = id
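# e.g. a spot order queried by order_id resolves to self.spotGetOrdersOrderId(),
# a futures order to self.futuresGetOrdersInstrumentIdOrderId(), per the endpoints defined above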
query = self.omit(params, 'type')
response = await getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# {
# "client_oid":"oktspot70",
# "created_at":"2019-03-15T02:52:56.000Z",
# "filled_notional":"3.8886",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2482659399697408",
# "order_type":"0",
# "price":"3927.3",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-15T02:52:56.000Z",
# "type":"limit"
# }
#
# futures, swap
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T02:46:38.000Z",
# "filled_qty":"10",
# "fee":"-0.0080819",
# "order_id":"2510946213248000",
# "price":"3.712",
# "price_avg":"3.712",
# "status":"2",
# "state": "2",
# "type":"2",
# "contract_val":"10",
# "leverage":"10",
# "client_oid":"", # missing in swap orders
# "pnl":"0", # missing in swap orders
# "order_type":"0"
# }
#
return self.parse_order(response)
async def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersByState requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
type = market['type']
request = {
'instrument_id': market['id'],
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
'state': state,
}
method = type + 'GetOrders'
if market['futures'] or market['swap']:
method += 'InstrumentId'
query = self.omit(params, 'type')
response = await getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# [
# # in fact, this documented API response does not correspond
# # to their actual API response for spot markets
# # OKEX v3 API returns a plain array of orders(see below)
# [
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# },
# ],
# {
# "before":"2500723297813504",
# "after":"2500650881647616"
# }
# ]
#
# futures, swap
#
# {
# "result":true, # missing in swap orders
# "order_info": [
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10",
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567",
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap orders
# "client_oid":"",
# "pnl":"1.09510794", # missing in swap orders
# "order_type":"0"
# },
# ]
# }
#
orders = None
if market['type'] == 'swap' or market['type'] == 'futures':
orders = self.safe_value(response, 'order_info', [])
else:
orders = response
responseLength = len(response)
if responseLength < 1:
return []
# in fact, this documented API response does not correspond
# to their actual API response for spot markets
# OKEX v3 API returns a plain array of orders
if responseLength > 1:
before = self.safe_value(response[1], 'before')
if before is not None:
orders = response[0]
return self.parse_orders(orders, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return await self.fetch_orders_by_state('6', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return await self.fetch_orders_by_state('7', symbol, since, limit, params)
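#
# A minimal usage sketch (illustrative only, not part of the class): it assumes a
# configured async okex3 instance and uses 'BTC/USDT' purely as an example symbol;
# a symbol argument is mandatory for both calls.
#
#     open_orders = await exchange.fetch_open_orders('BTC/USDT')      # state '6': open + partially filled
#     closed_orders = await exchange.fetch_closed_orders('BTC/USDT')  # state '7': cancelled + fully filled
#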
def parse_deposit_addresses(self, addresses):
result = []
for i in range(0, len(addresses)):
result.append(self.parse_deposit_address(addresses[i]))
return result
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# tag: 'abcde12345', # will be missing if the token does not require a deposit tag
# payment_id: 'abcde12345', # will not be returned if the token does not require a payment_id
# # can_deposit: 1, # 0 or 1, documented but missing
# # can_withdraw: 1, # 0 or 1, documented but missing
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string_2(depositAddress, 'tag', 'payment_id')
tag = self.safe_string(depositAddress, 'memo', tag)
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.accountGetDepositAddress(self.extend(request, params))
#
# [
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# }
# ]
#
addresses = self.parse_deposit_addresses(response)
numAddresses = len(addresses)
if numAddresses < 1:
raise InvalidAddress(self.id + ' fetchDepositAddress cannot return nonexistent addresses, you should create withdrawal addresses with the exchange website first')
return addresses[0]
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
if tag:
address = address + ':' + tag
fee = self.safe_string(params, 'fee')
if fee is None:
raise ExchangeError(self.id + " withdraw() requires a `fee` string parameter, network transaction fee must be ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set '0'. Withdrawing to external digital asset address requires network transaction fee.")
request = {
'currency': currency['id'],
'to_address': address,
'destination': '4', # 2 = OKCoin International, 3 = OKEx 4 = others
'amount': self.number_to_string(amount),
'fee': fee, # String. Network transaction fee ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set as 0. Withdrawal to external digital asset address requires network transaction fee.
}
if 'password' in params:
request['trade_pwd'] = params['password']
elif 'trade_pwd' in params:
request['trade_pwd'] = params['trade_pwd']
elif self.password:
request['trade_pwd'] = self.password
query = self.omit(params, ['fee', 'password', 'trade_pwd'])
if not ('trade_pwd' in request):
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
response = await self.accountPostWithdrawal(self.extend(request, query))
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
return {
'info': response,
'id': self.safe_string(response, 'withdrawal_id'),
}
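#
# Illustrative call only (the code, amount, address and passwords below are
# placeholders): the 'fee' param is mandatory, as enforced above, and the funds
# password may come from self.password or from a 'password'/'trade_pwd' param.
#
#     await exchange.withdraw('USDT', 10, 'external-address-here', None, {
#         'fee': '1',  # network fee as a string; '0' is only valid for internal OKCoin/OKEx transfers
#         'password': 'funds-password',
#     })
#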
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
method = 'accountGetDepositHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['code'] = currency['code']
method += 'Currency'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
method = 'accountGetWithdrawalHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['code'] = currency['code']
method += 'Currency'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def parse_transaction_status(self, status):
#
# deposit statuses
#
# {
# '0': 'waiting for confirmation',
# '1': 'confirmation account',
# '2': 'recharge success'
# }
#
# withdrawal statuses
#
# {
# '-3': 'pending cancel',
# '-2': 'cancelled',
# '-1': 'failed',
# '0': 'pending',
# '1': 'sending',
# '2': 'sent',
# '3': 'email confirmation',
# '4': 'manual confirmation',
# '5': 'awaiting identity confirmation'
# }
#
statuses = {
'-3': 'pending',
'-2': 'pending',
'-1': 'failed',
'0': 'pending',
'1': 'pending',
'2': 'ok',
'3': 'pending',
'4': 'pending',
'5': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
# fetchWithdrawals
#
# {
# amount: "4.72100000",
# withdrawal_id: "1729116",
# fee: "0.01000000eth",
# txid: "0xf653125bbf090bcfe4b5e8e7b8f586a9d87aa7de94598702758c0802b…",
# currency: "ETH",
# from: "7147338839",
# to: "0x26a3CB49578F07000575405a57888681249c35Fd",
# timestamp: "2018-08-17T07:03:42.000Z",
# status: "2"
# }
#
# fetchDeposits
#
# {
# amount: "0.47847546",
# txid: "1723573_3_0_0_WALLET",
# currency: "BTC",
# to: "",
# timestamp: "2018-08-16T03:41:10.000Z",
# status: "2"
# }
#
type = None
id = None
address = None
withdrawalId = self.safe_string(transaction, 'withdrawal_id')
addressFrom = self.safe_string(transaction, 'from')
addressTo = self.safe_string(transaction, 'to')
if withdrawalId is not None:
type = 'withdrawal'
id = withdrawalId
address = addressTo
else:
# the payment_id will appear on new deposits but appears to be removed from the response after 2 months
id = self.safe_string(transaction, 'payment_id')
type = 'deposit'
address = addressTo
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_float(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
txid = self.safe_string(transaction, 'txid')
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
feeCost = None
if type == 'deposit':
feeCost = 0
else:
if currencyId is not None:
feeWithCurrencyId = self.safe_string(transaction, 'fee')
if feeWithCurrencyId is not None:
# https://github.com/ccxt/ccxt/pull/5748
lowercaseCurrencyId = currencyId.lower()
feeWithoutCurrencyId = feeWithCurrencyId.replace(lowercaseCurrencyId, '')
feeCost = float(feeWithoutCurrencyId)
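# e.g. a withdrawal of ETH may report fee as "0.01000000eth"; stripping the
# lowercased currency id leaves "0.01000000", which parses to a 0.01 fee cost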
# todo parse tags
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': address,
'tagFrom': None,
'tagTo': None,
'tag': None,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
# okex actually returns ledger entries instead of fills here, so each fill in the order
# is represented by two trades with opposite buy/sell sides, not one :\
# this aspect renders the 'fills' endpoint unusable for fetchOrderTrades
# until either OKEX fixes the API or we work around this on our side somehow
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrderTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
if (limit is None) or (limit > 100):
limit = 100
request = {
'instrument_id': market['id'],
'order_id': id,
# from: '1', # return the page after the specified page number
# to: '1', # return the page before the specified page number
'limit': limit, # optional, number of results per request, default = maximum = 100
}
defaultType = self.safe_string_2(self.options, 'fetchMyTrades', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = type + 'GetFills'
response = await getattr(self, method)(self.extend(request, query))
#
# spot trades, margin trades
#
# [
# {
# "created_at":"2019-09-20T07:15:24.000Z",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"7173486113",
# "liquidity":"T",
# "order_id":"3553868136523776",
# "price":"217.59",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"0.04619899",
# "timestamp":"2019-09-20T07:15:24.000Z"
# }
# ]
#
# futures trades, swap trades
#
# [
# {
# "trade_id":"197429674631450625",
# "instrument_id":"EOS-USD-SWAP",
# "order_id":"6a-7-54d663a28-0",
# "price":"3.633",
# "order_qty":"1.0000",
# "fee":"-0.000551",
# "created_at":"2019-03-21T04:41:58.0Z", # missing in swap trades
# "timestamp":"2019-03-25T05:56:31.287Z", # missing in futures trades
# "exec_type":"M", # whether the order is taker or maker
# "side":"short", # "buy" in futures trades
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchLedger', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
suffix = '' if (type == 'account') else 'Accounts'
argument = ''
request = {
# 'from': 'id',
# 'to': 'id',
}
if limit is not None:
request['limit'] = limit
currency = None
if (type == 'spot') or (type == 'futures'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger requires a currency code argument for '" + type + "' markets")
argument = 'Currency'
currency = self.currency(code)
request['currency'] = currency['id']
elif (type == 'margin') or (type == 'swap'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger requires a code argument(a market symbol) for '" + type + "' markets")
argument = 'InstrumentId'
market = self.market(code) # we intentionally put a market inside here for the margin and swap ledgers
currency = self.currency(market['base'])
request['instrument_id'] = market['id']
#
# if type == 'margin':
# #
# # 3. Borrow
# # 4. Repayment
# # 5. Interest
# # 7. Buy
# # 8. Sell
# # 9. From capital account
# # 10. From C2C
# # 11. From Futures
# # 12. From Spot
# # 13. From ETT
# # 14. To capital account
# # 15. To C2C
# # 16. To Spot
# # 17. To Futures
# # 18. To ETT
# # 19. Mandatory Repayment
# # 20. From Piggybank
# # 21. To Piggybank
# # 22. From Perpetual
# # 23. To Perpetual
# # 24. Liquidation Fee
# # 54. Clawback
# # 59. Airdrop Return.
# #
# request['type'] = 'number' # All types will be returned if this field is left blank
# }
#
elif type == 'account':
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
#
# #
# # 1. deposit
# # 2. withdrawal
# # 13. cancel withdrawal
# # 18. into futures account
# # 19. out of futures account
# # 20. into sub account
# # 21. out of sub account
# # 28. claim
# # 29. into ETT account
# # 30. out of ETT account
# # 31. into C2C account
# # 32. out of C2C account
# # 33. into margin account
# # 34. out of margin account
# # 37. into spot account
# # 38. out of spot account
# #
# request['type'] = 'number'
#
else:
raise NotSupported(self.id + " fetchLedger does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
method = type + 'Get' + suffix + argument + 'Ledger'
response = await getattr(self, method)(self.extend(request, query))
#
# transfer funds transfer in/out
# trade funds moved as a result of a trade, spot and margin accounts only
# rebate fee rebate as per fee schedule, spot and margin accounts only
# match open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
# fee fee, futures only
# settlement settlement/clawback/settle long/settle short
# liquidation force close long/force close short/deliver close long/deliver close short
# funding funding fee, swap only
# margin a change in the amount after adjusting margin, swap only
#
# account
#
# [
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
# ]
#
# spot
#
# [
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
# ]
#
# margin
#
# [
# [
# {
# "created_at":"2019-03-20T03:45:05.000Z",
# "ledger_id":"78918186",
# "timestamp":"2019-03-20T03:45:05.000Z",
# "currency":"EOS",
# "amount":"0", # ?
# "balance":"0.59957711",
# "type":"transfer",
# "details":{
# "instrument_id":"EOS-USDT",
# "order_id":"787057",
# "product_id":"EOS-USDT"
# }
# }
# ],
# {
# "before":"78965766",
# "after":"78918186"
# }
# ]
#
# futures
#
# [
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
# ]
#
# swap
#
# [
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
# ]
#
isArray = isinstance(response[0], list)
isMargin = (type == 'margin')
entries = response[0] if (isMargin and isArray) else response
return self.parse_ledger(entries, currency, since, limit)
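#
# Illustrative calls (values are placeholders): per the checks above, the spot and
# futures ledgers expect a currency code, while the margin and swap ledgers expect
# a market symbol, selected through the 'type' param or options['fetchLedger'].
#
#     spot_ledger = await exchange.fetch_ledger('BTC', params={'type': 'spot'})
#     swap_ledger = await exchange.fetch_ledger('BTC-USD-SWAP', params={'type': 'swap'})
#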
def parse_ledger_entry_type(self, type):
types = {
'transfer': 'transfer', # # funds transfer in/out
'trade': 'trade', # funds moved as a result of a trade, spot and margin accounts only
'rebate': 'rebate', # fee rebate as per fee schedule, spot and margin accounts only
'match': 'trade', # open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
'fee': 'fee', # fee, futures only
'settlement': 'trade', # settlement/clawback/settle long/settle short
'liquidation': 'trade', # force close long/force close short/deliver close long/deliver close short
'funding': 'fee', # funding fee, swap only
'margin': 'margin', # a change in the amount after adjusting margin, swap only
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
#
# account
#
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
#
# spot
#
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
#
# margin
#
# {
# "created_at":"2019-03-20T03:45:05.000Z",
# "ledger_id":"78918186",
# "timestamp":"2019-03-20T03:45:05.000Z",
# "currency":"EOS",
# "amount":"0", # ?
# "balance":"0.59957711",
# "type":"transfer",
# "details":{
# "instrument_id":"EOS-USDT",
# "order_id":"787057",
# "product_id":"EOS-USDT"
# }
# }
#
# futures
#
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
#
# swap
#
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
#
id = self.safe_string(item, 'ledger_id')
account = None
details = self.safe_value(item, 'details', {})
referenceId = self.safe_string(details, 'order_id')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(self.safe_string(item, 'currency'), currency)
amount = self.safe_float(item, 'amount')
timestamp = self.parse8601(self.safe_string(item, 'timestamp'))
fee = {
'cost': self.safe_float(item, 'fee'),
'currency': code,
}
before = None
after = self.safe_float(item, 'balance')
status = 'ok'
return {
'info': item,
'id': id,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before, # balance before
'after': after, # balance after
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
isArray = isinstance(params, list)
request = '/api/' + api + '/' + self.version + '/'
request += path if isArray else self.implode_params(path, params)
query = params if isArray else self.omit(params, self.extract_params(path))
url = self.urls['api'] + request
type = self.get_path_authentication_type(path)
if type == 'public':
if query:
url += '?' + self.urlencode(query)
elif type == 'private':
self.check_required_credentials()
timestamp = self.iso8601(self.milliseconds())
headers = {
'OK-ACCESS-KEY': self.apiKey,
'OK-ACCESS-PASSPHRASE': self.password,
'OK-ACCESS-TIMESTAMP': timestamp,
# 'OK-FROM': '',
# 'OK-TO': '',
# 'OK-LIMIT': '',
}
auth = timestamp + method + request
if method == 'GET':
if query:
urlencodedQuery = '?' + self.urlencode(query)
url += urlencodedQuery
auth += urlencodedQuery
else:
if isArray or query:
body = self.json(query)
auth += body
headers['Content-Type'] = 'application/json'
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
headers['OK-ACCESS-SIGN'] = self.decode(signature)
headers['client_oid'] = 'quad' + self.number_to_string(self.milliseconds()) # Quick and dirty way to pass custom header for order tracking
return {'url': url, 'method': method, 'body': body, 'headers': headers}
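#
# The prehash string built above follows the OK-ACCESS scheme: timestamp + method +
# request path, plus the urlencoded query for GET or the JSON body otherwise, signed
# with HMAC-SHA256 over the secret and base64-encoded. A standalone sketch of the
# same computation (illustrative values only):
#
#     import base64, hashlib, hmac
#     prehash = '2019-03-20T10:04:55.000Z' + 'GET' + '/api/spot/v3/accounts'
#     raw = hmac.new(b'SECRET', prehash.encode(), hashlib.sha256).digest()
#     signature = base64.b64encode(raw).decode()  # value of the OK-ACCESS-SIGN header
#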
def get_path_authentication_type(self, path):
auth = self.safe_value(self.options, 'auth', {})
key = self.find_broadly_matched_key(auth, path)
return self.safe_string(auth, key, 'private')
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
feedback = self.id + ' ' + body
if code == 503:
raise ExchangeError(feedback)
if not response:
return # fallback to default error handler
message = self.safe_string(response, 'message')
errorCode = self.safe_string_2(response, 'code', 'error_code')
if message is not None:
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
nonEmptyMessage = (message != '')
nonZeroErrorCode = (errorCode is not None) and (errorCode != '0')
if nonZeroErrorCode or nonEmptyMessage:
raise ExchangeError(feedback) # unknown message
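#
# End-to-end usage sketch (illustrative only; the credentials, order id and symbol
# below are placeholders, and the client must be closed to release its session):
#
#     import asyncio
#     import ccxt.async_support as ccxt
#
#     async def main():
#         exchange = ccxt.okex3({'apiKey': 'KEY', 'secret': 'SECRET', 'password': 'PASSPHRASE'})
#         try:
#             order = await exchange.fetch_order('2510946213248000', 'BTC/USDT')
#             print(order['status'], order['filled'])
#         finally:
#             await exchange.close()
#
#     asyncio.get_event_loop().run_until_complete(main())
#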
| 44.579632 | 493 | 0.459319 |
4a212d476b567312cd0b4718a3d5cf008f66524c | 575 | py | Python | selenium_test.py | cmput401-fall2018/web-app-ci-cd-with-travis-ci-Struckdown | a75d4282b6c4e3e05653edcef76fd1c9c1401c07 | ["MIT"] | stars: null | issues: 1 (2021-06-01T22:52:08.000Z to 2021-06-01T22:52:08.000Z) | forks: null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def test_home():
driver = webdriver.Chrome()
driver.get("http://127.0.0.1:8000")
elem = driver.find_element_by_id("name")
assert elem != None
elem = driver.find_element_by_id("about")
assert elem != None
elem = driver.find_element_by_id("skills")
assert elem != None
elem = driver.find_element_by_id("education")
assert elem != None
elem = driver.find_element_by_id("workExperience")
assert elem != None
elem = driver.find_element_by_id("contactInformation")
assert elem != None
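# Allow running the check directly (assumes chromedriver is on PATH and the site is
# already being served locally on port 8000):
if __name__ == "__main__":
    test_home()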
| 27.380952 | 55 | 0.751304 |
4a212e395bde35e2318e6e9425076d0448d48050 | 2,297 | py | Python | python_study/07linkedlist/linked_list.py | xuyang7952/algo | 0c3cabcdc1aaa3dcb51edc51cf4dea3dce405959 | ["Apache-2.0"] | stars: null | issues: null | forks: null |
"""
1) Reverse singly-linked list
2) Detect cycle in a list
3) Merge two sorted lists
4) Remove nth node from the end
5) Find middle node
Author: Xuyang
"""
from typing import Optional
class Node:
def __init__(self, data: int, next=None):
self.data = data
self._next = next
# Reverse a singly-linked list (iterative, in place)
# Note that the input is assumed to be a Node, not a linked list.
def reverse(head: Node) -> Optional[Node]:
reverse_head = None
current = head
while current:
reverse_head, reverse_head._next, current = current, reverse_head, current._next
return reverse_head
# Detect a cycle in a list (Floyd's fast/slow-pointer technique)
def has_cycle(head: Node) -> bool:
slow, fast = head, head
while fast and fast._next:
slow = slow._next
fast = fast._next._next
if slow == fast:
return True
return False
# Merge two sorted linked lists into one sorted list
def merge_sorted_list(l1: Node, l2: Node) -> Optional[Node]:
if l1 and l2:
p1, p2 = l1, l2
fake_head = Node(None)
current = fake_head
while p1 and p2:
if p1.data <= p2.data:
current._next = p1
p1 = p1._next
else:
current._next = p2
p2 = p2._next
current = current._next
# append whatever remains of p1 or p2
current._next = p1 if p1 else p2
return fake_head._next
return l1 or l2
# Remove the nth node from the end of the list (assumes n > 0)
def remove_nth_from_end(head: Node, n: int) -> Optional[Node]:
fast, slow = head, head
cnt = 0
while fast and cnt < n:
fast = fast._next
cnt += 1
if not fast and cnt < n:
return head
if not fast and cnt == n:
return head._next
while fast._next:
fast, slow = fast._next, slow._next
# unlink the target node and return the (unchanged) head
slow._next = slow._next._next
return head
def find_middle_node(head: Node) -> Optional[Node]:
slow, fast = head, head
fast = fast._next if fast else None
while fast and fast._next:
slow, fast = slow._next, fast._next._next
return slow
def print_all(head: Node) -> Optional[Node]:
nums = []
cur = head
while cur:
nums.append(str(cur.data))
cur = cur._next
print('->'.join(nums))
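# A small self-check of the helpers above (illustrative; builds 1->2->3->4->5
# with the Node class defined in this module):
if __name__ == "__main__":
    head = Node(1, Node(2, Node(3, Node(4, Node(5)))))
    print_all(head)                           # 1->2->3->4->5
    print_all(find_middle_node(head))         # 3->4->5
    print_all(remove_nth_from_end(head, 2))   # 1->2->3->5
    print_all(reverse(head))                  # 5->3->2->1
    print(has_cycle(head))                    # False, the list is acyclic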
| 23.20202 | 88 | 0.591641 |
4a212ec0df02c48d8c7382a9be3dd1e7b4f66dd0 | 36,580 | py | Python | saleor/core/utils/random_data.py | angeles-ricardo-89/saleor | 5fab7a883d025bff83320fbdd557ed7afa2923a9 | ["BSD-3-Clause"] | stars: 3 (2019-01-24T11:41:58.000Z to 2019-11-10T13:12:24.000Z) | issues: 11 (2021-03-30T14:26:57.000Z to 2022-03-12T00:51:07.000Z) | forks: 12 (2019-03-21T03:24:58.000Z to 2022-01-13T10:55:34.000Z) |
import itertools
import json
import os
import random
import unicodedata
import uuid
from collections import defaultdict
from typing import Type, Union
from unittest.mock import patch
from django.conf import settings
from django.contrib.auth.models import Group, Permission
from django.contrib.sites.models import Site
from django.core.files import File
from django.db.models import F, Q
from django.urls import reverse
from django.utils import timezone
from django.utils.text import slugify
from faker import Factory
from faker.providers import BaseProvider
from measurement.measures import Weight
from prices import Money, TaxedMoney
from ...account.models import Address, User
from ...account.utils import store_user_address
from ...checkout import AddressType
from ...core.permissions import (
AccountPermissions,
CheckoutPermissions,
GiftcardPermissions,
OrderPermissions,
get_permissions,
)
from ...core.utils import build_absolute_uri
from ...core.weight import zero_weight
from ...discount import DiscountValueType, VoucherType
from ...discount.models import Sale, Voucher
from ...discount.utils import fetch_discounts
from ...giftcard.models import GiftCard
from ...menu.models import Menu
from ...order.models import Fulfillment, Order, OrderLine
from ...order.utils import update_order_status
from ...page.models import Page
from ...payment import gateway
from ...payment.utils import create_payment
from ...plugins.manager import get_plugins_manager
from ...product.models import (
AssignedProductAttribute,
AssignedVariantAttribute,
Attribute,
AttributeProduct,
AttributeValue,
AttributeVariant,
Category,
Collection,
CollectionProduct,
Product,
ProductImage,
ProductType,
ProductVariant,
)
from ...product.tasks import update_products_minimal_variant_prices_of_discount_task
from ...product.thumbnails import (
create_category_background_image_thumbnails,
create_collection_background_image_thumbnails,
create_product_thumbnails,
)
from ...shipping.models import ShippingMethod, ShippingMethodType, ShippingZone
from ...warehouse.management import increase_stock
from ...warehouse.models import Stock, Warehouse
fake = Factory.create()
PRODUCTS_LIST_DIR = "products-list/"
IMAGES_MAPPING = {
61: ["saleordemoproduct_paints_01.png"],
62: ["saleordemoproduct_paints_02.png"],
63: ["saleordemoproduct_paints_03.png"],
64: ["saleordemoproduct_paints_04.png"],
65: ["saleordemoproduct_paints_05.png"],
71: ["saleordemoproduct_fd_juice_06.png"],
72: ["saleordemoproduct_fd_juice_06.png"], # FIXME inproper image
73: ["saleordemoproduct_fd_juice_05.png"],
74: ["saleordemoproduct_fd_juice_01.png"],
75: ["saleordemoproduct_fd_juice_03.png"], # FIXME inproper image
76: ["saleordemoproduct_fd_juice_02.png"], # FIXME inproper image
77: ["saleordemoproduct_fd_juice_03.png"],
78: ["saleordemoproduct_fd_juice_04.png"],
79: ["saleordemoproduct_fd_juice_02.png"],
81: ["saleordemoproduct_wine-red.png"],
82: ["saleordemoproduct_wine-white.png"],
83: ["saleordemoproduct_beer-02_1.png", "saleordemoproduct_beer-02_2.png"],
84: ["saleordemoproduct_beer-01_1.png", "saleordemoproduct_beer-01_2.png"],
85: ["saleordemoproduct_cuschion01.png"],
86: ["saleordemoproduct_cuschion02.png"],
87: [
"saleordemoproduct_sneakers_01_1.png",
"saleordemoproduct_sneakers_01_2.png",
"saleordemoproduct_sneakers_01_3.png",
"saleordemoproduct_sneakers_01_4.png",
],
88: [
"saleordemoproduct_sneakers_02_1.png",
"saleordemoproduct_sneakers_02_2.png",
"saleordemoproduct_sneakers_02_3.png",
"saleordemoproduct_sneakers_02_4.png",
],
89: ["saleordemoproduct_cl_boot07_1.png", "saleordemoproduct_cl_boot07_2.png"],
107: ["saleordemoproduct_cl_polo01.png"],
108: ["saleordemoproduct_cl_polo02.png"],
109: ["saleordemoproduct_cl_polo03-woman.png"],
110: ["saleordemoproduct_cl_polo04-woman.png"],
111: [
"saleordemoproduct_cl_boot01_1.png",
"saleordemoproduct_cl_boot01_2.png",
"saleordemoproduct_cl_boot01_3.png",
],
112: ["saleordemoproduct_cl_boot03_1.png", "saleordemoproduct_cl_boot03_2.png"],
113: ["saleordemoproduct_cl_boot06_1.png", "saleordemoproduct_cl_boot06_2.png"],
114: [
"saleordemoproduct_cl_boot06_1.png",
"saleordemoproduct_cl_boot06_2.png",
], # FIXME incorrect image
115: ["saleordemoproduct_cl_bogo01_1.png"],
116: ["saleordemoproduct_cl_bogo02_1.png"],
117: ["saleordemoproduct_cl_bogo03_1.png"],
118: ["saleordemoproduct_cl_bogo04_1.png", "saleordemoproduct_cl_bogo04_2.png"],
}
CATEGORY_IMAGES = {7: "accessories.jpg", 8: "groceries.jpg", 9: "apparel.jpg"}
COLLECTION_IMAGES = {1: "summer.jpg", 2: "clothing.jpg"}
def get_weight(weight):
if not weight:
return zero_weight()
value, unit = weight.split(":")
return Weight(**{unit: value})
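# e.g. a fixture weight of "kg:0.3" is split into unit "kg" and value "0.3" and
# becomes Weight(kg=0.3); an empty value falls back to zero_weight()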
def create_product_types(product_type_data):
for product_type in product_type_data:
pk = product_type["pk"]
defaults = product_type["fields"]
defaults["weight"] = get_weight(defaults["weight"])
ProductType.objects.update_or_create(pk=pk, defaults=defaults)
def create_categories(categories_data, placeholder_dir):
placeholder_dir = get_product_list_images_dir(placeholder_dir)
for category in categories_data:
pk = category["pk"]
defaults = category["fields"]
parent = defaults["parent"]
image_name = (
CATEGORY_IMAGES[pk] if pk in CATEGORY_IMAGES else CATEGORY_IMAGES[parent]
)
background_image = get_image(placeholder_dir, image_name)
defaults["background_image"] = background_image
if parent:
defaults["parent"] = Category.objects.get(pk=parent)
Category.objects.update_or_create(pk=pk, defaults=defaults)
create_category_background_image_thumbnails.delay(pk)
def create_collections(data, placeholder_dir):
placeholder_dir = get_product_list_images_dir(placeholder_dir)
for collection in data:
pk = collection["pk"]
defaults = collection["fields"]
image_name = COLLECTION_IMAGES[pk]
background_image = get_image(placeholder_dir, image_name)
defaults["background_image"] = background_image
Collection.objects.update_or_create(pk=pk, defaults=defaults)
create_collection_background_image_thumbnails.delay(pk)
def assign_products_to_collections(associations: list):
for value in associations:
pk = value["pk"]
defaults = value["fields"]
defaults["collection_id"] = defaults.pop("collection")
defaults["product_id"] = defaults.pop("product")
CollectionProduct.objects.update_or_create(pk=pk, defaults=defaults)
def create_attributes(attributes_data):
for attribute in attributes_data:
pk = attribute["pk"]
defaults = attribute["fields"]
attr, _ = Attribute.objects.update_or_create(pk=pk, defaults=defaults)
def create_attributes_values(values_data):
for value in values_data:
pk = value["pk"]
defaults = value["fields"]
defaults["attribute_id"] = defaults.pop("attribute")
AttributeValue.objects.update_or_create(pk=pk, defaults=defaults)
def create_products(products_data, placeholder_dir, create_images):
for product in products_data:
pk = product["pk"]
# We are skipping products without images
if pk not in IMAGES_MAPPING:
continue
defaults = product["fields"]
defaults["weight"] = get_weight(defaults["weight"])
defaults["category_id"] = defaults.pop("category")
defaults["product_type_id"] = defaults.pop("product_type")
product, _ = Product.objects.update_or_create(pk=pk, defaults=defaults)
if create_images:
images = IMAGES_MAPPING.get(pk, [])
for image_name in images:
create_product_image(product, placeholder_dir, image_name)
def create_stocks(variant, warehouse_qs=None, **defaults):
if warehouse_qs is None:
warehouse_qs = Warehouse.objects.all()
for warehouse in warehouse_qs:
Stock.objects.update_or_create(
warehouse=warehouse, product_variant=variant, defaults=defaults
)
def create_product_variants(variants_data):
for variant in variants_data:
pk = variant["pk"]
defaults = variant["fields"]
defaults["weight"] = get_weight(defaults["weight"])
product_id = defaults.pop("product")
# We have not created products without images
if product_id not in IMAGES_MAPPING:
continue
defaults["product_id"] = product_id
set_field_as_money(defaults, "price_override")
set_field_as_money(defaults, "cost_price")
quantity = defaults.pop("quantity")
variant, _ = ProductVariant.objects.update_or_create(pk=pk, defaults=defaults)
create_stocks(variant, quantity=quantity)
def assign_attributes_to_product_types(
association_model: Union[Type[AttributeProduct], Type[AttributeVariant]],
attributes: list,
):
for value in attributes:
pk = value["pk"]
defaults = value["fields"]
defaults["attribute_id"] = defaults.pop("attribute")
defaults["product_type_id"] = defaults.pop("product_type")
association_model.objects.update_or_create(pk=pk, defaults=defaults)
def assign_attributes_to_products(product_attributes):
for value in product_attributes:
pk = value["pk"]
defaults = value["fields"]
defaults["product_id"] = defaults.pop("product")
defaults["assignment_id"] = defaults.pop("assignment")
assigned_values = defaults.pop("values")
assoc, created = AssignedProductAttribute.objects.update_or_create(
pk=pk, defaults=defaults
)
if created:
assoc.values.set(AttributeValue.objects.filter(pk__in=assigned_values))
def assign_attributes_to_variants(variant_attributes):
for value in variant_attributes:
pk = value["pk"]
defaults = value["fields"]
defaults["variant_id"] = defaults.pop("variant")
defaults["assignment_id"] = defaults.pop("assignment")
assigned_values = defaults.pop("values")
assoc, created = AssignedVariantAttribute.objects.update_or_create(
pk=pk, defaults=defaults
)
if created:
assoc.values.set(AttributeValue.objects.filter(pk__in=assigned_values))
def set_field_as_money(defaults, field):
amount_field = f"{field}_amount"
if amount_field in defaults and defaults[amount_field] is not None:
defaults[field] = Money(defaults[amount_field], settings.DEFAULT_CURRENCY)
def create_products_by_schema(placeholder_dir, create_images):
path = os.path.join(
settings.PROJECT_ROOT, "saleor", "static", "populatedb_data.json"
)
with open(path) as f:
db_items = json.load(f)
types = defaultdict(list)
# Sort db objects by its model
for item in db_items:
model = item.pop("model")
types[model].append(item)
create_product_types(product_type_data=types["product.producttype"])
create_categories(
categories_data=types["product.category"], placeholder_dir=placeholder_dir
)
create_attributes(attributes_data=types["product.attribute"])
create_attributes_values(values_data=types["product.attributevalue"])
create_products(
products_data=types["product.product"],
placeholder_dir=placeholder_dir,
create_images=create_images,
)
create_product_variants(variants_data=types["product.productvariant"])
assign_attributes_to_product_types(
AttributeProduct, attributes=types["product.attributeproduct"]
)
assign_attributes_to_product_types(
AttributeVariant, attributes=types["product.attributevariant"]
)
assign_attributes_to_products(
product_attributes=types["product.assignedproductattribute"]
)
assign_attributes_to_variants(
variant_attributes=types["product.assignedvariantattribute"]
)
create_collections(
data=types["product.collection"], placeholder_dir=placeholder_dir
)
assign_products_to_collections(associations=types["product.collectionproduct"])
class SaleorProvider(BaseProvider):
def money(self):
return Money(fake.pydecimal(2, 2, positive=True), settings.DEFAULT_CURRENCY)
def weight(self):
return Weight(kg=fake.pydecimal(1, 2, positive=True))
fake.add_provider(SaleorProvider)
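# e.g. fake.money() might yield Money(Decimal('12.34'), settings.DEFAULT_CURRENCY)
# and fake.weight() might yield Weight(kg=Decimal('3.21')); the values are random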
def get_email(first_name, last_name):
_first = unicodedata.normalize("NFD", first_name).encode("ascii", "ignore")
_last = unicodedata.normalize("NFD", last_name).encode("ascii", "ignore")
return "%s.%s@example.com" % (
_first.lower().decode("utf-8"),
_last.lower().decode("utf-8"),
)
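# e.g. get_email("Zoë", "Müller") returns "zoe.muller@example.com"; the NFD
# normalization plus ASCII encode strips the accents before the result is lowercased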
def create_product_image(product, placeholder_dir, image_name):
image = get_image(placeholder_dir, image_name)
# We don't want to create duplicated product images
if product.images.count() >= len(IMAGES_MAPPING.get(product.pk, [])):
return None
product_image = ProductImage(product=product, image=image)
product_image.save()
create_product_thumbnails.delay(product_image.pk)
return product_image
def create_address(save=True):
address = Address(
first_name=fake.first_name(),
last_name=fake.last_name(),
street_address_1=fake.street_address(),
city=fake.city(),
country=settings.DEFAULT_COUNTRY,
)
if address.country == "US":
state = fake.state_abbr()
address.country_area = state
address.postal_code = fake.postalcode_in_state(state)
else:
address.postal_code = fake.postalcode()
if save:
address.save()
return address
def create_fake_user(save=True):
address = create_address(save=save)
email = get_email(address.first_name, address.last_name)
# Skip the email if it already exists
try:
return User.objects.get(email=email)
except User.DoesNotExist:
pass
user = User(
first_name=address.first_name,
last_name=address.last_name,
email=email,
password="password",
default_billing_address=address,
default_shipping_address=address,
is_active=True,
note=fake.paragraph(),
date_joined=fake.date_time(tzinfo=timezone.get_current_timezone()),
)
if save:
user.save()
user.addresses.add(address)
return user
# We don't want to spam the console with payment confirmations sent to
# fake customers.
@patch("saleor.order.emails.send_payment_confirmation.delay")
def create_fake_payment(mock_email_confirmation, order):
payment = create_payment(
gateway="mirumee.payments.dummy",
customer_ip_address=fake.ipv4(),
email=order.user_email,
order=order,
payment_token=str(uuid.uuid4()),
total=order.total.gross.amount,
currency=order.total.gross.currency,
)
# Create authorization transaction
gateway.authorize(payment, payment.token)
# 20% chance to void the transaction at this stage
if random.choice([0, 0, 0, 0, 1]):
gateway.void(payment)
return payment
# 25% to end the payment at the authorization stage
if not random.choice([1, 1, 1, 0]):
return payment
# Create capture transaction
gateway.capture(payment)
# 25% to refund the payment
if random.choice([0, 0, 0, 1]):
gateway.refund(payment)
return payment
def create_order_lines(order, discounts, how_many=10):
variants = (
ProductVariant.objects.filter()
.order_by("?")
.prefetch_related("product__product_type")[:how_many]
)
variants_iter = itertools.cycle(variants)
lines = []
for dummy in range(how_many):
variant = next(variants_iter)
product = variant.product
quantity = random.randrange(1, 5)
unit_price = variant.get_price(discounts)
unit_price = TaxedMoney(net=unit_price, gross=unit_price)
lines.append(
OrderLine(
order=order,
product_name=str(product),
variant_name=str(variant),
product_sku=variant.sku,
is_shipping_required=variant.is_shipping_required(),
quantity=quantity,
variant=variant,
unit_price=unit_price,
tax_rate=0,
)
)
lines = OrderLine.objects.bulk_create(lines)
manager = get_plugins_manager()
country = order.shipping_method.shipping_zone.countries[0]
warehouses = Warehouse.objects.filter(
shipping_zones__countries__contains=country
).order_by("?")
warehouse_iter = itertools.cycle(warehouses)
for line in lines:
unit_price = manager.calculate_order_line_unit(line)
line.unit_price = unit_price
line.tax_rate = unit_price.tax / unit_price.net
warehouse = next(warehouse_iter)
increase_stock(line, warehouse, line.quantity, allocate=True)
OrderLine.objects.bulk_update(
lines,
["unit_price_net_amount", "unit_price_gross_amount", "currency", "tax_rate"],
)
return lines
def create_fulfillments(order):
for line in order:
if random.choice([False, True]):
fulfillment, _ = Fulfillment.objects.get_or_create(order=order)
quantity = random.randrange(0, line.quantity) + 1
allocation = line.allocations.get()
fulfillment.lines.create(
order_line=line, quantity=quantity, stock=allocation.stock
)
line.quantity_fulfilled = quantity
line.save(update_fields=["quantity_fulfilled"])
allocation.quantity_allocated = F("quantity_allocated") - quantity
allocation.save(update_fields=["quantity_allocated"])
update_order_status(order)
def create_fake_order(discounts, max_order_lines=5):
customers = User.objects.filter(is_superuser=False).order_by("?")
customer = random.choice([None, customers.first()])
if customer:
address = customer.default_shipping_address
order_data = {
"user": customer,
"billing_address": customer.default_billing_address,
"shipping_address": address,
}
else:
address = create_address()
order_data = {
"billing_address": address,
"shipping_address": address,
"user_email": get_email(address.first_name, address.last_name),
}
manager = get_plugins_manager()
shipping_method = ShippingMethod.objects.order_by("?").first()
shipping_price = shipping_method.price
shipping_price = manager.apply_taxes_to_shipping(shipping_price, address)
order_data.update(
{
"shipping_method": shipping_method,
"shipping_method_name": shipping_method.name,
"shipping_price": shipping_price,
}
)
order = Order.objects.create(**order_data)
lines = create_order_lines(order, discounts, random.randrange(1, max_order_lines))
order.total = sum([line.get_total() for line in lines], shipping_price)
weight = Weight(kg=0)
for line in order:
weight += line.variant.get_weight()
order.weight = weight
order.save()
create_fake_payment(order=order)
create_fulfillments(order)
return order
def create_fake_sale():
sale = Sale.objects.create(
name="Happy %s day!" % fake.word(),
type=DiscountValueType.PERCENTAGE,
value=random.choice([10, 20, 30, 40, 50]),
)
for product in Product.objects.all().order_by("?")[:4]:
sale.products.add(product)
return sale
def create_users(how_many=10):
for dummy in range(how_many):
user = create_fake_user()
yield "User: %s" % (user.email,)
def create_permission_groups():
super_users = User.objects.filter(is_superuser=True)
if not super_users:
super_users = create_staff_users(1, True)
group = create_group("Full Access", get_permissions(), super_users)
yield f"Group: {group}"
staff_users = create_staff_users()
customer_support_codenames = [
perm.codename
for enum in [CheckoutPermissions, OrderPermissions, GiftcardPermissions]
for perm in enum
]
customer_support_codenames.append(AccountPermissions.MANAGE_USERS.codename)
customer_support_permissions = Permission.objects.filter(
codename__in=customer_support_codenames
)
group = create_group("Customer Support", customer_support_permissions, staff_users)
yield f"Group: {group}"
def create_group(name, permissions, users):
group, _ = Group.objects.get_or_create(name=name)
group.permissions.add(*permissions)
group.user_set.add(*users)
return group
def create_staff_users(how_many=2, superuser=False):
users = []
for _ in range(how_many):
address = create_address()
first_name = address.first_name
last_name = address.last_name
email = get_email(first_name, last_name)
staff_user = User.objects.create_user(
first_name=first_name,
last_name=last_name,
email=email,
password="password",
default_billing_address=address,
default_shipping_address=address,
is_staff=True,
is_active=True,
is_superuser=superuser,
)
users.append(staff_user)
return users
def create_orders(how_many=10):
discounts = fetch_discounts(timezone.now())
for _ in range(how_many):
order = create_fake_order(discounts)
yield "Order: %s" % (order,)
def create_product_sales(how_many=5):
for dummy in range(how_many):
sale = create_fake_sale()
update_products_minimal_variant_prices_of_discount_task.delay(sale.pk)
yield "Sale: %s" % (sale,)
def create_shipping_zone(shipping_methods_names, countries, shipping_zone_name):
shipping_zone = ShippingZone.objects.get_or_create(
name=shipping_zone_name, defaults={"countries": countries}
)[0]
ShippingMethod.objects.bulk_create(
[
ShippingMethod(
name=name,
price=fake.money(),
shipping_zone=shipping_zone,
type=(
ShippingMethodType.PRICE_BASED
if random.randint(0, 1)
else ShippingMethodType.WEIGHT_BASED
),
minimum_order_price=Money(0, settings.DEFAULT_CURRENCY),
maximum_order_price_amount=None,
minimum_order_weight=0,
maximum_order_weight=None,
)
for name in shipping_methods_names
]
)
return "Shipping Zone: %s" % shipping_zone
def create_shipping_zones():
european_countries = [
"AX",
"AL",
"AD",
"AT",
"BY",
"BE",
"BA",
"BG",
"HR",
"CZ",
"DK",
"EE",
"FO",
"FI",
"FR",
"DE",
"GI",
"GR",
"GG",
"VA",
"HU",
"IS",
"IE",
"IM",
"IT",
"JE",
"LV",
"LI",
"LT",
"LU",
"MK",
"MT",
"MD",
"MC",
"ME",
"NL",
"NO",
"PL",
"PT",
"RO",
"RU",
"SM",
"RS",
"SK",
"SI",
"ES",
"SJ",
"SE",
"CH",
"UA",
"GB",
]
yield create_shipping_zone(
shipping_zone_name="Europe",
countries=european_countries,
shipping_methods_names=["DHL", "UPS", "Registered priority", "DB Schenker"],
)
oceanian_countries = [
"AS",
"AU",
"CX",
"CC",
"CK",
"FJ",
"PF",
"GU",
"HM",
"KI",
"MH",
"FM",
"NR",
"NC",
"NZ",
"NU",
"NF",
"MP",
"PW",
"PG",
"PN",
"WS",
"SB",
"TK",
"TO",
"TV",
"UM",
"VU",
"WF",
]
yield create_shipping_zone(
shipping_zone_name="Oceania",
countries=oceanian_countries,
shipping_methods_names=["FBA", "FedEx Express", "Oceania Air Mail"],
)
asian_countries = [
"AF",
"AM",
"AZ",
"BH",
"BD",
"BT",
"BN",
"KH",
"CN",
"CY",
"GE",
"HK",
"IN",
"ID",
"IR",
"IQ",
"IL",
"JP",
"JO",
"KZ",
"KP",
"KR",
"KW",
"KG",
"LA",
"LB",
"MO",
"MY",
"MV",
"MN",
"MM",
"NP",
"OM",
"PK",
"PS",
"PH",
"QA",
"SA",
"SG",
"LK",
"SY",
"TW",
"TJ",
"TH",
"TL",
"TR",
"TM",
"AE",
"UZ",
"VN",
"YE",
]
yield create_shipping_zone(
shipping_zone_name="Asia",
countries=asian_countries,
shipping_methods_names=["China Post", "TNT", "Aramex", "EMS"],
)
american_countries = [
"AI",
"AG",
"AR",
"AW",
"BS",
"BB",
"BZ",
"BM",
"BO",
"BQ",
"BV",
"BR",
"CA",
"KY",
"CL",
"CO",
"CR",
"CU",
"CW",
"DM",
"DO",
"EC",
"SV",
"FK",
"GF",
"GL",
"GD",
"GP",
"GT",
"GY",
"HT",
"HN",
"JM",
"MQ",
"MX",
"MS",
"NI",
"PA",
"PY",
"PE",
"PR",
"BL",
"KN",
"LC",
"MF",
"PM",
"VC",
"SX",
"GS",
"SR",
"TT",
"TC",
"US",
"UY",
"VE",
"VG",
"VI",
]
yield create_shipping_zone(
shipping_zone_name="Americas",
countries=american_countries,
shipping_methods_names=["DHL", "UPS", "FedEx", "EMS"],
)
african_countries = [
"DZ",
"AO",
"BJ",
"BW",
"IO",
"BF",
"BI",
"CV",
"CM",
"CF",
"TD",
"KM",
"CG",
"CD",
"CI",
"DJ",
"EG",
"GQ",
"ER",
"SZ",
"ET",
"TF",
"GA",
"GM",
"GH",
"GN",
"GW",
"KE",
"LS",
"LR",
"LY",
"MG",
"MW",
"ML",
"MR",
"MU",
"YT",
"MA",
"MZ",
"NA",
"NE",
"NG",
"RE",
"RW",
"SH",
"ST",
"SN",
"SC",
"SL",
"SO",
"ZA",
"SS",
"SD",
"TZ",
"TG",
"TN",
"UG",
"EH",
"ZM",
"ZW",
]
yield create_shipping_zone(
shipping_zone_name="Africa",
countries=african_countries,
shipping_methods_names=[
"Royale International",
"ACE",
"fastway couriers",
"Post Office",
],
)
def create_warehouses():
for shipping_zone in ShippingZone.objects.all():
shipping_zone_name = shipping_zone.name
warehouse, _ = Warehouse.objects.update_or_create(
name=shipping_zone_name,
slug=slugify(shipping_zone_name),
defaults={"company_name": fake.company(), "address": create_address()},
)
warehouse.shipping_zones.add(shipping_zone)
def create_vouchers():
voucher, created = Voucher.objects.get_or_create(
code="FREESHIPPING",
defaults={
"type": VoucherType.SHIPPING,
"name": "Free shipping",
"discount_value_type": DiscountValueType.PERCENTAGE,
"discount_value": 100,
},
)
if created:
yield "Voucher #%d" % voucher.id
else:
yield "Shipping voucher already exists"
voucher, created = Voucher.objects.get_or_create(
code="DISCOUNT",
defaults={
"type": VoucherType.ENTIRE_ORDER,
"name": "Big order discount",
"discount_value_type": DiscountValueType.FIXED,
"discount_value": 25,
"min_spent": Money(200, settings.DEFAULT_CURRENCY),
},
)
if created:
yield "Voucher #%d" % voucher.id
else:
yield "Value voucher already exists"
voucher, created = Voucher.objects.get_or_create(
code="VCO9KV98LC",
defaults={
"type": VoucherType.ENTIRE_ORDER,
"discount_value_type": DiscountValueType.PERCENTAGE,
"discount_value": 5,
},
)
if created:
yield "Voucher #%d" % voucher.id
else:
yield "Value voucher already exists"
def create_gift_card():
user = random.choice(
[User.objects.filter(is_superuser=False).order_by("?").first()]
)
gift_card, created = GiftCard.objects.get_or_create(
code="Gift_card_10",
defaults={
"user": user,
"initial_balance": Money(10, settings.DEFAULT_CURRENCY),
"current_balance": Money(10, settings.DEFAULT_CURRENCY),
},
)
if created:
yield "Gift card #%d" % gift_card.id
else:
yield "Gift card already exists"
def set_homepage_collection():
homepage_collection = Collection.objects.order_by("?").first()
site = Site.objects.get_current()
site_settings = site.settings
site_settings.homepage_collection = homepage_collection
site_settings.save()
yield "Homepage collection assigned"
def add_address_to_admin(email):
address = create_address()
user = User.objects.get(email=email)
store_user_address(user, address, AddressType.BILLING)
store_user_address(user, address, AddressType.SHIPPING)
def create_page():
content = """
<h2>E-commerce for the PWA era</h2>
<h3>A modular, high performance e-commerce storefront built with GraphQL,
Django, and ReactJS.</h3>
<p>Saleor is a rapidly-growing open source e-commerce platform that has served
high-volume companies from branches like publishing and apparel since 2012.
Based on Python and Django, the latest major update introduces a modular
front end with a GraphQL API and storefront and dashboard written in React
to make Saleor a full-functionality open source e-commerce.</p>
<p><a href="https://github.com/mirumee/saleor">Get Saleor today!</a></p>
"""
content_json = {
"blocks": [
{
"key": "",
"data": {},
"text": "E-commerce for the PWA era",
"type": "header-two",
"depth": 0,
"entityRanges": [],
"inlineStyleRanges": [],
},
{
"key": "",
"data": {},
"text": "A modular, high performance e-commerce storefront "
"built with GraphQL, Django, and ReactJS.",
"type": "unstyled",
"depth": 0,
"entityRanges": [],
"inlineStyleRanges": [],
},
{
"key": "",
"data": {},
"text": "",
"type": "unstyled",
"depth": 0,
"entityRanges": [],
"inlineStyleRanges": [],
},
{
"key": "",
"data": {},
"text": "Saleor is a rapidly-growing open source e-commerce platform "
"that has served high-volume companies from branches like "
"publishing and apparel since 2012. Based on Python and "
"Django, the latest major update introduces a modular "
"front end with a GraphQL API and storefront and dashboard "
"written in React to make Saleor a full-functionality "
"open source e-commerce.",
"type": "unstyled",
"depth": 0,
"entityRanges": [],
"inlineStyleRanges": [],
},
{
"key": "",
"data": {},
"text": "",
"type": "unstyled",
"depth": 0,
"entityRanges": [],
"inlineStyleRanges": [],
},
{
"key": "",
"data": {},
"text": "Get Saleor today!",
"type": "unstyled",
"depth": 0,
"entityRanges": [{"key": 0, "length": 17, "offset": 0}],
"inlineStyleRanges": [],
},
],
"entityMap": {
"0": {
"data": {"url": "https://github.com/mirumee/saleor"},
"type": "LINK",
"mutability": "MUTABLE",
}
},
}
page_data = {
"content": content,
"content_json": content_json,
"title": "About",
"is_published": True,
}
page, dummy = Page.objects.get_or_create(slug="about", defaults=page_data)
yield "Page %s created" % page.slug
def generate_menu_items(menu: Menu, category: Category, parent_menu_item):
menu_item, created = menu.items.get_or_create(
name=category.name, category=category, parent=parent_menu_item
)
if created:
yield "Created menu item for category %s" % category
for child in category.get_children():
for msg in generate_menu_items(menu, child, menu_item):
yield "\t%s" % msg
def generate_menu_tree(menu):
categories = (
Category.tree.get_queryset()
.filter(
Q(parent__isnull=True) & Q(products__isnull=False)
| Q(children__products__isnull=False)
)
.distinct()
)
for category in categories:
for msg in generate_menu_items(menu, category, None):
yield msg
def create_menus():
# Create navbar menu with category links
top_menu, _ = Menu.objects.get_or_create(
name=settings.DEFAULT_MENUS["top_menu_name"]
)
top_menu.items.all().delete()
yield "Created navbar menu"
for msg in generate_menu_tree(top_menu):
yield msg
# Create footer menu with collections and pages
bottom_menu, _ = Menu.objects.get_or_create(
name=settings.DEFAULT_MENUS["bottom_menu_name"]
)
bottom_menu.items.all().delete()
collection = Collection.objects.filter(products__isnull=False).order_by("?")[0]
item, _ = bottom_menu.items.get_or_create(name="Collections", collection=collection)
for collection in Collection.objects.filter(
products__isnull=False, background_image__isnull=False
):
bottom_menu.items.get_or_create(
name=collection.name, collection=collection, parent=item
)
item_saleor = bottom_menu.items.get_or_create(name="Saleor", url="/")[0]
page = Page.objects.order_by("?")[0]
item_saleor.children.get_or_create(name=page.title, page=page, menu=bottom_menu)
api_url = build_absolute_uri(reverse("api"))
item_saleor.children.get_or_create(
name="GraphQL API", url=api_url, menu=bottom_menu
)
yield "Created footer menu"
site = Site.objects.get_current()
site_settings = site.settings
site_settings.top_menu = top_menu
site_settings.bottom_menu = bottom_menu
site_settings.save()
def get_product_list_images_dir(placeholder_dir):
product_list_images_dir = os.path.join(placeholder_dir, PRODUCTS_LIST_DIR)
return product_list_images_dir
def get_image(image_dir, image_name):
img_path = os.path.join(image_dir, image_name)
return File(open(img_path, "rb"), name=image_name)
| 29.95905 | 88 | 0.606288 |
4a212ee66405528c538c7b2fb36f584f01c8d30b | 1,946 | py | Python | personal_finance/config/personal_finance.py | fderyckel/personal_finance | dd4f8f19ad1b6accd608b4999ddf85e20e1c85ec | ["MIT"] | stars: 4 (2021-01-10T05:34:04.000Z to 2021-11-15T19:35:54.000Z) | issues: null (adityaduggal/personal_finance) | forks: 7 (adityaduggal/personal_finance, 2019-07-30T15:10:28.000Z to 2021-11-15T19:35:56.000Z) |
from frappe import _
def get_data():
return [
{
"label": _("Transaction Masters"),
"items": [
{
"type": "doctype",
"name": "Income Expense Entry",
},
{
"type": "doctype",
"name": "Investment Transaction",
}
]
},
{
"label": _("Accounting Masters"),
"items": [
{
"type": "doctype",
"name": "Portfolio",
}
]
},
{
"label": _("Items and Pricing"),
"items": [
{
"type": "doctype",
"name": "Search and Create Stocks",
},
{
"type": "doctype",
"name": "Price Tracker",
},
{
"type": "doctype",
"name": "Get Stock Price",
}
]
},
{
"label": _("Settings"),
"items": [
{
"type": "doctype",
"name": "Personal Finance Settings",
}
]
},
{
"label": _("Key Reports"),
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Portfolio Status",
},
{
"type": "report",
"is_query_report": True,
"name": "Portfolio Transactions",
},
{
"type": "report",
"is_query_report": True,
"name": "Price Analysis",
}
]
}
]
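# --- Illustrative check (a sketch, not part of the generated config) --------
# get_data() returns the list of desk sections shown above; the hypothetical
# helper below only demonstrates how that structure can be traversed, e.g. to
# collect every DocType the module links to.
def _example_list_doctypes():
    return [
        item["name"]
        for section in get_data()
        for item in section["items"]
        if item.get("type") == "doctype"
    ]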
| 26.297297 | 56 | 0.270812 |
4a212f92c3c488c4cd383afb0a4a8007675ab7cf
| 5,788 |
py
|
Python
|
src/fedservice/rp/__init__.py
|
rohe/fedservice
|
1460d21217b804cac0f38fa26ffa24bee7cf6dad
|
[
"Apache-2.0"
] | 3 |
2018-11-28T12:01:31.000Z
|
2020-12-16T21:43:29.000Z
|
src/fedservice/rp/__init__.py
|
rohe/fedservice
|
1460d21217b804cac0f38fa26ffa24bee7cf6dad
|
[
"Apache-2.0"
] | 13 |
2020-02-10T15:33:37.000Z
|
2022-02-01T16:43:36.000Z
|
src/fedservice/rp/__init__.py
|
rohe/fedservice
|
1460d21217b804cac0f38fa26ffa24bee7cf6dad
|
[
"Apache-2.0"
] | 4 |
2019-05-29T10:04:48.000Z
|
2020-10-14T09:52:53.000Z
|
import logging
from typing import Optional
from cryptojwt.key_jar import init_key_jar
import oidcrp
from oidcrp import rp_handler
from oidcrp.oauth2 import Client
from oidcrp.util import lower_or_upper
from fedservice import create_federation_entity
logger = logging.getLogger(__name__)
class RPHandler(rp_handler.RPHandler):
def __init__(self, base_url='', hash_seed="", keyjar=None, verify_ssl=True,
services=None, service_factory=None, client_configs=None,
client_authn_factory=None, client_cls=None,
state_db=None, federation_entity_config=None, httpc_params=None, **kwargs):
rp_handler.RPHandler.__init__(self, base_url=base_url, hash_seed=hash_seed, keyjar=keyjar,
verify_ssl=verify_ssl, services=services,
service_factory=service_factory,
client_configs=client_configs,
client_authn_factory=client_authn_factory,
client_cls=client_cls,
state_db=state_db, httpc_params=httpc_params, **kwargs)
self.federation_entity_config = federation_entity_config
def init_client(self, issuer):
client = rp_handler.RPHandler.init_client(self, issuer)
client.client_get("service_context").federation_entity = self.init_federation_entity(issuer)
return client
def init_federation_entity(self, issuer):
args = {k: v for k, v in self.federation_entity_config.items()}
_entity_id = ''
_cnf = self.client_configs.get(issuer)
if _cnf:
_entity_id = _cnf.get('entity_id')
if not _entity_id:
_entity_id = self.federation_entity_config['entity_id']
if '{}' in _entity_id:
_entity_id = _entity_id.format(issuer)
args['entity_id'] = _entity_id
logger.debug('Entity ID: %s', _entity_id)
_federation_entity = create_federation_entity(httpc_params=self.httpc_params,
issuer=issuer, **args)
_federation_entity.keyjar.httpc_params = self.httpc_params
_federation_entity.collector.web_cert_path = self.federation_entity_config.get(
'web_cert_path')
return _federation_entity
def client_setup(self,
iss_id: Optional[str] = '',
user: Optional[str] = '',
behaviour_args: Optional[dict] = None) -> Client:
"""
        First, if no issuer ID is given, the user identifier is handed to the
        webfinger service to try to discover the issuer ID.
        Once the method has an issuer ID, and no client is yet bound to that
        issuer, a new client is created and initialised with the information
        it needs to communicate with the OP/AS that has the provided issuer
        ID.
:param iss_id: The issuer ID
:param user: A user identifier
:return: A :py:class:`oidcrp.oidc.Client` instance
"""
logger.info('client_setup: iss_id={}, user={}'.format(iss_id, user))
if not iss_id:
if not user:
raise ValueError('Need issuer or user')
logger.debug("Connecting to previously unknown OP")
temporary_client = self.init_client('')
temporary_client.do_request('webfinger', resource=user)
else:
temporary_client = None
try:
client = self.issuer2rp[iss_id]
except KeyError:
if temporary_client:
client = temporary_client
else:
logger.debug("Creating new client: %s", iss_id)
client = self.init_client(iss_id)
else:
return client
logger.debug("Get provider info")
issuer = self.do_provider_info(client, behaviour_args=behaviour_args)
_sc = client.client_get("service_context")
try:
_fe = _sc.federation_entity
except AttributeError:
_fe = None
registration_type = 'explicit'
else:
registration_type = _fe.registration_type
if registration_type == 'automatic':
_redirect_uris = _sc.config.get("redirect_uris")
if _redirect_uris:
_sc.set('redirect_uris', _redirect_uris)
_sc.set('client_id', _fe.entity_id)
# client.client_id = _fe.entity_id
self.hash2issuer[iss_id] = issuer
else:
_callbacks = self.add_callbacks(_sc)
_sc.set('client_id', oidcrp.util.add_path(_fe.entity_id, _callbacks['__hex']))
else: # explicit
logger.debug("Do client registration")
self.do_client_registration(client, iss_id, behaviour_args=behaviour_args)
self.issuer2rp[issuer] = client
return client
def init_oidc_rp_handler(config, dir_path):
rp_keys_conf = config.key_conf
_fed_conf = config.federation
_httpc_params = config.httpc_params
_path = rp_keys_conf['uri_path']
if _path.startswith('./'):
_path = _path[2:]
elif _path.startswith('/'):
_path = _path[1:]
args = {k: v for k, v in rp_keys_conf.items() if k != "uri_path"}
rp_keyjar = init_key_jar(**args)
rp_keyjar.httpc_params = _httpc_params
rph = RPHandler(base_url=config.base_url, hash_seed=config.hash_seed,
jwks_path=_path, client_configs=config.clients, keyjar=rp_keyjar,
services=config.services, httpc_params=_httpc_params,
federation_entity_config=_fed_conf)
return rph
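# --- Illustrative usage (a sketch under stated assumptions) -----------------
# init_oidc_rp_handler() expects a configuration object exposing key_conf,
# federation, httpc_params, base_url, hash_seed, clients and services, as the
# code above shows.  The driver below is hypothetical: its name, the config
# object and the issuer URL are assumptions made for illustration only.
def _example_start_client(config, dir_path, issuer="https://op.example.org"):
    rph = init_oidc_rp_handler(config, dir_path)
    # Either an issuer ID or a user identifier (for webfinger discovery)
    # must be given to client_setup(); a known issuer is assumed here.
    client = rph.client_setup(iss_id=issuer)
    return client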
| 38.845638 | 100 | 0.617484 |
4a2131471d7308e533a6f0a2797fa99568967904
| 1,738 |
py
|
Python
|
app/user/serializers.py
|
Busterflint/recipe-app-ap1
|
6d073c244ef5abde49de313019974293c5c5ee38
|
[
"MIT"
] | null | null | null |
app/user/serializers.py
|
Busterflint/recipe-app-ap1
|
6d073c244ef5abde49de313019974293c5c5ee38
|
[
"MIT"
] | null | null | null |
app/user/serializers.py
|
Busterflint/recipe-app-ap1
|
6d073c244ef5abde49de313019974293c5c5ee38
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""Serializer for the users object."""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5 }}
def create(self, validated_data):
"""Create a new user with encrypted password and return it."""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and returning it."""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authentication object."""
email = serializers.CharField()
password = serializers.CharField(
style = {'input_type': 'password'},
trim_whitespace = False
)
def validate(self, attrs):
"""Validate and authenticate the user."""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
            msg = _('Unable to authenticate with provided credentials.')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
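# --- Illustrative usage (a sketch, not part of the original module) ---------
# The serializers above are normally driven by DRF views; the payload values
# and the helper name below are assumptions made purely for illustration.
def _example_register_and_authenticate(request=None):
    # UserSerializer.create() delegates to create_user(), which hashes the
    # password before the user is stored.
    user_serializer = UserSerializer(data={
        'email': 'user@example.com',
        'password': 'secret-pass',
        'name': 'Example User',
    })
    user_serializer.is_valid(raise_exception=True)
    user = user_serializer.save()
    # AuthTokenSerializer.validate() calls authenticate(); the request is
    # passed through the serializer context.
    token_serializer = AuthTokenSerializer(
        data={'email': 'user@example.com', 'password': 'secret-pass'},
        context={'request': request},
    )
    token_serializer.is_valid(raise_exception=True)
    return user, token_serializer.validated_data['user']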
| 33.423077 | 77 | 0.643843 |
4a2131819f013f2edfb099e59fa60316b8d7fa51
| 20,254 |
py
|
Python
|
highcharts/highstock/options.py
|
Jbrunn/python-highcharts
|
a4c488ae5c2e125616efad5a722f3dfd8a9bc450
|
[
"MIT"
] | 370 |
2015-10-07T20:13:10.000Z
|
2022-03-31T03:43:17.000Z
|
highcharts/highstock/options.py
|
Jbrunn/python-highcharts
|
a4c488ae5c2e125616efad5a722f3dfd8a9bc450
|
[
"MIT"
] | 67 |
2016-03-14T12:18:44.000Z
|
2022-02-24T09:24:31.000Z
|
highcharts/highstock/options.py
|
Jbrunn/python-highcharts
|
a4c488ae5c2e125616efad5a722f3dfd8a9bc450
|
[
"MIT"
] | 159 |
2016-02-25T15:07:52.000Z
|
2022-03-12T13:04:14.000Z
|
# -*- coding: UTF-8 -*-
from past.builtins import basestring
from .highstock_types import OptionTypeError, Series, SeriesOptions
from .common import Formatter, Events, Position, ContextButton, Options3d, ResetZoomButton, \
Labels, PlotBands, PlotLines, Title, Items, Navigation, Handles, Background, Breaks, \
DateTimeLabelFormats, Zones, Levels, Buttons, \
JSfunction, ColorObject, CSSObject, SVGObject, CommonObject, ArrayObject
import json, datetime
# Base Option Class
class BaseOptions(object):
def __init__(self,**kwargs):
self.update_dict(**kwargs)
def __display_options__(self):
print(json.dumps(self.__dict__, indent=4, sort_keys=True))
def __jsonable__(self):
return self.__dict__
def __validate_options__(self, k, v, ov):
if ov == NotImplemented:
raise OptionTypeError("Option Type Currently Not Supported: %s" % k)
if isinstance(v,dict) and isinstance(ov,dict):
            keys = list(v.keys())  # list() so the key view can be indexed under Python 3
if len(keys) > 1:
raise NotImplementedError
return isinstance(v[keys[0]],ov[keys[0]])
return isinstance(v, ov)
def update_dict(self, **kwargs):
for k, v in kwargs.items():
if k in self.ALLOWED_OPTIONS:
#if isinstance(self.ALLOWED_OPTIONS[k], tuple) and isinstance(self.ALLOWED_OPTIONS[k][0](), SeriesOptions):
if k in PlotOptions.ALLOWED_OPTIONS.keys():
if self.__getattr__(k):
self.__dict__[k].update(series_type=k, **v)
else:
v = SeriesOptions(series_type=k, **v)
self.__dict__.update({k:v})
elif isinstance(self.ALLOWED_OPTIONS[k], tuple) and isinstance(self.ALLOWED_OPTIONS[k][0](), CommonObject):
if isinstance(v, dict):
if self.__getattr__(k):
self.__dict__[k].update(v) #update dict
else: # first
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v)})
else:
                        raise OptionTypeError("Not An Accepted Input Type: %s, must be dictionary" % type(v))
elif isinstance(self.ALLOWED_OPTIONS[k], tuple) and isinstance(self.ALLOWED_OPTIONS[k][0](), ArrayObject):
if self.__getattr__(k): #existing attr
if isinstance(v, dict):
self.__dict__[k].update(v) # update array
elif isinstance(v, list):
for item in v:
self.__dict__[k].update(item) # update array
else:
                            raise OptionTypeError("Not An Accepted Input Type: %s, must be list or dictionary"
                                                  % type(v))
else: #first
if isinstance(v, dict):
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v)})
elif isinstance(v, list):
if len(v) == 1:
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v[0])})
else:
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v[0])})
for item in v[1:]:
self.__dict__[k].update(item)
else:
                            raise OptionTypeError("Not An Accepted Input Type: %s, must be list or dictionary"
                                                  % type(v))
elif isinstance(self.ALLOWED_OPTIONS[k], tuple) and \
(isinstance(self.ALLOWED_OPTIONS[k][0](), CSSObject) or isinstance(self.ALLOWED_OPTIONS[k][0](), SVGObject)):
if self.__getattr__(k):
for key, value in v.items(): # check if v has object input
self.__dict__[k].__options__().update({key:value})
v = self.__dict__[k].__options__()
# upating object
if isinstance(v, dict):
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v)})
else:
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](v)})
elif isinstance(self.ALLOWED_OPTIONS[k], tuple) and (isinstance(self.ALLOWED_OPTIONS[k][0](), JSfunction) or \
isinstance(self.ALLOWED_OPTIONS[k][0](), Formatter) or isinstance(self.ALLOWED_OPTIONS[k][0](), ColorObject)):
if isinstance(v, dict):
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](**v)})
else:
self.__dict__.update({k:self.ALLOWED_OPTIONS[k][0](v)})
else:
self.__dict__.update({k:v})
else:
print(self.ALLOWED_OPTIONS)
print(k, v)
raise OptionTypeError("Not An Accepted Option Type: %s" % k)
def __getattr__(self, item):
if not item in self.__dict__:
return None # Attribute Not Set
else:
return True
class ChartOptions(BaseOptions):
ALLOWED_OPTIONS = {
"alignTicks": bool,
"animation": [bool, dict, basestring],
"backgroundColor": (ColorObject, basestring, dict),
"borderColor": (ColorObject, basestring, dict),
"borderRadius": int,
"borderWidth": int,
"className": basestring,
"defaultSeriesType": basestring,
"events": (Events, dict),
"height": [int,basestring],
"ignoreHiddenSeries": bool,
"inverted": bool,
"margin": list,
"marginBottom": int,
"marginLeft": int,
"marginRight": int,
"marginTop": int,
"options3d": (Options3d, dict),
"plotBackgroundColor": (ColorObject, basestring, dict),
"plotBackgroundImage": basestring,
"plotBorderColor": (ColorObject, basestring, dict),
"plotBorderWidth": int,
"plotShadow": bool,
"polar": bool,
"reflow": bool,
"renderTo": basestring,
"resetZoomButton": (ResetZoomButton, dict),
"selectionMarkerFill": basestring,
"shadow": bool,
"showAxes": bool,
"spacingBottom": int,
"spacingLeft": int,
"spacingRight": int,
"spacingTop": int,
"style": (CSSObject, dict),
"type": basestring,
"width": [int,basestring],
"zoomType": basestring,
}
class ColorsOptions(BaseOptions):
""" Special Case, this is simply just an array of colours """
def __init__(self):
self.colors = {}
def set_colors(self, colors):
if isinstance(colors, basestring):
self.colors = ColorObject(colors)
elif isinstance(colors, list) or isinstance(colors, dict):
self.colors = colors
else:
            raise OptionTypeError("Not An Accepted Input Type: %s" % type(colors))
def __jsonable__(self):
return self.colors
class CreditsOptions(BaseOptions):
ALLOWED_OPTIONS = {
"enabled": bool,
"href": basestring,
"position": (Position, dict),
"style": (CSSObject, dict),
"text": basestring,
}
class ExportingOptions(BaseOptions):
ALLOWED_OPTIONS = {
"buttons": (ContextButton, dict),
"chartOptions": (ChartOptions, dict),
"enabled": bool,
"filename": basestring,
"formAttributes": NotImplemented,
"scale": int,
"sourceHeight": int,
"sourceWidth": int,
"type": basestring,
"url": basestring,
"width": int,
}
class GlobalOptions(BaseOptions):
ALLOWED_OPTIONS = {
"Date": NotImplemented,
"VMLRadialGradientURL": basestring,
"canvasToolsURL": basestring,
"getTimezoneOffset": (JSfunction, basestring),
"timezoneOffset": int,
"useUTC": bool,
}
class LabelsOptions(BaseOptions):
ALLOWED_OPTIONS = {
"items": (Items, dict),
"style": (CSSObject, dict),
}
class LangOptions(BaseOptions):
ALLOWED_OPTIONS = {
"decimalPoint": basestring,
"downloadJPEG": basestring,
"downloadPDF": basestring,
"downloadPNG": basestring,
"donwloadSVG": basestring,
"exportButtonTitle": basestring,
"loading": basestring,
"months": list,
"noData": basestring,
"numericSymbols": list,
"printButtonTitle": basestring,
"resetZoom": basestring,
"resetZoomTitle": basestring,
"shortMonths": list,
"thousandsSep": basestring,
"weekdays": list,
}
class LegendOptions(BaseOptions):
ALLOWED_OPTIONS = {
"align": basestring,
"backgroundColor": (ColorObject, basestring, dict),
"borderColor": (ColorObject, basestring, dict),
"borderRadius": int,
"borderWidth": int,
"enabled": bool,
"floating": bool,
"itemDistance": int,
"itemHiddenStyle": (CSSObject, dict),
"itemHoverStyle": (CSSObject, dict),
"itemMarginBottom": int,
"itemMarginTop": int,
"itemStyle": (CSSObject, dict),
"itemWidth": int,
"labelFormat": basestring,
"labelFormatter": (Formatter, JSfunction),
"layout": basestring,
"lineHeight": int,
"margin": int,
"maxHeight": int,
"navigation": (Navigation, dict),
"padding": int,
"reversed": bool,
"rtl": bool,
"shadow": bool,
"style": (CSSObject, dict),
"symbolHeight": int,
"symbolPadding": int,
"symbolRadius": int,
"symbolWidth": int,
"title": (Title, dict),
"useHTML": bool,
"verticalAlign": basestring,
"width": int,
"x": int,
"y": int,
}
class LoadingOptions(BaseOptions):
ALLOWED_OPTIONS = {
"hideDuration": int,
"labelStyle": (CSSObject, dict),
"showDuration": int,
"style": (CSSObject, dict),
}
class NavigationOptions(BaseOptions):
ALLOWED_OPTIONS = {
"buttonOptions": (ContextButton, dict),
"menuItemHoverStyle": (CSSObject, dict),
"menuItemStyle": (CSSObject, dict),
"menuStyle": (CSSObject, dict),
}
class PlotOptions(BaseOptions):
""" Another Special Case: Interface With all the different Highchart Plot Types Here """
ALLOWED_OPTIONS = {
"area": (SeriesOptions, dict),
"arearange": (SeriesOptions, dict),
"areaspline": (SeriesOptions, dict),
"areasplinerange": (SeriesOptions, dict),
"candlestick": (SeriesOptions, dict),
"column": (SeriesOptions, dict),
"columnrange": (SeriesOptions, dict),
"flags": (SeriesOptions, dict),
"line": (SeriesOptions, dict),
"ohlc": (SeriesOptions, dict),
"polygon": (SeriesOptions, dict),
"scatter": (SeriesOptions, dict),
"series": (SeriesOptions, dict),
"spline": (SeriesOptions, dict),
}
class RangeSelectorOptions(BaseOptions):
ALLOWED_OPTIONS = {
"allButtonsEnabled": bool,
"buttonSpacing": [int, float],
"buttonTheme": (SVGObject, dict),
"buttons": (Buttons, list),
"enabled": bool,
"inputBoxBorderColor": (ColorObject, basestring, dict),
"inputBoxHeight": [int, float],
"inputBoxWidth": [int, float],
"inputDateFormat": basestring,
"inputDateParser": (JSfunction, basestring),
"inputEditDateFormat": basestring,
"inputEnabled": bool,
"inputPosition": (Position, dict),
"inputStyle": (CSSObject, dict),
"labelStyle": (CSSObject, dict),
"selected": [int, float],
}
class ScrollbarOptions(BaseOptions):
ALLOWED_OPTIONS = {
"barBackgroundColor": (ColorObject, basestring, dict),
"barBorderColor": (ColorObject, basestring, dict),
"barBorderRadius": [int, float],
"barBorderWidth": [int, float],
"buttonArrowColor": (ColorObject, basestring, dict),
"buttonBackgroundColor": (ColorObject, basestring, dict),
"buttonBorderColor": (ColorObject, basestring, dict),
"buttonBorderRadius": [int, float],
"buttonBorderWidth": [int, float],
"enabled": bool,
"height": [int, float],
"liveRedraw": bool,
"minWidth": [int, float],
"rifleColor": (ColorObject, basestring, dict),
"trackBackgroundColor": (ColorObject, basestring, dict),
"trackBorderColor": (ColorObject, basestring, dict),
"trackBorderRadius": [int, float],
"trackBorderWidth": [int, float],
}
class SeriesData(BaseOptions):
""" Another Special Case: Stores Data Series in an array for returning to the chart object """
def __init__(self):
#self.__dict__.update([])
self = []
class SubtitleOptions(BaseOptions):
ALLOWED_OPTIONS = {
"align": basestring,
"floating": bool,
"style": (CSSObject, dict),
"text": basestring,
"useHTML": bool,
"verticalAlign": basestring,
"x": int,
"y": int,
}
class TitleOptions(BaseOptions):
ALLOWED_OPTIONS = {
"align": basestring,
"floating": bool,
"margin": int,
"style": (CSSObject, dict),
"text": basestring,
"useHTML": bool,
"verticalAlign": basestring,
"x": int,
"y": int,
}
class TooltipOptions(BaseOptions):
ALLOWED_OPTIONS = {
"animation": bool,
"backgroundColor": (ColorObject, basestring, dict),
"borderColor": (ColorObject, basestring, dict),
"borderRadius": int,
"borderWidth": int,
"crosshairs": [bool, list, dict],
"dateTimeLabelFormats": (DateTimeLabelFormats, dict),
"enabled": bool,
"followPointer": bool,
"followTouchMove": bool,
"footerFormat": basestring,
"formatter": (Formatter, JSfunction),
"headerFormat": basestring,
"pointFormat": basestring,
"positioner": (JSfunction, basestring),
"shadow": bool,
"shared": bool,
"snap": int,
"style": (CSSObject, dict),
"useHTML": bool,
"valueDecimals": int,
"valuePrefix": basestring,
"valueSuffix": basestring,
"xDateFormat": basestring,
}
class xAxisOptions(BaseOptions):
ALLOWED_OPTIONS = {
"allowDecimals": bool,
"alternateGridColor": (ColorObject, basestring, dict),
"breaks":(Breaks, list),
"categories": list,
'crosshair': bool,
"dateTimeLabelFormats": (DateTimeLabelFormats, dict),
"endOnTick": bool,
"events": (Events, dict),
"gridLineColor": (ColorObject, basestring, dict),
"gridLineDashStyle": basestring,
"gridLineWidth": int,
"id": basestring,
"labels": (Labels, dict),
"lineColor": (ColorObject, basestring, dict),
"lineWidth": int,
"linkedTo": int,
"max": [float, int],
"maxPadding": [float, int],
"maxZoom": NotImplemented,
"min": [float, int],
"minPadding": [float, int],
"minRange": int,
"minTickInterval": int,
"minorGridLineColor": (ColorObject, basestring, dict),
"minorGridLineDashStyle": basestring,
"minorGridLineWidth": int,
"minorTickColor": (ColorObject, basestring, dict),
"minorTickInterval": int,
"minorTickLength": int,
"minorTickPosition": basestring,
"minorTickWidth": int,
"offset": bool,
"opposite": bool,
"ordinal": bool,
"plotBands": (PlotBands, list),
"plotLines": (PlotLines, list),
"reversed": bool,
"showEmpty": bool,
"showFirstLabel": bool,
"showLastLabel": bool,
"startOfWeek": int,
"startOnTick": bool,
"tickColor": (ColorObject, basestring, dict),
"tickInterval": int,
"tickLength": int,
"tickPixelInterval": int,
"tickPosition": basestring,
"tickPositioner": (JSfunction, basestring),
"tickPositions": list,
"tickWidth": int,
"tickmarkPlacement": basestring,
"title": (Title, dict),
"type": basestring,
"units": list
}
class yAxisOptions(BaseOptions):
ALLOWED_OPTIONS = {
"allowDecimals": bool,
"alternateGridColor": (ColorObject, basestring, dict),
"breaks": (Breaks, list),
"categories": list,
"ceiling": (int, float),
"dateTimeLabelFormats": (DateTimeLabelFormats, dict),
"endOnTick": bool,
"events": (Events, dict),
"floor": (int, float),
"gridLineColor": (ColorObject, basestring, dict),
"gridLineDashStyle": basestring,
"gridLineInterpolation": basestring,
"gridLineWidth": int,
"gridZIndex": int,
"height": [int, float, basestring],
"id": basestring,
"labels": (Labels, dict),
"lineColor": (ColorObject, basestring, dict),
"lineWidth": int,
"linkedTo": int,
"max": [float, int],
"maxColor": (ColorObject, basestring, dict),
"maxPadding": [float, int],
"maxZoom": NotImplemented,
"min": [float, int],
"minColor": (ColorObject, basestring, dict),
"minPadding": [float, int],
"minRange": int,
"minTickInterval": int,
"minorGridLineColor": (ColorObject, basestring, dict),
"minorGridLineDashStyle": basestring,
"minorGridLineWidth": int,
"minorTickColor": (ColorObject, basestring, dict),
"minorTickInterval": int,
"minorTickLength": int,
"minorTickPosition": basestring,
"minorTickWidth": int,
"offset": bool,
"opposite": bool,
"ordinal": bool,
"plotBands": (PlotBands, list),
"plotLines": (PlotLines, list),
"reversed": bool,
"reversedStacks": bool,
"showEmpty": bool,
"showFirstLabel": bool,
"showLastLabel": bool,
"stackLabels": (Labels, dict),
"startOfWeek": int,
"startOnTick": bool,
"stops": list,
"tickAmount": int,
"tickColor": (ColorObject, basestring, dict),
"tickInterval": int,
"tickLength": int,
"tickPixelInterval": int,
"tickPosition": basestring,
"tickPositioner": (JSfunction, basestring),
"tickPositions": list,
"tickWidth": int,
"tickmarkPlacement": basestring,
"title": (Title, dict),
"top": [int, float, basestring],
# DEM 2017/11/16: Note that the 'type' keyword for highstock is
# undocumented yet appears to be supported, likely because of underlying
# shared code. This permits logarithmic Y-Axis scale which is
# frequently useful in stock charts.
"type": basestring,
"units": list
}
class NavigatorOptions(BaseOptions):
ALLOWED_OPTIONS = {
"adaptToUpdatedData": bool,
"baseSeries": [int, basestring],
"enabled": bool,
"handles": (Handles, dict), # need handles object
"height": [int, float],
"margin": [int, float],
"maskFill": (ColorObject, dict),
"maskInside": bool,
"outlineColor": (ColorObject, dict),
"outlineWidth": [int, float],
"series": dict,
"xAxis": (xAxisOptions, dict),
"yAxis": (yAxisOptions, dict),
}
class MultiAxis(object):
def __init__(self, axis):
AXIS_LIST = {
"xAxis": xAxisOptions,
"yAxis": yAxisOptions
}
self.axis = []
self.AxisObj = AXIS_LIST[axis]
def update(self, **kwargs):
self.axis.append(self.AxisObj(**kwargs))
def __jsonable__(self):
return self.axis
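# --- Illustrative usage (a sketch, not part of the original module) ---------
# The option classes above validate keyword arguments against their
# ALLOWED_OPTIONS mappings and wrap nested settings in the helper objects
# imported at the top of the file.  The values below are arbitrary choices
# made only to show the update mechanism, not recommended chart settings.
def _example_build_options():
    chart = ChartOptions(renderTo='container', zoomType='x', height=400)
    # Repeated calls to update_dict() merge new settings into existing ones.
    chart.update_dict(marginTop=20)
    tooltip = TooltipOptions(shared=True, valueDecimals=2)
    # MultiAxis collects several axis definitions for charts with more than
    # one y-axis; each update() call appends a new yAxisOptions instance.
    y_axes = MultiAxis('yAxis')
    y_axes.update(title={'text': 'Price'}, opposite=False)
    y_axes.update(title={'text': 'Volume'}, opposite=True)
    return chart, tooltip, y_axes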
| 34.445578 | 130 | 0.555841 |
4a2131b65e9de878ce757a14f7183edcbd491e79
| 499 |
py
|
Python
|
setup.py
|
amran-quantum/test
|
beb1f8e4d0ebe9862e199a70e06da42a9141b054
|
[
"MIT"
] | null | null | null |
setup.py
|
amran-quantum/test
|
beb1f8e4d0ebe9862e199a70e06da42a9141b054
|
[
"MIT"
] | null | null | null |
setup.py
|
amran-quantum/test
|
beb1f8e4d0ebe9862e199a70e06da42a9141b054
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("requirements.txt") as f:
install_requires = f.read().strip().split("\n")
# get version from __version__ variable in test_app/__init__.py
from test_app import __version__ as version
setup(
name="test_app",
version=version,
description="test app",
author="amran@quantumuniversal.com",
author_email="amran@quantumuniversal.com",
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
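# --- Illustrative installation (a sketch, not part of the original file) ----
# With this setup.py the package can be installed in editable mode for local
# development; the command below is a generic example, not project docs.
#
#   pip install -e .
#
# install_requires is read from requirements.txt at build time, so that file
# must sit next to setup.py when the command runs.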
| 24.95 | 63 | 0.785571 |
4a2132893071b90b44af29d67a8f02142a4c3b2b
| 5,891 |
py
|
Python
|
test/completion/arrays.py
|
perrinjerome/jedi
|
00b220516de9a7d4e1ea313e0474840ca2e8d8c1
|
[
"MIT"
] | 1 |
2020-12-18T21:45:22.000Z
|
2020-12-18T21:45:22.000Z
|
test/completion/arrays.py
|
perrinjerome/jedi
|
00b220516de9a7d4e1ea313e0474840ca2e8d8c1
|
[
"MIT"
] | null | null | null |
test/completion/arrays.py
|
perrinjerome/jedi
|
00b220516de9a7d4e1ea313e0474840ca2e8d8c1
|
[
"MIT"
] | null | null | null |
# -----------------
# basic array lookups
# -----------------
#? int()
[1,""][0]
#? str()
[1,""][1]
#? int() str()
[1,""][2]
#? int() str()
[1,""][20]
#? int() str()
[1,""][str(hello)]
a = list()
#? list()
[a][0]
#? list()
[[a,a,a]][2][100]
c = [[a,""]]
#? str()
c[0][1]
b = [6,7]
#? int()
b[8-7]
# Something unreasonable:
#? int()
b['']
# -----------------
# Slices
# -----------------
#? list()
b[8:]
#? list()
b[int():]
#? list()
b[:]
#? int()
b[:, 1]
#? int()
b[:1, 1]
#? int()
b[1:1, 1]
#? int()
b[1:1:, ...]
#? int()
b[1:1:5, ...]
class _StrangeSlice():
def __getitem__(self, sliced):
return sliced
# Should not result in an error, just because the slice itself is returned.
#? slice()
_StrangeSlice()[1:2]
# -----------------
# iterable multiplication
# -----------------
a = ['']*2
#? list()
a
# -----------------
# tuple assignments
# -----------------
a1, b1 = (1, "")
#? int()
a1
#? str()
b1
(a2, b2) = (1, "")
#? int()
a2
#? str()
b2
# list assignment
[list1, list2] = (1, "")
#? int()
list1
#? str()
list2
[list3, list4] = [1, ""]
#? int()
list3
#? str()
list4
# -----------------
# subtuple assignment
# -----------------
(a3, (b3, c3)) = (1, ("", list))
#? list
c3
a4, (b4, c4) = (1, ("", list))
#? list
c4
#? int()
a4
#? str()
b4
# -----------------
# multiple assignments
# -----------------
a = b = 1
#? int()
a
#? int()
b
(a, b) = (c, (e, f)) = ('2', (3, 4))
#? str()
a
#? tuple()
b
#? str()
c
#? int()
e
#? int()
f
# -----------------
# unnecessary braces
# -----------------
a = (1)
#? int()
a
#? int()
(1)
#? int()
((1))
#? int()
((1)+1)
u, v = 1, ""
#? int()
u
((u1, v1)) = 1, ""
#? int()
u1
#? int()
(u1)
(a), b = 1, ''
#? int()
a
def a(): return ''
#? str()
(a)()
#? str()
(a)().title()
#? int()
(tuple).index()
#? int()
(tuple)().index()
class C():
def __init__(self):
self.a = (str()).upper()
#? str()
C().a
# -----------------
# imbalanced sides
# -----------------
(f, g) = (1,)
#? int()
f
#? []
g.
(f, g, h) = (1,'')
#? int()
f
#? str()
g
#? []
h.
(f1, g1) = 1
#? []
f1.
#? []
g1.
(f, g) = (1,'',1.0)
#? int()
f
#? str()
g
# -----------------
# dicts
# -----------------
dic2 = {'asdf': 3, 'b': 'str'}
#? int()
dic2['asdf']
#? None int() str()
dic2.get('asdf')
# string literal
#? int()
dic2[r'asdf']
#? int()
dic2[r'asdf']
#? int()
dic2[r'as' 'd' u'f']
#? int() str()
dic2['just_something']
# unpacking
a, b = dic2
#? str()
a
a, b = {1: 'x', 2.0: 1j}
#? int() float()
a
#? int() float()
b
def f():
""" github #83 """
r = {}
r['status'] = (200, 'ok')
return r
#? dict()
f()
# completion within dicts
#? 9 ['str']
{str: str}
# iteration problem (detected with sith)
d = dict({'a':''})
def y(a):
return a
#?
y(**d)
#? str()
d['a']
# problem with more complicated casts
dic = {str(key): ''}
#? str()
dic['']
# Just skip Python 2 tests from here. EoL soon, I'm too lazy for it.
# python > 2.7
for x in {1: 3.0, '': 1j}:
#? int() str()
x
#? ['__iter__']
dict().values().__iter__
d = dict(a=3, b='')
x, = d.values()
#? int() str()
x
#? int()
d['a']
#? int() str() None
d.get('a')
some_dct = dict({'a': 1, 'b': ''}, a=1.0)
#? float()
some_dct['a']
#? str()
some_dct['b']
#? int() float() str()
some_dct['c']
# -----------------
# with variable as index
# -----------------
a = (1, "")
index = 1
#? str()
a[index]
# these should just output the whole array
index = int
#? int() str()
a[index]
index = int()
#? int() str()
a[index]
# dicts
index = 'asdf'
dic2 = {'asdf': 3, 'b': 'str'}
#? int()
dic2[index]
# -----------------
# __getitem__
# -----------------
class GetItem():
def __getitem__(self, index):
return 1.0
#? float()
GetItem()[0]
class GetItem():
def __init__(self, el):
self.el = el
def __getitem__(self, index):
return self.el
#? str()
GetItem("")[1]
class GetItemWithList():
def __getitem__(self, index):
return [1, 1.0, 's'][index]
#? float()
GetItemWithList()[1]
for i in 0, 2:
#? int() str()
GetItemWithList()[i]
# With super
class SuperYeah(list):
def __getitem__(self, index):
return super()[index]
#?
SuperYeah([1])[0]
#?
SuperYeah()[0]
# -----------------
# conversions
# -----------------
a = [1, ""]
#? int() str()
list(a)[1]
#? int() str()
list(a)[0]
#?
set(a)[0]
#? int() str()
list(set(a))[1]
#? int() str()
next(iter(set(a)))
#? int() str()
list(list(set(a)))[1]
# does not yet work, because the recursion catching is not good enough (catches too much)
#? int() str()
list(set(list(set(a))))[1]
#? int() str()
list(set(set(a)))[1]
# frozenset
#? int() str()
list(frozenset(a))[1]
#? int() str()
list(set(frozenset(a)))[1]
# iter
#? int() str()
list(iter(a))[1]
#? int() str()
list(iter(list(set(a))))[1]
# tuple
#? int() str()
tuple(a)[1]
#? int() str()
tuple(list(set(a)))[1]
#? int()
tuple((1,))[0]
# implementation detail for lists, should not be visible
#? []
list().__iterable
# With a list comprehension.
for i in set(a for a in [1]):
#? int()
i
# -----------------
# Merged Arrays
# -----------------
for x in [1] + ['']:
#? int() str()
x
# -----------------
# Potential Recursion Issues
# -----------------
class X():
def y(self):
self.a = [1]
def x(self):
self.a = list(self.a)
#? int()
self.a[0]
# -----------------
# For loops with attribute assignment.
# -----------------
def test_func():
x = 'asdf'
for x.something in [6,7,8]:
pass
#? str()
x
for x.something, b in [[6, 6.0]]:
pass
#? str()
x
#? int()
tuple({1})[0]
# python >= 3.4
# -----------------
# PEP 3132 Extended Iterable Unpacking (star unpacking)
# -----------------
a, *b, c = [1, 'b', list, dict]
#? int()
a
#?
b
#? list
c
# Not valid syntax
a, *b, *c = [1, 'd', list]
#? int()
a
#?
b
#?
c
lc = [x for a, *x in [(1, '', 1.0)]]
#?
lc[0][0]
#?
lc[0][1]
| 12.022449 | 90 | 0.444576 |
4a21332de5e15983dd300e0ced515750bc34d905
| 2,435 |
py
|
Python
|
ytelapi/models/body_54.py
|
Ytel-Inc/YtelAPI-Python
|
139dc02d93e74c78b6c3d91e3002ae98e2270223
|
[
"MIT"
] | null | null | null |
ytelapi/models/body_54.py
|
Ytel-Inc/YtelAPI-Python
|
139dc02d93e74c78b6c3d91e3002ae98e2270223
|
[
"MIT"
] | null | null | null |
ytelapi/models/body_54.py
|
Ytel-Inc/YtelAPI-Python
|
139dc02d93e74c78b6c3d91e3002ae98e2270223
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Body54(object):
"""Implementation of the 'body_54' model.
TODO: type model description here.
Attributes:
page (int): The page count to retrieve from the total results in the
collection. Page indexing starts at 1.
page_size (int): The count of objects to return per page.
mfrom (string): Filter SMS message objects from this valid 10-digit
phone number (E.164 format).
to (string): Filter SMS message objects to this valid 10-digit phone
number (E.164 format).
date_sent (string): Filter sms message objects by this date.
"""
# Create a mapping from Model property names to API property names
_names = {
"page":'Page',
"page_size":'PageSize',
"mfrom":'From',
"to":'To',
"date_sent":'DateSent'
}
def __init__(self,
page=None,
page_size=None,
mfrom=None,
to=None,
date_sent=None):
"""Constructor for the Body54 class"""
# Initialize members of the class
self.page = page
self.page_size = page_size
self.mfrom = mfrom
self.to = to
self.date_sent = date_sent
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
page = dictionary.get('Page')
page_size = dictionary.get('PageSize')
mfrom = dictionary.get('From')
to = dictionary.get('To')
date_sent = dictionary.get('DateSent')
# Return an object of this model
return cls(page,
page_size,
mfrom,
to,
date_sent)
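# --- Illustrative usage (a sketch, not part of the generated model) ---------
# from_dictionary() expects the API property names listed in _names; the
# values below are placeholders made up for illustration only.
def _example_round_trip():
    payload = {
        'Page': 1,
        'PageSize': 50,
        'From': '+15551234567',
        'To': '+15557654321',
        'DateSent': '2019-01-01',
    }
    body = Body54.from_dictionary(payload)
    # Map the Python attribute names back to the API property names.
    return {api_name: getattr(body, attr) for attr, api_name in Body54._names.items()}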
| 28.647059 | 84 | 0.545791 |
4a2133f1c96719b3f7b7bbf336d75b2727428833
| 11,347 |
py
|
Python
|
train_frcnn.py
|
vaishnavi-naik/keras-frcnn
|
d980e50a7da8097bc947be1bd8bed45f057c5289
|
[
"Apache-2.0"
] | null | null | null |
train_frcnn.py
|
vaishnavi-naik/keras-frcnn
|
d980e50a7da8097bc947be1bd8bed45f057c5289
|
[
"Apache-2.0"
] | null | null | null |
train_frcnn.py
|
vaishnavi-naik/keras-frcnn
|
d980e50a7da8097bc947be1bd8bed45f057c5289
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
import re
import traceback
import os
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from keras_frcnn import config, data_generators
from keras_frcnn import losses as losses
import keras_frcnn.roi_helpers as roi_helpers
from tensorflow.keras.utils import Progbar
sys.setrecursionlimit(40000)
parser = OptionParser()
parser.add_option("-p", "--path", dest="train_path", help="Path to training data.")
parser.add_option("-v", "--valpath", dest="val_path", help="Path to validation data.")
parser.add_option("-o", "--parser", dest="parser", help="Parser to use. One of simple or pascal_voc",
default="pascal_voc")
parser.add_option("-n", "--num_rois", type="int", dest="num_rois", help="Number of RoIs to process at once.", default=32)
parser.add_option("--network", dest="network", help="Base network to use. Supports vgg or resnet50.", default='resnet50')
parser.add_option("--hf", dest="horizontal_flips", help="Augment with horizontal flips in training. (Default=false).", action="store_true", default=False)
parser.add_option("--vf", dest="vertical_flips", help="Augment with vertical flips in training. (Default=false).", action="store_true", default=False)
parser.add_option("--rot", "--rot_90", dest="rot_90", help="Augment with 90 degree rotations in training. (Default=false).",
action="store_true", default=False)
parser.add_option("--num_epochs", type="int", dest="num_epochs", help="Number of epochs.", default=2000)
parser.add_option("--config_filename", dest="config_filename", help=
"Location to store all the metadata related to the training (to be used when testing).",
default="config.pickle")
parser.add_option("--output_weight_path", dest="output_weight_path", help="Output path for weights.", default='./model_frcnn.hdf5')
parser.add_option("--input_weight_path", dest="input_weight_path", help="Input path for weights. If not specified, will try to load default weights provided by keras.")
(options, args) = parser.parse_args()
if not options.train_path: # if filename is not given
parser.error('Error: path to training data must be specified. Pass --path to command line')
if options.parser == 'pascal_voc':
from keras_frcnn.pascal_voc_parser import get_data
elif options.parser == 'simple':
from keras_frcnn.simple_parser import get_data
else:
raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")
# pass the settings from the command line, and persist them in the config object
C = config.Config()
C.use_horizontal_flips = bool(options.horizontal_flips)
C.use_vertical_flips = bool(options.vertical_flips)
C.rot_90 = bool(options.rot_90)
C.model_path = options.output_weight_path
model_path_regex = re.match(r"^(.+)(\.hdf5)$", C.model_path)
if model_path_regex is None:
	print('Output weights must have .hdf5 filetype')
	exit(1)
C.num_rois = int(options.num_rois)
if options.network == 'vgg':
C.network = 'vgg'
from keras_frcnn import vgg as nn
elif options.network == 'resnet50':
from keras_frcnn import resnet as nn
C.network = 'resnet50'
else:
print('Not a valid model')
raise ValueError
# check if weight path was passed via command line
if options.input_weight_path:
C.base_net_weights = options.input_weight_path
else:
# set the path to weights based on backend and model
C.base_net_weights = nn.get_weight_path()
train_imgs, classes_count, class_mapping = get_data(options.train_path)
val_imgs, _, _ = get_data(options.val_path)
if 'bg' not in classes_count:
classes_count['bg'] = 0
class_mapping['bg'] = len(class_mapping)
C.class_mapping = class_mapping
inv_map = {v: k for k, v in class_mapping.items()}
print('Training images per class:')
pprint.pprint(classes_count)
print(f'Num classes (including bg) = {len(classes_count)}')
config_output_filename = options.config_filename
with open(config_output_filename, 'wb') as config_f:
pickle.dump(C,config_f)
print(f'Config has been written to {config_output_filename}, and can be loaded when testing to ensure correct results')
random.shuffle(train_imgs)
num_imgs = len(train_imgs)
#train_imgs = [s for s in all_imgs if s['imageset'] == 'trainval']
#val_imgs = [s for s in all_imgs if s['imageset'] == 'test']
print(f'Num train samples {len(train_imgs)}')
print(f'Num val samples {len(val_imgs)}')
data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='train')
data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length,K.image_data_format(), mode='val')
if K.image_data_format() == 'th':
input_shape_img = (3, None, None)
else:
input_shape_img = (None, None, 3)
print("Img input shp:", input_shape_img)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(None, 4))
# define the base network (resnet here, can be VGG, Inception, etc)
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True)
print("Rpn:", rpn, "output:", rpn[:2])
model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)
# this is a model that holds both the RPN and the classifier, used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)
try:
	print(f'loading weights from {C.base_net_weights}', os.getcwd())
	model_rpn.load_weights(C.base_net_weights, by_name=True)
	model_classifier.load_weights(C.base_net_weights, by_name=True)
except Exception:
	print('Could not load pretrained model weights. Weights can be found in the keras application folder \
		https://github.com/fchollet/keras/tree/master/keras/applications')
optimizer = Adam(lr=1e-5)
optimizer_classifier = Adam(lr=1e-5)
model_rpn.compile(optimizer=optimizer, loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_regr(num_anchors)])
model_classifier.compile(optimizer=optimizer_classifier, loss=[losses.class_loss_cls, losses.class_loss_regr(len(classes_count)-1)], metrics={f'dense_class_{len(classes_count)}': 'accuracy'})
model_all.compile(optimizer='sgd', loss='mae')
print(model_all.summary())
# epoch_length = 1000
epoch_length = 3
num_epochs = int(options.num_epochs)
iter_num = 0
losses = np.zeros((epoch_length, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
start_time = time.time()
best_loss = np.Inf
class_mapping_inv = {v: k for k, v in class_mapping.items()}
print('Starting training')
vis = True
for epoch_num in range(num_epochs):
progbar = Progbar(epoch_length)
print(f'Epoch {epoch_num + 1}/{num_epochs}')
while True:
try:
if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose:
mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor))/len(rpn_accuracy_rpn_monitor)
rpn_accuracy_rpn_monitor = []
print(f'Average number of overlapping bounding boxes from RPN = {mean_overlapping_bboxes} for {epoch_length} previous iterations')
if mean_overlapping_bboxes == 0:
print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')
X, Y, img_data = next(data_gen_train)
print("Training batch", X[0].shape, Y[0].shape)
loss_rpn = model_rpn.train_on_batch(X, Y)
print("Train done")
P_rpn = model_rpn.predict_on_batch(X)
R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_data_format(), use_regr=True, overlap_thresh=0.7, max_boxes=300)
# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
if X2 is None:
rpn_accuracy_rpn_monitor.append(0)
rpn_accuracy_for_epoch.append(0)
continue
neg_samples = np.where(Y1[0, :, -1] == 1)
pos_samples = np.where(Y1[0, :, -1] == 0)
if len(neg_samples) > 0:
neg_samples = neg_samples[0]
else:
neg_samples = []
if len(pos_samples) > 0:
pos_samples = pos_samples[0]
else:
pos_samples = []
rpn_accuracy_rpn_monitor.append(len(pos_samples))
rpn_accuracy_for_epoch.append((len(pos_samples)))
if C.num_rois > 1:
if len(pos_samples) < C.num_rois//2:
selected_pos_samples = pos_samples.tolist()
else:
selected_pos_samples = np.random.choice(pos_samples, C.num_rois//2, replace=False).tolist()
try:
selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist()
except:
selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist()
sel_samples = selected_pos_samples + selected_neg_samples
else:
# in the extreme case where num_rois = 1, we pick a random pos or neg sample
selected_pos_samples = pos_samples.tolist()
selected_neg_samples = neg_samples.tolist()
if np.random.randint(0, 2):
sel_samples = random.choice(neg_samples)
else:
sel_samples = random.choice(pos_samples)
loss_class = model_classifier.train_on_batch([X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])
losses[iter_num, 0] = loss_rpn[1]
losses[iter_num, 1] = loss_rpn[2]
losses[iter_num, 2] = loss_class[1]
losses[iter_num, 3] = loss_class[2]
losses[iter_num, 4] = loss_class[3]
progbar.update(iter_num+1, [('rpn_cls', losses[iter_num, 0]), ('rpn_regr', losses[iter_num, 1]),
('detector_cls', losses[iter_num, 2]), ('detector_regr', losses[iter_num, 3])])
iter_num += 1
if iter_num == epoch_length:
loss_rpn_cls = np.mean(losses[:, 0])
loss_rpn_regr = np.mean(losses[:, 1])
loss_class_cls = np.mean(losses[:, 2])
loss_class_regr = np.mean(losses[:, 3])
class_acc = np.mean(losses[:, 4])
mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
rpn_accuracy_for_epoch = []
if C.verbose:
					print(f'Mean number of bounding boxes from RPN overlapping ground truth boxes: {mean_overlapping_bboxes}')
print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}')
print(f'Loss RPN classifier: {loss_rpn_cls}')
print(f'Loss RPN regression: {loss_rpn_regr}')
print(f'Loss Detector classifier: {loss_class_cls}')
print(f'Loss Detector regression: {loss_class_regr}')
print(f'Elapsed time: {time.time() - start_time}')
curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
iter_num = 0
start_time = time.time()
if curr_loss < best_loss:
if C.verbose:
print(f'Total loss decreased from {best_loss} to {curr_loss}, saving weights')
best_loss = curr_loss
model_all.save_weights(model_path_regex.group(1) + "_" + '{:04d}'.format(epoch_num) + model_path_regex.group(2))
break
except Exception as e:
print(f'Exception: {e}')
print(traceback.format_exc())
continue
print('Training complete, exiting.')
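# --- Illustrative invocation (a sketch, not part of the original script) ----
# The option parser above requires at least the training-data path; every
# path and value below is a placeholder chosen for illustration.
#
#   python train_frcnn.py -p train_annotations.txt -v val_annotations.txt \
#       -o simple --network resnet50 --num_rois 32 --num_epochs 50 \
#       --output_weight_path ./model_frcnn.hdf5
#
# With the 'simple' parser, each annotation line is expected to describe one
# box as filepath,x1,y1,x2,y2,class_name (the format consumed by
# keras_frcnn.simple_parser.get_data).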
| 38.205387 | 191 | 0.738081 |
4a2133fce1828764d1ba495c762ea09ddcc60a86
| 2,140 |
py
|
Python
|
pysm/tests/test_mpi_smoothing.py
|
xgarrido/pysm
|
a47162bddd0a9edefa8ea3fe57f38f3b78e27fd9
|
[
"MIT"
] | null | null | null |
pysm/tests/test_mpi_smoothing.py
|
xgarrido/pysm
|
a47162bddd0a9edefa8ea3fe57f38f3b78e27fd9
|
[
"MIT"
] | null | null | null |
pysm/tests/test_mpi_smoothing.py
|
xgarrido/pysm
|
a47162bddd0a9edefa8ea3fe57f38f3b78e27fd9
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
import healpy as hp
import pysm
import pysm.units as u
try:
from mpi4py import MPI
except ImportError:
pytest.skip("mpi4py failed to import, skip MPI tests", allow_module_level=True)
try:
import libsharp
except ImportError:
pytest.skip(
"libsharp failed to import, skip MPI smoothing tests", allow_module_level=True
)
@pytest.fixture
def mpi_comm():
comm = MPI.COMM_WORLD
return comm
def test_mpi_assemble(mpi_comm):
nside = 128
lmax = 2 * nside
map_dist = pysm.MapDistribution(pixel_indices=None, mpi_comm=mpi_comm, nside=nside)
model = pysm.Model(nside, map_dist=map_dist)
distributed_map = model.read_map("pysm_2/dust_temp.fits")
full_map_rank0 = pysm.mpi.assemble_map_on_rank0(
mpi_comm,
distributed_map,
model.map_dist.pixel_indices,
n_components=1,
npix=hp.nside2npix(nside),
)[0]
if mpi_comm.rank == 0:
np.testing.assert_allclose(
full_map_rank0,
pysm.read_map("pysm_2/dust_temp.fits", nside=nside).value,
rtol=1e-5,
)
def test_mpi_smoothing(mpi_comm):
nside = 128
lmax = 2 * nside
map_dist = pysm.MapDistribution(
pixel_indices=None, mpi_comm=mpi_comm, smoothing_lmax=lmax, nside=nside
)
model = pysm.Model(nside, map_dist=map_dist)
distributed_map = model.read_map("pysm_2/dust_temp.fits")
fwhm = 5 * u.deg
smoothed_distributed_map = pysm.mpi_smoothing(
distributed_map, fwhm, map_dist=map_dist
)
full_map_rank0 = pysm.mpi.assemble_map_on_rank0(
mpi_comm,
smoothed_distributed_map,
model.map_dist.pixel_indices,
n_components=1,
npix=hp.nside2npix(nside),
)[0]
if mpi_comm.rank == 0:
np.testing.assert_allclose(
full_map_rank0,
hp.smoothing(
pysm.read_map("pysm_2/dust_temp.fits", nside=nside).value,
fwhm.to(u.rad).value,
iter=0,
lmax=lmax,
use_pixel_weights=False,
),
rtol=1e-5,
)
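# --- Illustrative invocation (a sketch, not part of the original tests) -----
# The distribution logic only becomes interesting with more than one MPI
# rank; the rank count and test path below are placeholders.
#
#   mpirun -n 4 python -m pytest pysm/tests/test_mpi_smoothing.py
#
# Each rank works on its libsharp-distributed share of the map, and rank 0
# reassembles the full map to compare against the serial healpy result.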
| 27.435897 | 87 | 0.639252 |
4a2134137313b1b4e62c5828b0b7dcc85ce82751
| 411 |
py
|
Python
|
core/migrations/0006_task_completed.py
|
firminoneto11/bright-cities-todo
|
74a3ebdc82699c0ffa1fcd39fa6b82248495a1dc
|
[
"MIT"
] | null | null | null |
core/migrations/0006_task_completed.py
|
firminoneto11/bright-cities-todo
|
74a3ebdc82699c0ffa1fcd39fa6b82248495a1dc
|
[
"MIT"
] | null | null | null |
core/migrations/0006_task_completed.py
|
firminoneto11/bright-cities-todo
|
74a3ebdc82699c0ffa1fcd39fa6b82248495a1dc
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.4 on 2021-06-12 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20210612_0908'),
]
operations = [
migrations.AddField(
model_name='task',
name='completed',
field=models.BooleanField(default=False, verbose_name='Completed'),
),
]
| 21.631579 | 79 | 0.610706 |
4a21342f961cfdc129e9cfcfdb4ac52377b4b275
| 8,254 |
py
|
Python
|
models/base_model.py
|
mkecera/InstColorization
|
14b5d7084912014a668b81a704619529dc63ccb1
|
[
"MIT"
] | null | null | null |
models/base_model.py
|
mkecera/InstColorization
|
14b5d7084912014a668b81a704619529dc63ccb1
|
[
"MIT"
] | null | null | null |
models/base_model.py
|
mkecera/InstColorization
|
14b5d7084912014a668b81a704619529dc63ccb1
|
[
"MIT"
] | null | null | null |
import os
import torch
from collections import OrderedDict
from . import networks
import skimage
from util import util
from skimage import io, img_as_ubyte, metrics
import numpy as np
class BaseModel():
# modify parser to add command line options,
# and also change the default values if needed
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
if opt.resize_or_crop != 'scale_width':
torch.backends.cudnn.benchmark = True
# self.loss_names = []
self.model_names = []
self.visual_names = []
self.image_paths = []
self.loss_names = ['G', 'L1', 'CE']
self.avg_losses = OrderedDict()
self.avg_loss_alpha = opt.avg_loss_alpha
self.error_cnt = 0
self.criterionL1 = networks.HuberLoss(delta=1. / opt.ab_norm)
for loss_name in self.loss_names:
self.avg_losses[loss_name] = 0
def set_input(self, input):
self.input = input
def forward(self):
pass
# load and print networks; create schedulers
def setup(self, opt, parser=None):
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.load_model:
self.load_networks(opt.which_epoch)
# make models eval mode during test time
def eval(self):
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
# used in test time, wrapping `forward` in no_grad() so we don't save
# intermediate steps for backprop
def test(self, compute_losses=False):
with torch.no_grad():
self.forward()
if(compute_losses):
self.compute_losses_G()
# get image paths
def get_image_paths(self):
return self.image_paths
def optimize_parameters(self):
pass
# update learning rate (called once every epoch)
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
# print('learning rate = %.7f' % lr)
    # return visualization images. train.py will display these images, and save the images to an HTML file
def get_current_visuals(self):
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
    # return training losses/errors. train.py will print out these errors as debugging information
def get_current_losses(self):
self.loss_L1 = torch.mean(self.criterionL1(self.fake_B_reg.type(torch.cuda.FloatTensor),
self.full_real_B.type(torch.cuda.FloatTensor)))
self.loss_G = 10 * torch.mean(self.criterionL1(self.fake_B_reg.type(torch.cuda.FloatTensor),
self.full_real_B.type(torch.cuda.FloatTensor)))
self.error_cnt += 1
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
# float(...) works for both scalar tensor and float number
errors_ret[name] = float(getattr(self, 'loss_' + name))
return errors_ret
def get_current_metric(self):
self.out_img = torch.clamp(util.lab2rgb(
torch.cat((self.full_real_A.type(torch.cuda.FloatTensor), self.fake_B_reg.type(torch.cuda.FloatTensor)),
dim=1), self.opt), 0.0, 1.0)
self.out_img = np.transpose(self.out_img.cpu().data.numpy()[0], (1, 2, 0))
# self.out_img = img_as_ubyte(self.out_img)
self.true_img = torch.clamp(util.lab2rgb(
torch.cat((self.full_real_A.type(torch.cuda.FloatTensor), self.full_real_B.type(torch.cuda.FloatTensor)),
dim=1), self.opt), 0.0, 1.0)
self.true_img = np.transpose(self.true_img.cpu().data.numpy()[0], (1, 2, 0))
# self.true_img = img_as_ubyte(self.true_img)
self.psnr = skimage.metrics.peak_signal_noise_ratio(self.true_img, self.out_img)
self.ssim = skimage.metrics.structural_similarity(self.true_img, self.out_img, multichannel=True)
return (self.psnr, self.ssim)
# save models to the disk
def save_networks(self, which_epoch):
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (which_epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
# load models from the disk
def load_networks(self, which_epoch):
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (which_epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
if os.path.isfile(load_path) is False:
continue
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
# for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
# self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict, strict=False)
# print network information
def print_networks(self, verbose):
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
    # set requires_grad=False to avoid computation
def set_requires_grad(self, nets, requires_grad=False):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
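# --- Illustrative subclass (a sketch, not part of the original module) ------
# Concrete models are expected to list their networks in model_names, attach
# them as attributes called 'net<Name>', and collect their optimizers in
# self.optimizers so setup(), save_networks() and load_networks() can find
# them.  The subclass below is hypothetical and only illustrates that
# contract; it is not a usable colorization model.
class _ExampleModel(BaseModel):
    def name(self):
        return '_ExampleModel'
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.model_names = ['G']
        # Any torch module works for the sketch; a single linear layer keeps
        # it small.  Real models build their networks via the networks module.
        self.netG = torch.nn.Linear(2, 2).to(self.device)
        self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=1e-4)
        self.optimizers = [self.optimizer_G]
    def forward(self):
        self.fake = self.netG(torch.zeros(1, 2, device=self.device))
    def optimize_parameters(self):
        self.forward()
        self.optimizer_G.zero_grad()
        self.fake.sum().backward()
        self.optimizer_G.step()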
| 42.112245 | 117 | 0.591713 |
4a2134b8c04689ff39436baddfda427023663478
| 9,413 |
py
|
Python
|
appengine/flexible/tasks/snippets.py
|
HoleCat/echarlosperros
|
b67460de0467e05b42a763c4430b26ecfd97c2aa
|
[
"Apache-2.0"
] | null | null | null |
appengine/flexible/tasks/snippets.py
|
HoleCat/echarlosperros
|
b67460de0467e05b42a763c4430b26ecfd97c2aa
|
[
"Apache-2.0"
] | null | null | null |
appengine/flexible/tasks/snippets.py
|
HoleCat/echarlosperros
|
b67460de0467e05b42a763c4430b26ecfd97c2aa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import tasks
def create_queue(project, location, queue_blue_name, queue_red_name):
# [START cloud_tasks_taskqueues_using_yaml]
client = tasks.CloudTasksClient()
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
    # location = 'us-central1'
# queue_blue_name = 'queue-blue'
# queue_red_name = 'queue-red'
parent = f"projects/{project}/locations/{location}"
queue_blue = {
'name': client.queue_path(project, location, queue_blue_name),
'rate_limits': {
'max_dispatches_per_second': 5
},
'app_engine_routing_override': {
'version': 'v2',
'service': 'task-module'
}
}
queue_red = {
'name': client.queue_path(project, location, queue_red_name),
'rate_limits': {
'max_dispatches_per_second': 1
}
}
queues = [queue_blue, queue_red]
for queue in queues:
response = client.create_queue(parent=parent, queue=queue)
print(response)
# [END cloud_tasks_taskqueues_using_yaml]
return response
def update_queue(project, location, queue):
# [START cloud_tasks_taskqueues_processing_rate]
client = tasks.CloudTasksClient()
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
    # location = 'us-central1'
# queue = 'queue-blue'
# Get queue object
queue_path = client.queue_path(project, location, queue)
queue = client.get_queue(name=queue_path)
# Update queue object
queue.rate_limits.max_dispatches_per_second = 20
queue.rate_limits.max_concurrent_dispatches = 10
response = client.update_queue(queue=queue)
print(response)
# [END cloud_tasks_taskqueues_processing_rate]
return response
def create_task(project, location, queue):
# [START cloud_tasks_taskqueues_new_task]
client = tasks.CloudTasksClient()
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
    # location = 'us-central1'
# queue = 'default'
amount = 10
parent = client.queue_path(project, location, queue)
task = {
'app_engine_http_request': {
'http_method': tasks.HttpMethod.POST,
'relative_uri': '/update_counter',
'app_engine_routing': {
'service': 'worker'
},
'body': str(amount).encode()
}
}
response = client.create_task(parent=parent, task=task)
eta = response.schedule_time.strftime("%m/%d/%Y, %H:%M:%S")
print('Task {} enqueued, ETA {}.'.format(response.name, eta))
# [END cloud_tasks_taskqueues_new_task]
return response
def create_tasks_with_data(project, location, queue):
# [START cloud_tasks_taskqueues_passing_data]
import json
client = tasks.CloudTasksClient()
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
    # location = 'us-central1'
# queue = 'default'
parent = client.queue_path(project, location, queue)
task1 = {
'app_engine_http_request': {
'http_method': tasks.HttpMethod.POST,
'relative_uri': '/update_counter?key=blue',
'app_engine_routing': {
'service': 'worker'
}
}
}
task2 = {
'app_engine_http_request': {
'http_method': tasks.HttpMethod.POST,
'relative_uri': '/update_counter',
'app_engine_routing': {
'service': 'worker'
},
'headers': {
'Content-Type': 'application/json'
},
'body': json.dumps({'key': 'blue'}).encode()
}
}
response = client.create_task(parent=parent, task=task1)
print(response)
response = client.create_task(parent=parent, task=task2)
print(response)
# [END cloud_tasks_taskqueues_passing_data]
return response
def create_task_with_name(project, location, queue, task_name):
# [START cloud_tasks_taskqueues_naming_tasks]
client = tasks.CloudTasksClient()
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
    # location = 'us-central1'
# queue = 'default'
# task_name = 'first-try'
parent = client.queue_path(project, location, queue)
task = {
'name': client.task_path(project, location, queue, task_name),
'app_engine_http_request': {
'http_method': tasks.HttpMethod.GET,
'relative_uri': '/url/path'
}
}
response = client.create_task(parent=parent, task=task)
print(response)
# [END cloud_tasks_taskqueues_naming_tasks]
return response
def delete_task(project, location, queue):
# [START cloud_tasks_taskqueues_deleting_tasks]
client = tasks.CloudTasksClient()
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
    # location = 'us-central1'
# queue = 'queue1'
task_path = client.task_path(project, location, queue, 'foo')
response = client.delete_task(name=task_path)
# [END cloud_tasks_taskqueues_deleting_tasks]
return response
def purge_queue(project, location, queue):
# [START cloud_tasks_taskqueues_purging_tasks]
client = tasks.CloudTasksClient()
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
    # location = 'us-central1'
# queue = 'queue1'
queue_path = client.queue_path(project, location, queue)
response = client.purge_queue(name=queue_path)
# [END cloud_tasks_taskqueues_purging_tasks]
return response
def pause_queue(project, location, queue):
# [START cloud_tasks_taskqueues_pause_queue]
client = tasks.CloudTasksClient()
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
    # location = 'us-central1'
# queue = 'queue1'
queue_path = client.queue_path(project, location, queue)
response = client.pause_queue(name=queue_path)
# [END cloud_tasks_taskqueues_pause_queue]
return response
def delete_queue(project, location, queue):
# [START cloud_tasks_taskqueues_deleting_queues]
client = tasks.CloudTasksClient()
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
    # location = 'us-central1'
# queue = 'queue1'
queue_path = client.queue_path(project, location, queue)
response = client.delete_queue(name=queue_path)
# [END cloud_tasks_taskqueues_deleting_queues]
return response
def retry_task(project, location, fooqueue, barqueue, bazqueue):
# [START cloud_tasks_taskqueues_retrying_tasks]
from google.protobuf import duration_pb2
client = tasks.CloudTasksClient()
# TODO(developer): Uncomment these lines and replace with your values.
# project = 'my-project-id'
    # location = 'us-central1'
# fooqueue = 'fooqueue'
# barqueue = 'barqueue'
# bazqueue = 'bazqueue'
parent = f"projects/{project}/locations/{location}"
max_retry = duration_pb2.Duration()
max_retry.seconds = 2*60*60*24
foo = {
'name': client.queue_path(project, location, fooqueue),
'rate_limits': {
'max_dispatches_per_second': 1
},
'retry_config': {
'max_attempts': 7,
'max_retry_duration': max_retry
}
}
    min_backoff = duration_pb2.Duration()
    min_backoff.seconds = 10
    max_backoff = duration_pb2.Duration()
    max_backoff.seconds = 200
    bar = {
        'name': client.queue_path(project, location, barqueue),
        'rate_limits': {
            'max_dispatches_per_second': 1
        },
        'retry_config': {
            'min_backoff': min_backoff,
            'max_backoff': max_backoff,
            'max_doublings': 0
        }
    }
    max_backoff.seconds = 300
    baz = {
        'name': client.queue_path(project, location, bazqueue),
        'rate_limits': {
            'max_dispatches_per_second': 1
        },
        'retry_config': {
            'min_backoff': min_backoff,
            'max_backoff': max_backoff,
            'max_doublings': 3
        }
    }
queues = [foo, bar, baz]
for queue in queues:
response = client.create_queue(parent=parent, queue=queue)
print(response)
# [END cloud_tasks_taskqueues_retrying_tasks]
return response
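# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# sample). One plausible way to exercise the helpers above end to end; the
# project id, location and queue names below are placeholders, and the code
# assumes the Cloud Tasks API is enabled and application-default credentials
# are configured.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    PROJECT = 'my-project-id'   # placeholder
    LOCATION = 'us-central1'    # placeholder
    create_queue(PROJECT, LOCATION, 'queue-blue', 'queue-red')
    update_queue(PROJECT, LOCATION, 'queue-blue')
    create_task(PROJECT, LOCATION, 'default')
    create_task_with_name(PROJECT, LOCATION, 'default', 'first-try')
    pause_queue(PROJECT, LOCATION, 'queue-blue')
    purge_queue(PROJECT, LOCATION, 'queue-blue')
    delete_queue(PROJECT, LOCATION, 'queue-blue')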
| 30.963816 | 75 | 0.626793 |
4a2134e05d81734f48f43758e42e5254cbab2119 | 134,907 | py | Python | astropy/io/fits/tests/test_table.py | bastianbeischer/astropy | 6054cc78b22a6bcd4b37fdfdec02f5331b957355 | ["BSD-3-Clause"] | 1 | 2020-05-10T21:17:24.000Z | 2020-05-10T21:17:24.000Z | astropy/io/fits/tests/test_table.py | bastianbeischer/astropy | 6054cc78b22a6bcd4b37fdfdec02f5331b957355 | ["BSD-3-Clause"] | null | null | null | astropy/io/fits/tests/test_table.py | bastianbeischer/astropy | 6054cc78b22a6bcd4b37fdfdec02f5331b957355 | ["BSD-3-Clause"] | null | null | null |
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import pytest
import numpy as np
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.tests.helper import catch_warnings, ignore_warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.io.fits.column import Delayed, NUMPY2FITS
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from . import FitsTestCase
def comparefloats(a, b):
"""
    Compare two float scalars or arrays and see if they are consistent.
    Consistency is determined by ensuring the difference is less than the
    expected amount. Return True if consistent, False if any differences.
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == 'float32' or bb.dtype.name == 'float32':
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.
if np.any(mask0):
if diff[mask0].max() != 0.:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
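# Hedged illustration (added; not part of the original test module): with the
# coarse 1e-5 relative tolerance above, float32 values that agree to roughly
# five significant digits compare as consistent, while a ~1% discrepancy does
# not. The helper below is illustrative only and is not collected by pytest.
def _comparefloats_example():  # pragma: no cover
    a = np.array([1.0, 2.0], dtype='float32')
    assert comparefloats(a, a + np.float32(1e-7))       # within tolerance
    assert not comparefloats(a, a * np.float32(1.01))   # ~1% off -> differs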
def comparerecords(a, b):
"""
    Compare two record arrays.
    Does this field by field, using approximation testing for float columns
    (complex values are not yet handled).
    Column names are not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == 'S':
fielda = decode_ascii(fielda)
if fieldb.dtype.char == 'S':
fieldb = decode_ascii(fieldb)
if (not isinstance(fielda, type(fieldb)) and not
isinstance(fieldb, type(fielda))):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f'field {i} type differs')
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f'field {i} differs')
return False
elif (isinstance(fielda, fits.column._VLF) or
isinstance(fieldb, fits.column._VLF)):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
print('fielda[{}]: {}'.format(row, fielda[row]))
print('fieldb[{}]: {}'.format(row, fieldb[row]))
print(f'field {i} differs in row {row}')
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f'field {i} differs')
return False
return True
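# Hedged illustration (added; not part of the original test module):
# comparerecords walks the two record arrays column by column, so identical
# tables compare equal and a single changed value is reported as a difference.
def _comparerecords_example():  # pragma: no cover
    ra = np.rec.array([(1, 2.5), (2, 3.5)], names='a,b', formats='i4,f4')
    rb = np.rec.array([(1, 2.5), (2, 3.5)], names='a,b', formats='i4,f4')
    assert comparerecords(ra, rb)
    rb.b[1] = 9.0                      # perturb one float field
    assert not comparerecords(ra, rb)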
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr['FILENAME'] = 'labq01i3q_rawtag.fits'
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert thdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# open some existing FITS files:
tt = fits.open(self.data('tb.fits'))
fd = fits.open(self.data('test0.fits'))
# create some local arrays
a1 = chararray.array(['abc', 'def', 'xx'])
r1 = np.array([11., 12., 13.], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name='abc', format='3A', array=a1)
c2 = fits.Column(name='def', format='E', array=r1)
a3 = np.array([3, 4, 5], dtype='i2')
c3 = fits.Column(name='xyz', format='I', array=a3)
a4 = np.array([1, 2, 3], dtype='i2')
c4 = fits.Column(name='t1', format='I', array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype='c8')
c5 = fits.Column(name='t2', format='C', array=a5)
# Note that X format must be two-D array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name='t3', format='X', array=a6)
a7 = np.array([101, 102, 103], dtype='i4')
c7 = fits.Column(name='t4', format='J', array=a7)
a8 = np.array([[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]], dtype=np.uint8)
c8 = fits.Column(name='t5', format='11X', array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
# another way to create a table is by using existing table's
# information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view('bool')).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field('abc')) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
_ = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp('tableout1.fits'), overwrite=True)
with fits.open(self.temp('tableout1.fits')) as f2:
temp = f2[1].data.field(7)
assert (temp[0] == [True, True, False, True, False, True,
True, True, False, False, True]).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp('tableout2.fits'), 'append')
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data('tb.fits'))
assert t[1].header['tform1'] == '1J'
info = {'name': ['c1', 'c2', 'c3', 'c4'],
'format': ['1J', '3A', '1E', '1L'],
'unit': ['', '', '', ''],
'null': [-2147483647, '', '', ''],
'bscale': ['', '', 3, ''],
'bzero': ['', '', 0.4, ''],
'disp': ['I11', 'A3', 'G15.7', 'L6'],
'start': ['', '', '', ''],
'dim': ['', '', '', ''],
'coord_inc': ['', '', '', ''],
'coord_type': ['', '', '', ''],
'coord_unit': ['', '', '', ''],
'coord_ref_point': ['', '', '', ''],
'coord_ref_value': ['', '', '', ''],
'time_ref_pos': ['', '', '', '']}
assert t[1].columns.info(output=False) == info
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field('c4')[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, 'c4')) == '[84 84]'
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data('ascii.fits'))
ra1 = np.rec.array([
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345)], names='c1, c2')
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names='c1, c2')
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array([
(10.123000144958496, 37),
(15.609999656677246, 17),
(345.0, 345)
], names='c1, c2')
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(['abcd', 'def'])
r1 = np.array([11., 12.])
c1 = fits.Column(name='abc', format='A3', start=19, array=a1)
c2 = fits.Column(name='def', format='E', start=3, array=r1)
c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
assert (dict(hdu.data.dtype.fields) ==
{'abc': (np.dtype('|S3'), 18),
'def': (np.dtype('|S15'), 2),
't1': (np.dtype('|S10'), 21)})
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11., 12.])
c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,
bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with open(self.temp('toto.fits')) as f:
assert '4.95652173913043548D+00' in f.read()
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype='uint8')
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
hduL = fits.open(self.temp('testendian.fits'))
rfiHDU = hduL['RFI']
data = rfiHDU.data
channelsOut = data.field('Channels')[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1., 2., 3., 4.]
a1 = np.array(a, dtype='<f8')
a2 = np.array(a, dtype='>f8')
col1 = fits.Column(name='a', format='D', array=a1)
col2 = fits.Column(name='b', format='D', array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data['a'] == a1).all()
assert (tbhdu.data['b'] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
with fits.open(self.temp('testendian.fits')) as hdul:
assert (hdul[1].data['a'] == a2).all()
assert (hdul[1].data['b'] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float32,a10',
names='order,name,mag,Sp')
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'S20', 'float32', 'S10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'U20', 'float32', 'U10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
        # Ensure I can change the value of one data element and it affects
# all of the others.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == 'Serius'
assert hdu.data[1][1] == 'Canopys'
assert (hdu.data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == 'A1V'
assert hdu.data[1][3] == 'F0Ib'
with ignore_warnings():
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert (hdul[1].data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == 'Serius'
assert hdul[1].data[1][1] == 'Canopys'
assert (hdul[1].data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdul[1].data[0][3] == 'A1V'
assert hdul[1].data[1][3] == 'F0Ib'
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
assert comparerecords(hdu.data, tmp)
with ignore_warnings():
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data('tb.fits')) as h:
data = h[1].data
new_data = np.array([(3, 'qwe', 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith('FITS_rec(')
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert (t1[1].columns._arrays[1] is t1[1].columns.columns[1].array)
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp('newtable.fits'))
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 19, '8R x 5C', '[10A, J, 10A, 5E, L]',
'')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True),
('NGC5', 412, '', z, False),
('NGC6', 434, '', z, True),
('NGC7', 408, '', z, False),
('NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Verify that all of the references to the data point to the same
# numarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(tbhdu1.data, array)
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target1', format='10A', array=names)
c2 = fits.Column(name='counts1', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes1', format='A10')
c4 = fits.Column(name='spectrum1', format='5E')
c5 = fits.Column(name='flag1', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp('newtable.fits'))
# Verify that all of the references to the data point to the same
# numarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 30, '4R x 10C',
'[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]', '')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
assert (hdu.columns.names ==
['target', 'counts', 'notes', 'spectrum', 'flag', 'target1',
'counts1', 'notes1', 'spectrum1', 'flag1'])
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {'a': 2, 'b': 'b', 'c': 2.3}
data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'S1'), ('c', float)])
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
b.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
header = hdul[1].header
assert header['TNULL1'] == 2
assert header['TNULL2'] == 'b'
assert header['TNULL3'] == 2.3
def test_mask_array(self):
t = fits.open(self.data('table.fits'))
tbdata = t[1].data
mask = tbdata.field('V_mag') > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp('newtable.fits'))
hdul = fits.open(self.temp('newtable.fits'))
# match to a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
row = t1[1].data[2]
assert row['counts'] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ''
assert (c == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
row['counts'] = 310
assert row['counts'] == 310
row[1] = 315
assert row['counts'] == 315
assert row[1:4]['counts'] == 315
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
assert row['counts'] == 300
row[1:4][0] = 400
assert row[1:4]['counts'] == 400
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]['counts'] == 500
row[1:4:2][0] = 300
assert row[1:4]['counts'] == 300
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
assert row[1:4].field(0) == 300
assert row[1:4].field('counts') == 300
pytest.raises(KeyError, row[1:4].field, 'flag')
row[1:4].setfield('counts', 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, 'flag', False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name='target', format='10A')
c2 = fits.Column(name='counts', format='J', unit='DN')
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L')
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
        # Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
        # Test assigning data to a table's row using a tuple
tbhdu.data[2] = ('NGC1', 312, 'A Note',
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True)
        # Test assigning data to a table's row using a list
tbhdu.data[3] = ['JIM1', '33', 'A Note',
np.array([1., 2., 3., 4., 5.], dtype=np.float32),
True]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == 'NGC1'
assert tbhdu.columns.columns[2].array[0] == ''
assert (tbhdu.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[0] == True # nopep8
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == 'JIM1'
assert tbhdu.columns.columns[2].array[3] == 'A Note'
assert (tbhdu.columns.columns[3].array[3] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[3] == True # nopep8
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1., 2., 3., 4., 5.], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
# Assign the 4 rows from the second table to rows 5 thru 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.data._coldefs._arrays[0]))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns.columns[0].array))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns._arrays[0]))
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == 'NGC1'
assert tbhdu2.columns.columns[2].array[0] == ''
assert (tbhdu2.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[0] == True # nopep8
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == 'NGC5'
assert tbhdu2.columns.columns[2].array[4] == ''
assert (tbhdu2.columns.columns[3].array[4] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[4] == False # nopep8
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ''
assert tbhdu2.columns.columns[2].array[8] == ''
assert (tbhdu2.columns.columns[3].array[8] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[8] == False # nopep8
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert (id(coldefs.columns[0].array) !=
id(tbhdu.columns.columns[0].array))
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.data._coldefs._arrays[0]))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns.columns[0].array))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns._arrays[0]))
        # Ensure I can change the value of one data element and it affects
# all of the others.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any name in an existing 'EXTNAME' value.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = hducls(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = hducls(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert 'EXTVER' not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header['EXTVER'] == 2
# Passing name to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header['EXTVER'] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header['EXTVER'] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name='spam', format='E', array=[42.])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name='flag', format='2L',
array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (tbhdu1.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu1.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (tbhdu.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
def test_fits_rec_column_access(self):
tbdata = fits.getdata(self.data('table.fits'))
assert (tbdata.V_mag == tbdata.field('V_mag')).all()
assert (tbdata.V_mag == tbdata['V_mag']).all()
# Table with scaling (c3) and tnull (c1)
tbdata = fits.getdata(self.data('tb.fits'))
for col in ('c1', 'c2', 'c3', 'c4'):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# ascii table
tbdata = fits.getdata(self.data('ascii.fits'))
for col in ('a', 'b'):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# with VLA column
col1 = fits.Column(name='x', format='PI()',
array=np.array([[45, 56], [11, 12, 13]],
dtype=np.object_))
hdu = fits.BinTableHDU.from_columns([col1])
assert type(hdu.data['x']) == type(hdu.data.x) # noqa
assert (hdu.data['x'][0] == hdu.data.x[0]).all()
assert (hdu.data['x'][1] == hdu.data.x[1]).all()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data('zerowidth.fits'))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert 'ORBPARM' in tbhdu.columns.names
        # The zero-width ORBPARM column is still listed among the data's column
        # names, and the data should remain readable despite it
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.writeto(self.temp('newtable.fits'))
hdul.close()
hdul = fits.open(self.temp('newtable.fits'))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert 'ORBPARM' in tbhdu.columns.names
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.close()
def test_string_column_padding(self):
a = ['img1', 'img2', 'img3a', 'p']
s = 'img1\x00\x00\x00\x00\x00\x00' \
'img2\x00\x00\x00\x00\x00\x00' \
'img3a\x00\x00\x00\x00\x00' \
'p\x00\x00\x00\x00\x00\x00\x00\x00\x00'
acol = fits.Column(name='MEMNAME', format='A10',
array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tobytes().decode('raw-unicode-escape') == s
ahdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
with ignore_warnings():
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert (hdul[1].data.tobytes().decode('raw-unicode-escape') ==
s.replace('\x00', ' '))
assert (hdul[1].data['MEMNAME'] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[([0, 1, 2, 3, 4, 5], 'row1' * 2),
([6, 7, 8, 9, 0, 1], 'row2' * 2),
([2, 3, 4, 5, 6, 7], 'row3' * 2)], formats='6i4,a8')
thdu = fits.BinTableHDU.from_columns(data)
# Modify the TDIM fields to my own specification
thdu.header['TDIM1'] = '(2,3)'
thdu.header['TDIM2'] = '(4,2)'
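        # Note that TDIMn dimensions are given in FITS (Fortran, column-major)
        # order, so TDIM1 = '(2,3)' yields a numpy array of shape (3, 2) per row.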
thdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits')) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (c1 == np.array([[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]]])).all()
assert (c2 == np.array([['row1', 'row1'],
['row2', 'row2'],
['row3', 'row3']])).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', 4)])
data['x'] = 1, 2, 3
data['s'] = 'ok'
with ignore_warnings():
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', (4, 3))])
data['x'] = 1, 2, 3
data['s'] = 'ok'
del t
with ignore_warnings():
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4, 3)
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b'abcd', b'efgh'],
[b'ijkl', b'mnop'],
[b'qrst', b'uvwx']]
arr = np.array([(data,), (data,), (data,), (data,), (data,)],
dtype=[('S', '(3, 2)S4')])
with catch_warnings() as w:
tbhdu1 = fits.BinTableHDU(data=arr)
assert len(w) == 0
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(4,2,3)'
assert tbhdu2.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
assert np.all(tbhdu2.data['S'] == tbhdu.data['S'])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
            than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b'ab', b'cd'], [b'ef', b'gh'], [b'ij', b'kl']]
arr2 = [1, 2, 3, 4, 5]
arr = np.array([(arr1, arr2), (arr1, arr2)],
dtype=[('a', '(3, 2)S2'), ('b', '5i8')])
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp('test.fits'), 'wb') as f:
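            # The replacement value has the same length as the original, so the
            # header size (and all byte offsets) remain unchanged.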
f.write(raw_bytes.replace(b'(2,2,3)', b'(2,2,2)'))
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(2,2,2)'
assert tbhdu2.header['TFORM1'] == '12A'
for row in tbhdu2.data:
assert np.all(row['a'] == [['ab', 'cd'], ['ef', 'gh']])
assert np.all(row['b'] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [['abc', 'def', 'ghi'],
['jkl', 'mno', 'pqr'],
['stu', 'vwx', 'yz ']]
recarr = np.rec.array([(data,), (data,)], formats=['(3,3)S3'])
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
with fits.open(self.temp('test.fits')) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(['a', 'b'], dtype='|S1')
arrb = np.array([['a', 'bc'], ['cd', 'e']], dtype='|S2')
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name='str', format='1A', array=arra),
fits.Column(name='strarray', format='4A', dim='(2,2)',
array=arrb),
fits.Column(name='intarray', format='4I', dim='(2, 2)',
array=arrc)
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data['str'].encode('ascii') == arra).all()
assert (h[1].data['strarray'].encode('ascii') == arrb).all()
assert (h[1].data['intarray'] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works if less than (treating the trailing bytes
as unspecified fill values per the FITS standard) and fails if the
dimensions specified by TDIMn are greater than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [fits.Column(name='a', format='20I', dim='(2,2)',
array=arra),
fits.Column(name='b', format='4I', dim='(2,2)',
array=arrb)]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM1'] == '20I'
assert h[1].header['TFORM2'] == '4I'
assert h[1].header['TDIM1'] == h[1].header['TDIM2'] == '(2,2)'
assert (h[1].data['a'] == arra).all()
assert (h[1].data['b'] == arrb).all()
            assert h[1].data.itemsize == 48  # 24 16-bit integers (20 + 4) = 48 bytes per row
# If dims is more than the repeat count in the format specifier raise
# an error
pytest.raises(VerifyError, fits.Column, name='a', format='2I',
dim='(2,2)', array=arra)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data('tdim.fits')) as hdulist:
assert hdulist[1].data['V_mag'].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
targets = data.field('target')
s = data[:]
assert (s.field('target') == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field('target') == targets[:n]).all()
s = data[n:]
assert (s.field('target') == targets[n:]).all()
s = data[::2]
assert (s.field('target') == targets[::2]).all()
s = data[::-1]
assert (s.field('target') == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data('table.fits')) as hdu:
data = hdu[1].data
data['V_mag'] = 0
assert np.all(data['V_mag'] == 0)
data['V_mag'] = 1
assert np.all(data['V_mag'] == 1)
for container in (list, tuple, np.array):
data['V_mag'] = container([1, 2, 3])
assert np.array_equal(data['V_mag'], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data('table.fits'), mode='readonly') as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_dump_load_round_trip(self):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
hdul = fits.open(self.data('table.fits'))
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
# Double check that the headers are equivalent
assert str(tbhdu.header) == str(new_tbhdu.header)
hdul.close()
    def test_dump_load_array_columns(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array([('a', [1, 2, 3, 4], 0.1),
('b', [5, 6, 7, 8], 0.2)],
formats='a1,4i4,f8')
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
science to this; the table that's produced simply uses sensible guesses
for that format. Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name='c0', format='L', array=a0)
# Format X currently not supported by the format
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name='c2', format='B', array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name='c3', format='I', array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name='c4', format='J', array=a4)
a5 = np.array(['a', 'abc', 'ab'])
c5 = fits.Column(name='c5', format='A3', array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name='c6', format='D', array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j],
dtype=np.complex128)
c7 = fits.Column(name='c7', format='M', array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name='c8', format='PJ()', array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp('data.txt')
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
        field access by attribute lookup.  However, if a field name coincides
        with an existing attribute/method of the array, the existing name takes
        precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name='names', format='I', array=[1])
c2 = fits.Column(name='formats', format='I', array=[2])
c3 = fits.Column(name='other', format='I', array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ['names', 'formats', 'other']
assert t.data.formats == ['I'] * 3
assert (t.data['names'] == [1]).all()
assert (t.data['formats'] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats='|b1,|b1')
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp('table.fits'))
data = fits.getdata(self.temp('table.fits'), ext=1)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[('a', '?')])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data['a'] == arr['a']).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column('F1', 'L', array=[True, False])
c2 = fits.Column('F2', 'L', array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp('table.fits'))
with fits.open(self.temp('table.fits'), mode='update') as hdul:
hdul[1].data['F1'][1] = True
hdul[1].data['F2'][0] = True
with fits.open(self.temp('table.fits')) as hdul:
assert (hdul[1].data['F1'] == [True, True]).all()
assert (hdul[1].data['F2'] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column('F1', 'A3', null='---',
array=np.array(['1.0', '2.0', '---', '3.0']),
ascii=True)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp('test.fits'))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp('test.fits'), mode='update') as h:
h[1].header['TFORM1'] = 'E3'
del h[1].header['TNULL1']
with fits.open(self.temp('test.fits')) as h:
pytest.raises(ValueError, lambda: h[1].data['F1'])
try:
with fits.open(self.temp('test.fits')) as h:
h[1].data['F1']
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data")
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = ' '
c1 = fits.Column('F1', format='I8', null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp('ascii_null.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null.fits'), mode='r+') as h:
nulled = h.read().replace('2 ', ' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null.fits'), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = 'NaN'
c2 = fits.Column('F1', format='F12.8', null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp('ascii_null2.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null2.fits'), mode='r+') as h:
nulled = h.read().replace('3.00000000', ' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null2.fits'), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('tb.fits')) as h:
h[1].data
h[1].data = None
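            # Assigning None replaces the data with an empty FITS_rec that keeps
            # the original column structure (NAXIS1 is preserved, NAXIS2 becomes 0)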
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['NAXIS'] == 2
assert h[1].header['NAXIS1'] == 12
assert h[1].header['NAXIS2'] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
    def test_unnecessary_table_load(self):
        """Test that no unnecessary parsing or processing of FITS tables occurs
        when writing directly from one FITS file to a new file without first
        reading the data for user manipulation.
        In other words, it should be possible to do a direct copy of the raw
        data without unnecessary processing of the data.
"""
with fits.open(self.data('table.fits')) as h:
h[1].writeto(self.temp('test.fits'))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert 'data' not in h[1].__dict__
with fits.open(self.data('table.fits')) as h1:
with fits.open(self.temp('test.fits')) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data('table.fits'))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data('tb.fits')) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
with ignore_warnings():
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata['c1'] == tbdata2['c1'])
assert np.all(tbdata['c2'] == tbdata2['c2'])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(tbdata['c3'].astype(np.float32) ==
tbdata2['c3'].astype(np.float32))
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata['c4'], 'T', 'F') ==
tbdata2['c4'])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
            # Doesn't pickle zero-width (_phantom) column 'ORBPARM'
with ignore_warnings():
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[
('a', 'i8'),
('b', 'S64'),
('c', ('i4', (3, 2)))])
hdu = fits.BinTableHDU(array)
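        # Expected row width: 8 bytes (i8) + 64 bytes (S64) + 3*2*4 bytes (i4) = 96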
assert hdu.header['NAXIS1'] == 96
assert hdu.header['NAXIS2'] == 0
assert hdu.header['TDIM3'] == '(2,3)'
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data('random_groups.fits'))['DATA']
col = fits.Column(name='TEST', array=data, dim='(3,1,128,1,1)',
format='1152E')
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[1].data['TEST'] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data('tb.fits'))
data2 = fits.getdata(self.data('tb.fits'))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1):] = data2
mask = merged['c1'] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data('tb.fits')))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([('abc',)], dtype=[('a', 'S3')])
fits.writeto(self.temp('test.fits'), data)
with fits.open(self.temp('test.fits'), mode='update') as hdul:
hdul[1].data['a'][0] = 'XYZ'
assert hdul[1].data['a'][0] == 'XYZ'
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].data['a'][0] == 'XYZ'
# Test update but with a non-trivial TDIMn
data = np.array([([['abc', 'def', 'geh'],
['ijk', 'lmn', 'opq']],)],
dtype=[('a', ('S3', (2, 3)))])
fits.writeto(self.temp('test2.fits'), data)
expected = [['abc', 'def', 'geh'],
['ijk', 'XYZ', 'opq']]
with fits.open(self.temp('test2.fits'), mode='update') as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data['a'][0, 1, 1] = 'XYZ'
assert np.all(hdul[1].data['a'][0] == expected)
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
assert np.all(hdul[1].data['a'][0] == expected)
@pytest.mark.skipif('not HAVE_OBJGRAPH')
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting('FITS_rec'):
readfile(self.data('memtest.fits'))
@pytest.mark.skipif('not HAVE_OBJGRAPH')
def test_reference_leak2(self, tmpdir):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_core import TestCore
from .test_connect import TestMultipleHDU
t1 = TestCore()
t1.setup()
try:
with _refcounting('FITS_rec'):
t1.test_add_del_columns2()
finally:
t1.teardown()
del t1
t2 = self.__class__()
for test_name in ['test_recarray_to_bintablehdu',
'test_numpy_ndarray_to_bintablehdu',
'test_new_table_from_recarray',
'test_new_fitsrec']:
t2.setup()
try:
with _refcounting('FITS_rec'):
getattr(t2, test_name)()
finally:
t2.teardown()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting('FITS_rec'):
t3.test_read(tmpdir)
finally:
t3.teardown_class()
del t3
def test_dump_clobber_vs_overwrite(self):
with fits.open(self.data('table.fits')) as hdul:
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
tbhdu.dump(datafile, cdfile, hfile, clobber=True)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"clobber" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "overwrite" instead.')
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
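        # FITS has no unsigned 32-bit integer type, so uint32 data is stored in a
        # signed 'J' column with TZERO1 = 2**31 (the "pseudo-unsigned" convention)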
col = fits.Column(name='A', format='1J', bzero=2**31, array=data)
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
# Test that the file wrote out correctly
with fits.open(self.temp('test.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == data)
# Test updating the unsigned int data
hdu.data['A'][0] = 99
hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == [99, 2, 3])
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(name='c1', array=np.array([1], dtype='>i2'),
format='1I', bscale=1, bzero=32768)
S = fits.HDUList([fits.PrimaryHDU(),
fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data['c1'][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data['c1'] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data['c1'][0] = 10
assert X[1].data['c1'][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data['c1'][0] == 10
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
    assert len(objgraph.by_type(type_)) <= refcount, \
        "More {0!r} objects still in memory than before.".format(type_)
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[[0] * 1571] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
with ignore_warnings():
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as toto:
q = toto[1].data.field('QUAL_SPE')
assert (q[0][4:8] ==
np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith('J(1571)')
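        # 'P' and 'Q' are the variable-length array formats with 32-bit and
        # 64-bit array descriptors, respectively.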
for code in ('PJ()', 'QJ()'):
test(code)
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name='TESTVLF', format=format_code, array=arr)
col2 = fits.Column(name='TESTSCA', format='J', array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data['TESTSCA']) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data['TESTVLF']) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data['TESTVLF'][0] == arr[0]).all()
assert (tb_hdu.data['TESTVLF'][9] == arr[9]).all()
assert (tb_hdu.data['TESTVLF'][10] == ([0] * 10)).all()
assert (tb_hdu.data['TESTVLF'][-1] == ([0] * 10)).all()
for code in ('PJ()', 'QJ()'):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array([np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])]
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array([np.array(['a', 'b', 'c']), np.array(['d', 'e']),
np.array(['f'])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ['a', 'ab', 'abc']
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[np.arange(1572)] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
with ignore_warnings():
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
data = fits.getdata(self.temp('toto.fits'))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data['QUAL_SPE'], col.array):
assert (row_a == row_b).all()
for code in ('PJ()', 'QJ()'):
test(code)
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column('test', format='J', array=np.arange(255))
c1 = fits.Column('A', format='PJ', array=arr1)
c2 = fits.Column('B', format='PJ', array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp('test.fits'), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM2'] == 'PJ(255)'
assert h[2].header['TFORM2'] == 'PJ(255)'
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp('test.fits')) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp('test3.fits'))
with fits.open(self.temp('test3.fits')) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp('test2.fits'))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp('test2.fits'), mode='append') as new_hdul:
for _ in range(2):
with fits.open(self.temp('test.fits')) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp('test2.fits')) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized so
        long as they are unambiguous (where "unambiguous" is somewhat loose,
        since Numpy is case-insensitive when parsing the format codes; but
        their "proper" case is lower-case, so we can accept that).  In
        practice, any key in the NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
c = fits.Column('TEST', np.dtype(recformat))
c.format == fitsformat
c = fits.Column('TEST', recformat)
c.format == fitsformat
c = fits.Column('TEST', fitsformat)
c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column('TEST', 'I4')
assert c.format == 'I4'
assert c.format.format == 'I'
assert c.format.width == 4
c = fits.Column('TEST', 'F15.8')
assert c.format == 'F15.8'
assert c.format.format == 'F'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'E15.8')
assert c.format.format == 'E'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'D15.8')
assert c.format.format == 'D'
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column('TEST', 'F10.0')
assert c.format.format == 'F'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'E10.0')
assert c.format.format == 'E'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'D10.0')
assert c.format.format == 'D'
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column('TEST', 'I')
assert c.format == 'I'
assert c.format.recformat == 'i2'
c = fits.Column('TEST', 'I', ascii=True)
assert c.format == 'I10'
c = fits.Column('TEST', 'E')
assert c.format == 'E'
assert c.format.recformat == 'f4'
c = fits.Column('TEST', 'E', ascii=True)
assert c.format == 'E15.7'
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column('TEST', 'F')
assert c.format == 'F16.7'
c = fits.Column('TEST', 'D')
assert c.format == 'D'
assert c.format.recformat == 'f8'
c = fits.Column('TEST', 'D', ascii=True)
assert c.format == 'D25.17'
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column('TEST', 'F5.0', array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['TFORM1'] == 'F5.0'
assert hdul[1].data['TEST'].dtype == np.dtype('float64')
assert np.all(hdul[1].data['TEST'] == [1.0, 2.0, 3.0])
# Check how the raw data looks
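            # Calling np.rec.recarray.field directly bypasses FITS_rec's
            # ASCII-to-float conversion and exposes the raw bytes stored on disk.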
raw = np.rec.recarray.field(hdul[1].data, 'TEST')
assert raw.tobytes() == b' 1. 2. 3.'
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
    def test_new_coldefs_with_invalid_sequence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[('A', '<u4', (2,)), ('B', '>u2')])
col_defs = fits.column.ColDefs(nd_array)
assert 2**31 == col_defs['A'].bzero
assert 2**15 == col_defs['B'].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
            # Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match=r'Field 2 has a repeat count '
r'of 0 in its format code'):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name='a', format='D')
b = fits.Column(name='b', format='D')
cols = fits.ColDefs([a, b])
assert cols['a'] == cols[0]
assert cols['b'] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns([
fits.Column('a', format='D'),
fits.Column('b', format='D')])
b = table.columns['b']
table.columns.del_col('b')
assert table.data.dtype.names == ('a',)
b.name = 'HELLO'
assert b.name == 'HELLO'
assert 'TTYPE2' not in table.header
assert table.header['TTYPE1'] == 'a'
assert table.columns.names == ['a']
with pytest.raises(KeyError):
table.columns['b']
# Make sure updates to the remaining column still work
table.columns.change_name('a', 'GOODBYE')
with pytest.raises(KeyError):
table.columns['a']
assert table.columns['GOODBYE'].name == 'GOODBYE'
assert table.data.dtype.names == ('GOODBYE',)
assert table.columns.names == ['GOODBYE']
assert table.data.columns.names == ['GOODBYE']
table.columns['GOODBYE'].name = 'foo'
with pytest.raises(KeyError):
table.columns['GOODBYE']
assert table.columns['foo'].name == 'foo'
assert table.data.dtype.names == ('foo',)
assert table.columns.names == ['foo']
assert table.data.columns.names == ['foo']
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column('xcol', format='5X', array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column('pcol', format='PJ', array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column('qcol', format='QJ', array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
        those that are typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format='I', array=[1, 2, 3, 4, 5])
assert 'Column name must be a string able to fit' in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('col', format=0, null='Nan', disp=1, coord_type=1,
coord_unit=2, coord_inc='1', time_ref_pos=1,
coord_ref_point='1', coord_ref_value='1')
err_msgs = ['keyword arguments to Column were invalid',
'TFORM', 'TNULL', 'TDISP', 'TCTYP', 'TCUNI', 'TCRPX',
'TCRVL', 'TCDLT', 'TRPOS']
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with highest priority,
i.e. the order of error messages to be displayed is maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='B', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='I', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='I', start='-56', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(err.value)
@pytest.mark.parametrize('keys',
[{'TFORM': 'Z', 'TDISP': 'E'},
{'TFORM': '2', 'TDISP': '2E'},
{'TFORM': 3, 'TDISP': 6.3},
{'TFORM': float, 'TDISP': np.float64},
{'TFORM': '', 'TDISP': 'E.5'}])
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column('col', format=keys['TFORM'], disp=keys['TDISP'])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
    # Regression test for an undefined-variable error triggered by renaming a
    # column after its TTYPE keyword has been deleted (astropy issue 5383)
x = np.array([1, 2, 3])
col = fits.Column(name='a', array=x, format='E')
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header['TTYPE1']
hdu.columns[0].name = 'b'
def test_table_to_hdu():
from astropy.table import Table
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = 'm/s'
table['b'].unit = 'not-a-unit'
table.meta['foo'] = 'bar'
with catch_warnings() as w:
hdu = fits.BinTableHDU(table, header=fits.Header({'TEST': 1}))
assert len(w) == 1
assert str(w[0].message).startswith("'not-a-unit' did not parse as"
" fits unit")
for name in 'abc':
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
assert hdu.header['FOO'] == 'bar'
assert hdu.header['TEST'] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)],
dtype=[('x', float), ('y', int)]).view(fits.FITS_rec)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__; so test elements.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmpdir):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr['TUNIT1'] = 'pixel'
hdr['TUNIT2'] = 'm'
hdr['TUNIT3'] = 'm'
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr['TCTYP2'] = 'RA---TAN'
hdr['TCTYP3'] = 'ANGLE'
hdr['TCRVL2'] = -999.0
hdr['TCRVL3'] = -999.0
hdr['TCRPX2'] = 1.0
hdr['TCRPX3'] = 1.0
hdr['TALEN2'] = 16384
hdr['TALEN3'] = 1024
hdr['TCUNI2'] = 'angstrom'
hdr['TCUNI3'] = 'deg'
# Other non-relevant keywords
hdr['RA'] = 1.5
hdr['DEC'] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith("The following keywords are now recognized as special")
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == 's'
assert hdu.columns[1].unit == 'pixel'
assert hdu.columns[2].unit is None
assert hdu.header['TUNIT1'] == 's'
assert hdu.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert 'TCTYP1' not in hdu.header
assert hdu.header['TCTYP2'] == 'RA---TAN'
assert hdu.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu.header['RA'] == 1.5
assert hdu.header['DEC'] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
    # cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmpdir.join('test.fits').strpath
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with pytest.warns(None) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == 's'
assert hdu2.columns[1].unit == 'pixel'
assert hdu2.columns[2].unit is None
assert hdu2.header['TUNIT1'] == 's'
assert hdu2.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == 'RA---TAN'
assert hdu2.columns[2].coord_type == 'ANGLE'
assert 'TCTYP1' not in hdu2.header
assert hdu2.header['TCTYP2'] == 'RA---TAN'
assert hdu2.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu2.header['RA'] == 1.5
assert hdu2.header['DEC'] == 3.0
def test_empty_table(tmpdir):
ofile = str(tmpdir.join('emptytable.fits'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
ofile = str(tmpdir.join('emptytable.fits.gz'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
def test_a3dtable(tmpdir):
testfile = str(tmpdir.join('test.fits'))
hdu = fits.BinTableHDU.from_columns([
fits.Column(name='FOO', format='J', array=np.arange(10))
])
hdu.header['XTENSION'] = 'A3DTABLE'
hdu.writeto(testfile, output_verify='ignore')
with fits.open(testfile) as hdul:
assert hdul[1].header['XTENSION'] == 'A3DTABLE'
with catch_warnings() as w:
hdul.verify('fix')
assert str(w[0].message) == 'Verification reported errors:'
assert str(w[2].message).endswith(
'Converted the XTENSION keyword to BINTABLE.')
assert hdul[1].header['XTENSION'] == 'BINTABLE'
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header['FOO'] = None
hdu.header.cards['FOO']._value = np.nan
testfile = tmp_path / 'test.fits'
hdu.writeto(testfile, output_verify='ignore')
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
| 40.162846 | 106 | 0.563714 |
4a2135a70f9b99a5d077588fea10553d7f201abc
| 557 |
py
|
Python
|
odin_securities/queries/gets/__init__.py
|
JamesBrofos/Odin-Securities
|
a07d3a21bcd3f78513ef394d4e8b620b7ca7fad8
|
[
"MIT"
] | 13 |
2017-02-04T08:41:10.000Z
|
2020-06-09T12:43:09.000Z
|
odin_securities/queries/gets/__init__.py
|
JamesBrofos/Odin-Securities
|
a07d3a21bcd3f78513ef394d4e8b620b7ca7fad8
|
[
"MIT"
] | 1 |
2020-11-15T05:32:18.000Z
|
2020-11-15T05:32:18.000Z
|
odin_securities/queries/gets/__init__.py
|
JamesBrofos/Odin-Securities
|
a07d3a21bcd3f78513ef394d4e8b620b7ca7fad8
|
[
"MIT"
] | 9 |
2017-02-05T21:51:44.000Z
|
2020-03-23T10:55:11.000Z
|
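# Re-export the individual "get" query helpers at the package level so they
# can be imported directly from this package.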
from .symbols import symbols
from .id_for_portfolio import id_for_portfolio
from .id_for_symbol import id_for_symbol
from .id_for_vendor import id_for_vendor
from .id_for_fund import id_for_fund
from .prices import prices
from .actions import actions
from .standard_sessions import standard_sessions
from .symbols_for_vendor import symbols_for_vendor
from .portfolio import portfolio
from .fund import fund, fund_for_fund_id
from .positions_for_portfolio_id import positions_for_portfolio_id
from .updated_for_symbol_vendor import updated_for_symbol_vendor
| 39.785714 | 66 | 0.879713 |
4a2136d62beaaf726f187234c0e4ae0c2626a955
| 1,535 |
py
|
Python
|
tests/test_types_itp_quartairstratigrafie.py
|
rebot/pydov
|
1d5f0080440f4e0f983c8087aed9aec1624ba906
|
[
"MIT"
] | null | null | null |
tests/test_types_itp_quartairstratigrafie.py
|
rebot/pydov
|
1d5f0080440f4e0f983c8087aed9aec1624ba906
|
[
"MIT"
] | null | null | null |
tests/test_types_itp_quartairstratigrafie.py
|
rebot/pydov
|
1d5f0080440f4e0f983c8087aed9aec1624ba906
|
[
"MIT"
] | null | null | null |
"""Module grouping tests for the
pydov.types.interpretaties.QuartairStratigrafie class."""
from pydov.types.interpretaties import QuartairStratigrafie
from pydov.util.dovutil import build_dov_url
from tests.abstract import AbstractTestTypes
location_wfs_getfeature = \
'tests/data/types/interpretaties/quartaire_stratigrafie/wfsgetfeature.xml'
location_wfs_feature = \
'tests/data/types/interpretaties/quartaire_stratigrafie/feature.xml'
location_dov_xml = \
'tests/data/types/interpretaties/quartaire_stratigrafie/' \
'quartaire_stratigrafie.xml'
class TestQuartairStratigrafie(AbstractTestTypes):
"""Class grouping tests for the
pydov.types.interpretaties.QuartairStratigrafie class."""
datatype_class = QuartairStratigrafie
namespace = 'http://dov.vlaanderen.be/ocdov/interpretaties'
pkey_base = build_dov_url('data/interpretatie/')
field_names = [
'pkey_interpretatie', 'pkey_boring',
'betrouwbaarheid_interpretatie', 'x', 'y',
'diepte_laag_van', 'diepte_laag_tot', 'lid1',
'relatie_lid1_lid2', 'lid2']
field_names_subtypes = [
'diepte_laag_van', 'diepte_laag_tot', 'lid1',
'relatie_lid1_lid2', 'lid2']
field_names_nosubtypes = [
'pkey_interpretatie', 'pkey_boring',
'betrouwbaarheid_interpretatie', 'x', 'y']
valid_returnfields = ('pkey_interpretatie', 'pkey_boring')
valid_returnfields_subtype = (
'pkey_interpretatie', 'diepte_laag_van', 'diepte_laag_tot')
inexistent_field = 'onbestaand'
| 37.439024 | 78 | 0.740065 |
4a2137fc201710e1215cf4d83a8c9d809c6aa9de
| 628 |
py
|
Python
|
mne/decoding/__init__.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 3 |
2021-01-04T08:45:56.000Z
|
2021-05-19T12:25:59.000Z
|
mne/decoding/__init__.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 23 |
2017-09-12T11:08:26.000Z
|
2019-10-04T11:11:29.000Z
|
mne/decoding/__init__.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 3 |
2019-01-28T13:48:00.000Z
|
2019-07-10T16:02:11.000Z
|
"""Decoding and encoding, including machine learning and receptive fields."""
from .transformer import (PSDEstimator, Vectorizer,
UnsupervisedSpatialFilter, TemporalFilter,
Scaler, FilterEstimator)
from .mixin import TransformerMixin
from .base import BaseEstimator, LinearModel, get_coef, cross_val_multiscore
from .csp import CSP, SPoC
from .ems import compute_ems, EMS
from .time_frequency import TimeFrequency
from .receptive_field import ReceptiveField
from .time_delaying_ridge import TimeDelayingRidge
from .search_light import SlidingEstimator, GeneralizingEstimator
| 44.857143 | 77 | 0.778662 |
4a21381177d211e0812ad4107ef9ae72772adbbb
| 2,414 |
py
|
Python
|
home/models.py
|
Dogruyer/hospital-automation
|
e5faffce93c0bda475cd0dbd0e7ef0f55b11564d
|
[
"Apache-2.0"
] | null | null | null |
home/models.py
|
Dogruyer/hospital-automation
|
e5faffce93c0bda475cd0dbd0e7ef0f55b11564d
|
[
"Apache-2.0"
] | null | null | null |
home/models.py
|
Dogruyer/hospital-automation
|
e5faffce93c0bda475cd0dbd0e7ef0f55b11564d
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
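# NOTE: the ForeignKey fields below use the positional, pre-Django-2.0 signature
# (where on_delete defaulted to CASCADE); Django >= 2.0 would require an
# explicit on_delete argument.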
class Hasta(models.Model):
adi = models.CharField(max_length=100)
yas = models.IntegerField()
cinsiyet = models.CharField(max_length=100)
tc_kimlik_no = models.CharField(max_length=100)
class Hastane(models.Model):
adi = models.CharField(max_length=100)
lokasyon = models.CharField(max_length=100)
sahibi = models.CharField(max_length=100)
class Doktor(models.Model):
adi = models.CharField(max_length=100)
yas = models.IntegerField()
cinsiyet = models.CharField(max_length=100)
tc_kimlik_no = models.CharField(max_length=100)
brans = models.CharField(max_length=100)
unvan = models.CharField(max_length=100)
bolum = models.CharField(max_length=100)
hastane = models.ForeignKey(Hastane)
class IdariKadro(models.Model):
adi = models.CharField(max_length=100)
yas = models.IntegerField()
cinsiyet = models.CharField(max_length=100)
unvan = models.CharField(max_length=100)
bolum = models.CharField(max_length=100)
hastane = models.ForeignKey(Hastane)
class Kullanici(models.Model):
adi = models.CharField(max_length=100)
yas = models.IntegerField()
cinsiyet = models.CharField(max_length=100)
mail = models.EmailField(max_length=100)
tc_kimlik_no = models.CharField(max_length=100)
aktif = models.BooleanField(default=True)
yonetici = models.BooleanField(default=True)
class Hastalik(models.Model):
adi = models.CharField(max_length=100)
bolum = models.CharField(max_length=100)
class Teshis(models.Model):
adi = models.CharField(max_length=100)
hastalik = models.ForeignKey(Hastalik)
class Ilac(models.Model):
adi = models.CharField(max_length=100)
class Recete(models.Model):
numara = models.CharField(max_length=100)
hasta = models.ForeignKey(Hasta)
ilac = models.ForeignKey(Ilac)
class Randevu(models.Model):
doktor = models.ForeignKey(Doktor)
hasta = models.ForeignKey(Hasta)
hastane = models.ForeignKey(Hastane)
class Klinik(models.Model):
adi = models.CharField(max_length=100)
lokasyon = models.CharField(max_length=100)
class Yonetici(models.Model):
adi = models.CharField(max_length=100)
yas = models.IntegerField()
cinsiyet = models.CharField(max_length=100)
class Izinler(models.Model):
adi = models.CharField(max_length=100)
turu = models.CharField(max_length=100)
| 28.069767 | 51 | 0.73198 |
4a2139a5ccd5b1d9e30f9257efd2c07085656034
| 5,970 |
py
|
Python
|
api_extractor_config.py
|
SigmaAdvancedAnalytics/hydra
|
bf392dfb0e458d1156f52aa206629c1d1636d7a3
|
[
"MIT"
] | null | null | null |
api_extractor_config.py
|
SigmaAdvancedAnalytics/hydra
|
bf392dfb0e458d1156f52aa206629c1d1636d7a3
|
[
"MIT"
] | null | null | null |
api_extractor_config.py
|
SigmaAdvancedAnalytics/hydra
|
bf392dfb0e458d1156f52aa206629c1d1636d7a3
|
[
"MIT"
] | null | null | null |
from sqlalchemy import *
DEBUG = False
# ***************************************************************************
# > BE VERY CAREFUL ABOUT CREATE_TABLES.
# > WHEN SET TO TRUE IT WILL DROP ALL THE EXISTING TABLES AND CREATE NEW ONES.
# ***************************************************************************
CREATE_TABLES = False
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
DATE_FORMAT = "%Y-%m-%d"
HASHTAGS_BUFFER_DB = 'hashtags'
DATA_FOLDER = 'persistent_data'
SCHEMA_NAME = 'imports'
#-----------------------------------------------------------------------
# NOTE ON SCHEMAS:
# The schemas are wrapped inside a function, so that every time the function
# is called we get a new set of Column objects.
#
# This is needed for the 'retry a few times' feature, when we create the
# tableclass multiple times. I am not exactly sure why.
#-----------------------------------------------------------------------
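# A minimal sketch of the likely reason (an assumption, not used anywhere by
# this pipeline): a sqlalchemy Column object can only be bound to one Table,
# so the schema dict has to be rebuilt with fresh Column objects every time a
# table class is (re)created. The helper name below is hypothetical and is
# never called by this module.
def _fresh_columns_sketch():
    from sqlalchemy.exc import ArgumentError
    md = MetaData()
    cols = [Column('sketch_id', Integer, primary_key=True)]
    Table('sketch_one', md, *cols)          # binds the Column to 'sketch_one'
    try:
        Table('sketch_two', md, *cols)      # re-using the bound Column fails
    except ArgumentError as err:
        print('re-using a bound Column fails:', err)
    # A freshly created Column (as returned by the schema functions) works:
    Table('sketch_three', md, Column('sketch_id', Integer, primary_key=True))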
#-----------------------------------------------------------------------
# Twitter Table Schemas
#-----------------------------------------------------------------------
def twitter_account_schema():
twitter_account_schema = {
'Extract_Datetime': Column(String(length=30)),
'Account_ID': Column(String(length=30)),
'Display_Name': Column(String(length=100)),
'Handle': Column(String(length=30)),
'Statuses_Count': Column(Integer()),
'Followers_Count': Column(Integer()),
'Friends_Count': Column(Integer()),
'Favourites_Count': Column(Integer()),
'Listed_Count': Column(Integer()),
'Profile_URL': Column(String(length=300)),
'Profile_Image': Column(String(length=300)),
'Banner_Image': Column(String(length=300)),
'Location': Column(String(length=100)),
'Timezone': Column(String(length=50)),
'Description': Column(String(length=400)),
'Daily_Mentions_Count': Column(Integer()),
'Autogen_ID': Column(Integer(), primary_key=True),
'__tablename__': 'twitter_account_stats',
'__table_args__': {'schema' : SCHEMA_NAME}
}
return twitter_account_schema
def twitter_status_schema():
twitter_status_schema = {
'Extract_Datetime': Column(String(length=30)),
'Tweet_ID': Column(String(length=30)),
'Created_Date': Column(String(length=30)),
'Handle': Column(String(length=30)),
'Tweet_Text': Column(String(length=700)),
'Favourite_Count': Column(Integer()),
'Retweet_Count': Column(Integer()),
'Hashtags': Column(String(length=100)),
'Media': Column(String(length=3000)),
'URLs': Column(String(length=500)),
'Place': Column(String(length=500)),
'User_Mentions': Column(String(length=5000)),
'Favorited': Column(String(length=20)),
'Retweeted': Column(String(length=20)),
'In_reply_to_screen_name': Column(String(length=30)),
'Autogen_ID': Column(Integer(), primary_key=True),
'__tablename__': 'twitter_statuses',
'__table_args__': {'schema' : SCHEMA_NAME}
}
return twitter_status_schema
# For the mssql table
def twitter_hashtags_schema():
twitter_hashtags_schema = {
'Extract_Datetime': Column(String(length=30)),
'TweetID': Column(String(length=30)),
'TweetText': Column(String(length=700)),
'AccountID': Column(String(length=30)),
'DisplayName': Column(String(length=100)),
'Handle': Column(String(length=30)),
'CreatedAt': Column(String(length=30)),
'Hashtags': Column(String(length=100)),
'Autogen_ID': Column(Integer(), primary_key=True),
'__tablename__': 'twitter_hashtags',
'__table_args__': {'schema' : SCHEMA_NAME}
}
return twitter_hashtags_schema
# For the sqlite buffer table
twitter_hashtags_sqlite_schema = [
('TweetID', 'text'),
('TweetText', 'text'),
('AccountID', 'text'),
('DisplayName', 'text'),
('Handle', 'text'),
('CreatedAt', 'text'),
('Hashtags', 'text')
]
#-----------------------------------------------------------------------
# Youtube Table Schemas
#-----------------------------------------------------------------------
def channel_table_schema():
channel_table_schema = {
'Extract_Datetime': Column(String(length=30)),
'channel_name': Column(String(length=200)),
'channel_id': Column(String(length=30)),
'total_comments': Column(String(length=30)),
'subscribers': Column(Integer()),
'new_subscribers': Column(Integer()),
'total_views': Column(String(length=30)),
'videos': Column(Integer()),
'Autogen_ID': Column(Integer(), primary_key=True), # add a PK - SQLAlchemy demands this
'__tablename__': 'youtube_channel_stats', #tablename assignment is done inside the dictionary
'__table_args__': {'schema' : SCHEMA_NAME}
}
return channel_table_schema
def videos_table_schema():
videos_table_schema = {
'Extract_Datetime': Column(String(length=30)),
'publishedAt': Column(String(length=30)),
'video': Column(String(length=50)),
'title': Column(String(length=200)),
'likes': Column(Integer()),
'dislikes': Column(Integer()),
'comments': Column(Integer()),
'shares': Column(Integer()),
'views': Column(Integer),
'averageViewDuration': Column(Integer),
'subscribersGained': Column(Integer),
'subscribersLost': Column(Integer),
'Autogen_ID': Column(Integer(), primary_key=True), # add a PK - SQLAlchemy demands this
'__tablename__': 'youtube_videos_stats', #tablename assignment is done inside the dictionary
'__table_args__': {'schema' : SCHEMA_NAME}
}
return videos_table_schema
| 39.8 | 102 | 0.562647 |
4a2139e2da72ffd9befb49e1f916c7bc5931c139
| 3,241 |
py
|
Python
|
LogAnalysis/PreProcess/eNodeB_CMAC_Log.py
|
WenPeiyu/LianjiaSpider
|
9d4fd31df0120c756fbbd625b66f2fee4fe17891
|
[
"MIT"
] | null | null | null |
LogAnalysis/PreProcess/eNodeB_CMAC_Log.py
|
WenPeiyu/LianjiaSpider
|
9d4fd31df0120c756fbbd625b66f2fee4fe17891
|
[
"MIT"
] | null | null | null |
LogAnalysis/PreProcess/eNodeB_CMAC_Log.py
|
WenPeiyu/LianjiaSpider
|
9d4fd31df0120c756fbbd625b66f2fee4fe17891
|
[
"MIT"
] | null | null | null |
# Import modules
import pandas as pd
import re
import time
import sys
import os
# File paths
# FilePath = r"d:/data/"
# OriginFile = FilePath + r"BPN_3_40_3_104201810241801_output.txt"
# FixedCSV = FilePath + r"Fixed.csv"
# ErrorFile = FilePath + r"Error.txt"
# LogOutput = FilePath + r"LogOutput.xlsx"
FilePath = os.path.split(os.path.split(sys.argv[0])[0])[0] + "/rawdata"
OriginFile = FilePath + r"/enodeb.txt"
FixedCSV = FilePath + r"/Fixed.csv"
ErrorFile = FilePath + r"/Error.txt"
LogOutput = FilePath + r"/LogOutput.csv"
time1 = time.time()
# Read the raw log text and rewrite it as a CSV that uses ';;;;' as an unambiguous separator
with open(OriginFile, 'r') as f, open(FixedCSV, "w") as csv, open(ErrorFile, "w") as txt:
a = True
csv.write("")
while a != [""]:
a = f.readline()
a = a.split(" ")
try:
a[8]="".join(a[8:])
csv.write(";;;;".join(a[0:9]))
except:
txt.write(";;;;".join(a))
time2 = time.time()
# Regular expressions used to classify the log messages
re_I = re.compile(r"\[I\]")
re_U = re.compile(r"\[U\]")
re_D = re.compile(r"\[D\]")
re_RA = re.compile(r"\[RA\]")
re_TOOL = re.compile(r"\[TOOL\]")
re_MSG = re.compile(r"MSG")
time3 = time.time()
# Read the CSV back and split out the individual fields
df_log = pd.read_csv(FixedCSV, sep=";;;;", header=None)
DateTime = df_log.loc[:, 0]
Sequence = df_log.loc[:, 1]
LogSpace = df_log.loc[:, 2]
PriClass = df_log.loc[:, 3]
interHSFN = [eval(i)[0] for i in list(df_log.loc[:, 4])]
sysHSFN = [i[1:-1].split(".")[0] for i in list(df_log.loc[:, 5])]
SysFraNum = [i[1:-1].split(".")[1] for i in list(df_log.loc[:, 5])]
SysSubFN = [i[1:-1].split(".")[2] for i in list(df_log.loc[:, 5])]
PhyCellIdentifier = [i[1:-1].split(":")[0] for i in list(df_log.loc[:, 6])]
CELs = [i[1:-1].split(":")[2] for i in list(df_log.loc[:, 6])]
UEIndex = [i[1:-1].split(":")[0] for i in list(df_log.loc[:, 7])]
CRNTI = [i[1:-1].split(":")[1] for i in list(df_log.loc[:, 7])]
GID = [i[1:-1].split(":")[2] for i in list(df_log.loc[:, 7])]
Message = df_log.loc[:,8]
time4 = time.time()
# Label the log message body with indicator columns based on the regex matches
I = [1 if re_I.match(i) else 0 for i in Message]
U = [1 if re_U.match(i) else 0 for i in Message]
D = [1 if re_D.match(i) else 0 for i in Message]
TOOL = [1 if re_TOOL.match(i) else 0 for i in Message]
RA = [1 if re_RA.search(i) else 0 for i in Message]
MSG = [1 if re_MSG.search(i) else 0 for i in Message]
df_log = pd.DataFrame({"DateTime": DateTime,
"Sequence": Sequence,
"LogSpace": LogSpace,
"PriClass": PriClass,
"interHSFN": interHSFN,
"sysHSFN": sysHSFN,
"SysFraNum": SysFraNum,
"SysSubFN": SysSubFN,
"PhyCellIdentifier": PhyCellIdentifier,
"CELs": CELs,
"UEIndex": UEIndex,
"CRNTI": CRNTI,
"GID": GID,
"I": I,
"U": U,
"D": D,
"TOOL": TOOL,
"RA": RA,
"MSG": MSG,
"Message": Message
})
time5 = time.time()
df_log.to_csv(LogOutput,escapechar='"',index=False)
time6 = time.time()
RunTime = [time1, time2, time3, time4, time5, time6]
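# Successive differences of the timestamps in RunTime give the time spent on
# each stage: text-to-CSV fix, regex setup, CSV parsing/field splitting,
# labelling/DataFrame build, and CSV export.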
| 34.115789 | 89 | 0.531009 |
4a213a6864005a954d5b5f00e7e6c07d8eb38f52
| 444 |
py
|
Python
|
blog/models.py
|
BkrmDahal/newsapp
|
5b8a00072662db31a87f7975dcdf8e5bb6068235
|
[
"MIT"
] | 1 |
2018-05-11T16:30:55.000Z
|
2018-05-11T16:30:55.000Z
|
blog/models.py
|
BkrmDahal/newsapp
|
5b8a00072662db31a87f7975dcdf8e5bb6068235
|
[
"MIT"
] | null | null | null |
blog/models.py
|
BkrmDahal/newsapp
|
5b8a00072662db31a87f7975dcdf8e5bb6068235
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Stories(models.Model):
title = models.CharField(max_length=200)
description = models.TextField()
summary = models.TextField()
tag = models.CharField(max_length = 200)
image_url = models.URLField()
url = models.URLField()
source = models.CharField(max_length = 200)
pub_date = models.DateTimeField()
def __str__(self):
return self.title
| 27.75 | 47 | 0.695946 |
4a213a7a146c86ba996872fb2f9a6e31494f23ce
| 59,595 |
py
|
Python
|
python/paddle/fluid/layers/sequence_lod.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11 |
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
python/paddle/fluid/layers/sequence_lod.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/layers/sequence_lod.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1 |
2021-09-24T11:23:36.000Z
|
2021-09-24T11:23:36.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
from .layer_function_generator import templatedoc
from ..framework import core, Variable, _non_static_mode, in_dygraph_mode, _in_legacy_dygraph, convert_np_dtype_to_dtype_
from ..layer_helper import LayerHelper
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..core import VarDesc
from paddle import _C_ops
__all__ = [
'sequence_conv',
'sequence_softmax',
'sequence_pool',
'sequence_concat',
'sequence_first_step',
'sequence_last_step',
'sequence_slice',
'sequence_expand',
'sequence_expand_as',
'sequence_pad',
'sequence_unpad',
'sequence_reshape',
'sequence_scatter',
'sequence_enumerate',
'sequence_mask',
'sequence_reverse',
]
@templatedoc()
def sequence_conv(input,
num_filters,
filter_size=3,
filter_stride=1,
padding=True,
padding_start=None,
bias_attr=None,
param_attr=None,
act=None,
name=None):
r"""
:api_attr: Static Graph
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use conv2d Op.(fluid.layers.** :ref:`api_fluid_layers_conv2d` ).
This operator receives input sequences with variable length and other convolutional
    configuration parameters (num_filters, filter_size) to apply the convolution operation.
It fills all-zero padding data on both sides of the sequence by default to ensure that
the output is the same length as the input. You can customize the padding behavior by
configuring the parameter :attr:`padding\_start` .
    **Warning:** the parameter :attr:`padding` takes no effect and will be deprecated in the future.
.. code-block:: text
Here we will illustrate the details of the padding operation:
For a mini-batch of 2 variable lengths sentences, containing 3, and 1 time-steps:
Assumed input (X) is a [4, N] float LoDTensor, and for the sake of simplicity, we assume N=2.
input.data = [[1, 1],
[2, 2],
[3, 3],
[4, 4]]
This is to say that input (X) has 4 words and the dimension of each word
representation is 2.
* Case1:
If padding_start is -1 and filter_size is 3.
The length of padding data is calculated as follows:
up_pad_len = max(0, -padding_start) = 1
down_pad_len = max(0, filter_size + padding_start - 1) = 1
The output of the input sequence after padding is:
            data_after_padding = [[0, 0, 1, 1, 2, 2],
[1, 1, 2, 2, 3, 3],
[2, 2, 3, 3, 0, 0],
[0, 0, 4, 4, 0, 0]]
It will be multiplied by the filter weight to get the final output.
Assume num_filters = 3
output.data = [[ 0.3234, -0.2334, 0.7433],
[ 0.5646, 0.9464, -0.1223],
[-0.1343, 0.5653, 0.4555],
[ 0.9954, -0.1234, -0.1234]]
output.shape = [4, 3] # 3 = num_filters
output.lod = [[0, 3, 4]] # Remain the same
Args:
input (Variable): LoDTensor with shape :math:`(M, K)`, where M is the total time-step of mini-batch
and K is hidden_size of input. Only lod_level of 1 is supported. The data type should be float32 or
float64.
num_filters (int): the number of filters.
filter_size (int): the height of filter. Specified filter width is not supported, the width is
hidden_size by default. Default: 3.
filter_stride (int): stride of the filter. Currently only supports :attr:`stride` = 1.
        padding (bool): the parameter :attr:`padding` takes no effect and will be discarded in the
future. Currently, it will always pad input to make sure the length of the output is
the same as input whether :attr:`padding` is set true or false. Because the length of
input sequence may be shorter than :attr:`filter\_size`, which will cause the convolution
result to not be computed correctly. These padding data will not be trainable or updated
while training. Default: True.
padding_start (int): It is used to indicate the start index for padding the input
sequence, which can be negative. The negative number means to pad
:attr:`|padding_start|` time-steps of all-zero data at the beginning of each instance.
The positive number means to skip :attr:`padding_start` time-steps of each instance,
and it will pad :math:`filter\_size + padding\_start - 1` time-steps of all-zero data
at the end of the sequence to ensure that the output is the same length as the input.
If set None, the same length :math:`\\frac{filter\_size}{2}` of data will be filled
on both sides of the sequence. If set 0, the length of :math:`filter\_size - 1` data
is padded at the end of each input sequence. Default: None.
bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the
default bias parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
act (str): Activation to be applied to the output of this layer, such as tanh, softmax,
sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: LoDTensor with the same length as input. The data type is float32 or float64, which is same as input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[-1, 10], dtype='float32', lod_level=1)
x_conved = paddle.static.nn.sequence_conv(input=x, num_filters=2, filter_size=3, padding_start=-1)
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'sequence_conv')
helper = LayerHelper('sequence_conv', **locals())
dtype = helper.input_dtype()
filter_shape = [filter_size * input.shape[1], num_filters]
filter_param = helper.create_parameter(attr=helper.param_attr,
shape=filter_shape,
dtype=dtype)
pre_bias = helper.create_variable_for_type_inference(dtype)
if padding_start is None:
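        # Default: pad roughly half of filter_size on each side so that the
        # output keeps the same length as the input (see the docstring above).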
padding_start = -int(filter_size // 2)
helper.append_op(type='sequence_conv',
inputs={
'X': [input],
'Filter': [filter_param],
},
outputs={"Out": pre_bias},
attrs={
'contextStride': filter_stride,
'contextStart': padding_start,
'contextLength': filter_size,
})
pre_act = helper.append_bias_op(pre_bias)
return helper.append_activation(pre_act)
def sequence_softmax(input, use_cudnn=False, name=None):
r"""
:api_attr: Static Graph
**Note**:
**The input type of the OP must be LoDTensor. For Tensor, use:** :ref:`api_fluid_layers_softmax`
    A LoD-tensor can be regarded as several sequences, and this op applies the softmax algorithm to each sequence.
The shape of input Tensor can be :math:`[N, 1]` or :math:`[N]`, where :math:`N`
is the sum of the length of all sequences. Recommended usage: :math:`[N]`.
For i-th sequence in a mini-batch:
.. math::
Out(X[lod[i]:lod[i+1]], :) = \\frac{\exp(X[lod[i]:lod[i+1], :])}{\sum(\exp(X[lod[i]:lod[i+1], :]))}
For example, for a LoD-Tensor with 6 sequences ([3, 2, 4, 1, 2, 3] - sequence length list in order),
the lod in the runtime is [[0, 3, 5, 9, 10, 12, 15]],
then softmax will be computed among :math:`X[0:3,:],X[3:5,:],X[5:9,:],X[9:10,:],X[10:12,:],X[12:15,:]`,
and :math:`N` turns out to be 15.
.. code-block:: text
*Case 1:
Given:
input.data = [0.7, 1, 0.6,
1.5, 1.1,
1.2, 0.2, 0.6, 1.9,
3.1,
2.5, 0.8,
0.1, 2.4, 1.3]
input.lod = [[0, 3, 5, 9, 10, 12, 15]]
then:
output.data = [0.30724832, 0.41474187, 0.2780098,
0.59868765, 0.40131235,
0.2544242, 0.09359743, 0.13963096, 0.5123474,
1.,
0.84553474, 0.15446526,
0.06995796, 0.69777346, 0.23226859]
output.lod = [[0, 3, 5, 9, 10, 12, 15]]
Args:
input (Variable):A LoDTensor with shape of :math:`[N, 1]` or :math:`[N]`, Recommended usage: :math:`[N]`.
Supported data types: float32, float64.
use_cudnn (bool, optional): Use cudnn kernel or not. Effective only when the cudnn version of the paddle
            library is installed and GPU is used for training or inference. Default: False.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A LoD-Tensor which has the same shape and data type with input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[7, 1],
dtype='float32', lod_level=1)
x_sequence_softmax_1 = paddle.static.nn.sequence_softmax(input=x)
y = paddle.static.data(name='y', shape=[7],
dtype='float32', lod_level=1)
x_sequence_softmax_2 = paddle.static.nn.sequence_softmax(input=y)
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_softmax', **locals())
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'sequence_softmax')
dtype = helper.input_dtype()
softmax_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="sequence_softmax",
inputs={"X": input},
outputs={"Out": softmax_out},
attrs={"use_cudnn": use_cudnn})
return softmax_out
def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
r"""
:api_attr: Static Graph
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use pool2d Op.(fluid.layers.** :ref:`api_fluid_layers_pool2d` ).
This operator only supports LoDTensor as input. It will apply specified pooling
operation on the input LoDTensor. It pools features of all time-steps of each
sequence at the last lod_level using :attr:`pool_type` mentioned in the parameters,
such as sum, average, sqrt, etc.
It supports six pool_type:
- average: :math:`Out[i] = \\frac{\sum_i X_i}{N}`
- sum: :math:`Out[i] = \sum_jX_{ij}`
- sqrt: :math:`Out[i] = \\frac{\sum_jX_{ij}}{\sqrt{len(X_i)}}`
- max: :math:`Out[i] = max(X_i)`
- last: :math:`Out[i] = X_{N_i}`
- first: :math:`Out[i]` = X_0
where :math:`N_i` is the length of i-th input sequence.
.. code-block:: text
Case 1:
input is a 1-level LoDTensor and pad_value = 0.0:
input.lod = [[0, 2, 5, 7, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
output is LoDTensor:
out.shape = [4, 1]
with condition out.shape[0] == len(x.lod[-1]) == 4
for different pool_type:
average: out.data = [[2.], [4.], [3.], [0.0]], where 2.=(1. + 3.)/2, 4.=(2. + 4. + 6.)/3, 3.=(5. + 1.)/2
sum : out.data = [[4.], [12.], [6.], [0.0]], where 4.=1. + 3., 12.=2. + 4. + 6., 6.=5. + 1.
sqrt : out.data = [[2.82], [6.93], [4.24], [0.0]], where 2.82=(1. + 3.)/sqrt(2), 6.93=(2. + 4. + 6.)/sqrt(3), 4.24=(5. + 1.)/sqrt(2)
max : out.data = [[3.], [6.], [5.], [0.0]], where 3.=max(1., 3.), 6.=max(2., 4., 6.), 5.=max(5., 1.)
last : out.data = [[3.], [6.], [1.], [0.0]], where 3.=last(1., 3.), 6.=last(2., 4., 6.), 1.=last(5., 1.)
first : out.data = [[1.], [2.], [5.], [0.0]], where 1.=first(1., 3.), 2.=first(2., 4., 6.), 5.=first(5., 1.)
            and the [0.0] at the end of out.data above is padding data.
Case 2:
input is a 2-level LoDTensor containing 3 sequences with length info [2, 0, 3],
where 0 means empty sequence.
The first sequence contains 2 subsequence with length info [1, 2];
The last sequence contains 3 subsequence with length info [1, 0, 3].
input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
            If pool_type = sum, it will apply pooling on last lod_level [0, 1, 3, 4, 4, 7]. pad_value = 0.0
output is LoDTensor:
out.shape= [5, 1]
out.lod = [[0, 2, 2, 5]]
where out.shape[0] == len(x.lod[-1]) == 5
sum: out.data = [[1.], [5.], [4.], [0.0], [12.]]
where 1.=1., 5.=3. + 2., 4.=4., 0.0=pad_value, 12.=6. + 5. + 1.
Args:
input (variable): LoDTensor with lod_level no more than 2. The data type should be float32 or float64.
pool_type (str): The pooling type that supports average, sum, sqrt, max, last or first.
        is_test (bool): Only works when :attr:`pool_type` is max. If set False, a temporary Tensor maxIndex is
created to record the index information corresponding to the maximum value, which is used for backward
gradient calculation in the training phase. Default: False.
pad_value (float): Used to pad the pooling result for empty input sequence. Default: 0.0
Returns:
Variable: LoDTensor after pooling with data type float32 or float64.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
avg_x = paddle.static.nn.sequence_pool(input=x, pool_type='average')
sum_x = paddle.static.nn.sequence_pool(input=x, pool_type='sum')
sqrt_x = paddle.static.nn.sequence_pool(input=x, pool_type='sqrt')
max_x = paddle.static.nn.sequence_pool(input=x, pool_type='max')
last_x = paddle.static.nn.sequence_pool(input=x, pool_type='last')
first_x = paddle.static.nn.sequence_pool(input=x, pool_type='first')
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'sequence_pool')
helper = LayerHelper('sequence_pool', **locals())
dtype = helper.input_dtype()
pool_out = helper.create_variable_for_type_inference(dtype)
max_index = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="sequence_pool",
inputs={"X": input},
outputs={
"Out": pool_out,
"MaxIndex": max_index
},
attrs={
"pooltype": pool_type.upper(),
"is_test": is_test,
"pad_value": pad_value
})
# when pool_type is max, variable max_index is initialized,
# so we stop the gradient explicitly here
if pool_type == 'max':
max_index.stop_gradient = True
return pool_out
@templatedoc()
def sequence_concat(input, name=None):
"""
:api_attr: Static Graph
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use concat Op.(fluid.layers.** :ref:`api_fluid_layers_concat` ).
    This operator only supports LoDTensor as input. It concatenates the input LoDTensors according to their LoD information,
and outputs the concatenated LoDTensor.
.. code-block:: text
input is a list of LoDTensor:
input = [x1, x2]
where:
x1.lod = [[0, 3, 5]]
x1.data = [[1], [2], [3], [4], [5]]
x1.shape = [5, 1]
x2.lod = [[0, 2, 4]]
x2.data = [[6], [7], [8], [9]]
x2.shape = [4, 1]
and should satisfy: len(x1.lod[0]) == len(x2.lod[0])
output is LoDTensor:
out.lod = [[0, 3+2, 5+4]]
out.data = [[1], [2], [3], [6], [7], [4], [5], [8], [9]]
out.shape = [9, 1]
Args:
input(list of Variable): List of LoDTensor to be concatenated. The length of each LoDTensor should be same.
The data type can be float32, float64 or int64.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: Output the concatenated LoDTensor. The data type is same as input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[-1, 10], dtype='float32', lod_level=1)
y = paddle.static.data(name='y', shape=[-1, 10], dtype='float32', lod_level=1)
out = paddle.static.nn.sequence_concat(input=[x, y])
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_concat', **locals())
check_type(input, 'input', list, 'fluid.layers.sequence_concat')
for i, input_x in enumerate(input):
check_variable_and_dtype(input_x, 'input[' + str(i) + ']',
['int64', 'float32', 'float64'],
'fluid.layers.sequence_concat')
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='sequence_concat',
inputs={'X': input},
outputs={'Out': [out]})
return out
def sequence_first_step(input):
"""
:api_attr: Static Graph
This operator only supports LoDTensor as input. Given the input LoDTensor, it will
    select the first time-step feature of each sequence as output.
.. code-block:: text
Case 1:
input is 1-level LoDTensor:
input.lod = [[0, 2, 5, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
output is a LoDTensor:
out.shape = [3, 1]
out.shape[0] == len(x.lod[-1]) == 3
out.data = [[1.], [2.], [5.]], where 1.=first(1., 3.), 2.=first(2., 4., 6.), 5.=first(5., 1.)
Case 2:
input is a 2-level LoDTensor containing 3 sequences with length info [2, 0, 3],
where 0 means empty sequence.
The first sequence contains 2 subsequence with length info [1, 2];
The last sequence contains 3 subsequence with length info [1, 0, 3].
input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
It will apply pooling on last lod_level [0, 1, 3, 4, 4, 7]. pad_value = 0.0
output is a LoDTensor:
out.shape= [5, 1]
out.lod = [[0, 2, 2, 5]]
out.shape[0] == len(x.lod[-1]) == 5
out.data = [[1.], [3.], [4.], [0.0], [6.]]
where 1.=first(1.), 3.=first(3., 2.), 4.=first(4.), 0.0 = pad_value, 6.=first(6., 5., 1.)
Args:
input(Variable): LoDTensor with lod_level no more than 2. The data type should be float32 or float64.
Returns:
Variable: LoDTensor consist of the sequence's first step vector. The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
x_first_step = paddle.static.nn.sequence_first_step(input=x)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'sequence_first_step')
return sequence_pool(input=input, pool_type="first")
def sequence_last_step(input):
"""
:api_attr: Static Graph
This operator only supports LoDTensor as input. Given the input LoDTensor, it will
    select the last time-step feature of each sequence as output.
.. code-block:: text
Case 1:
input is 1-level LoDTensor:
input.lod = [[0, 2, 5, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
output is a LoDTensor:
out.shape = [3, 1]
out.shape[0] == len(x.lod[-1]) == 3
out.data = [[3.], [6.], [1.]], where 3.=last(1., 3.), 6.=last(2., 4., 6.), 1.=last(5., 1.)
Case 2:
input is a 2-level LoDTensor containing 3 sequences with length info [2, 0, 3],
where 0 means empty sequence.
The first sequence contains 2 subsequence with length info [1, 2];
The last sequence contains 3 subsequence with length info [1, 0, 3].
input.lod = [[0, 2, 2, 5], [0, 1, 3, 4, 4, 7]]
input.data = [[1.], [3.], [2.], [4.], [6.], [5.], [1.]]
input.shape = [7, 1]
It will apply pooling on last lod_level [0, 1, 3, 4, 4, 7]. pad_value = 0.0
output is a LoDTensor:
out.shape= [5, 1]
out.lod = [[0, 2, 2, 5]]
out.shape[0] == len(x.lod[-1]) == 5
out.data = [[1.], [2.], [4.], [0.0], [1.]]
where 1.=last(1.), 2.=last(3., 2.), 4.=last(4.), 0.0 = pad_value, 1=last(6., 5., 1.)
Args:
input(Variable): LoDTensor with lod_level no more than 2. The data type should be float32.
Returns:
Variable: LoDTensor consist of the sequence's last step vector. The data type is float32.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
x_last_step = paddle.static.nn.sequence_last_step(input=x)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'sequence_last_step')
return sequence_pool(input=input, pool_type="last")
def sequence_slice(input, offset, length, name=None):
"""
:api_attr: Static Graph
**Sequence Slice Layer**
The layer crops a subsequence from given sequence with given start
offset and subsequence length.
It only supports sequence data (LoDTensor with lod_level equal to 1).
.. code-block:: text
- Case:
Given the input Variable **input**:
input.data = [[a1, a2], [b1, b2], [c1, c2], [d1, d2], [e1, e2]],
input.lod = [[3, 2]],
input.dims = (5, 2),
with offset.data = [[0], [1]] and length.data = [[2], [1]],
the output Variable will be
out.data = [[a1, a2], [b1, b2], [e1, e2]],
out.lod = [[2, 1]],
out.dims = (3, 2).
Note:
The first dimension size of **input**, **offset** and **length**
should be equal. The **offset** should start from 0.
Args:
input(Variable): LoDTensor, The input Variable which consists of the complete
            sequences. The data type can be float32, float64, int32 or int64
offset(Variable): LoDTensor, The offset to slice each sequence. The data
type is int32 or int64.
length(Variable): LoDTensor, The length of each subsequence. The data
type is int32 or int64.
name(str|None): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: The output subsequences.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import numpy as np
seqs = paddle.static.data(name='x', shape=[10, 5],
dtype='float32', lod_level=1)
offset = paddle.assign(np.array([[0, 1]]).astype("int32"))
length = paddle.assign(np.array([[2, 1]]).astype("int32"))
subseqs = paddle.static.nn.sequence_slice(input=seqs, offset=offset,
length=length)
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper("sequence_slice", **locals())
check_variable_and_dtype(input, 'input',
['float32', 'float64', 'int32', 'int64'],
'sequence_slice')
check_variable_and_dtype(offset, 'offset', ['int32', 'int64'],
'sequence_slice')
check_variable_and_dtype(length, 'length', ['int32', 'int64'],
'sequence_slice')
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
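    # offset and length are index metadata; stop_gradient keeps gradients from flowing into them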
offset.stop_gradient = True
length.stop_gradient = True
helper.append_op(type="sequence_slice",
inputs={
"X": input,
"Offset": offset,
"Length": length
},
outputs={"Out": out})
return out
def sequence_expand(x, y, ref_level=-1, name=None):
r"""
:api_attr: Static Graph
Sequence Expand Layer. This layer will expand the input variable ``x`` \
according to specified level ``ref_level`` lod of ``y``. Please note that \
    the lod level of ``x`` is at most 1. If the lod level of ``x`` is 1, then \
the size of lod of ``x`` must be equal to the length of ``ref_level`` lod \
of ``y``. If the lod level of ``x`` is 0, then the first dim of ``x`` should \
be equal to the size of ``ref_level`` of ``y``. The rank of **x** is at least 2. \
When rank of ``x`` is greater than 2, then it would be viewed as a 2-D tensor.
Please note that the input ``x`` should be LodTensor or Tensor, \
and input ``y`` must be LodTensor.
Following examples will explain how sequence_expand works:
.. code-block:: text
Case 1
Consider 2 sequences [a][b] and [c][d], now we want to expand them to [a][b], [a][b], [c][d] and [c][d].
        Sequence [a][b] expands twice and [c][d] expands twice, so the lod to expand according to is [2, 2].
Input x is a 1-level LoDTensor:
x.lod = [[2, 2]] #lod based on length may be easier to understand
x.data = [[a], [b], [c], [d]]
x.dims = [4, 1]
input y is a LoDTensor:
y.lod = [[2, 2], #the 0th level lod, according to this level
[3, 3, 1, 1]] #the 1st level lod, it has nothing to do with this level
ref_level: 0
then output is a 1-level LoDTensor out:
out.lod = [[2, 2, 2, 2]] #lod based on offset
out.data = [[a], [b], [a], [b], [c], [d], [c], [d]]
out.dims = [8, 1]
Case 2
Consider 3 sequences [a], [b], [c], now we want to expand them to [a][a], [c][c][c].
It's obvious that the lod info of expanded sequences is [2, 0, 3].
x is a Tensor:
x.data = [[a], [b], [c]]
x.dims = [3, 1]
y is a LoDTensor:
y.lod = [[2, 0, 3]]
ref_level: -1
then output is a 1-level LodTensor:
out.data = [[a], [a], [c], [c], [c]]
out.dims = [5, 1]
Args:
x (Variable): The input variable which is a Tensor or LoDTensor, with the \
dims ``[M, K]``. The lod level is at most 1. The data type should be \
float32, float64, int32 or int64.
y (Variable): The input variable which is a LoDTensor, the lod level is \
at least 1.
ref_level (int): Lod level of ``y`` to be referred by ``x``. If set to -1, \
refer the last level of lod.
name(str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually name is no need to set and \
None by default.
Returns: The expanded variable which is a LoDTensor, with dims ``[N, K]``. \
``N`` depends on the lod info of ``x`` and ``y``. \
The data type is same as input.
Return Type: Variable
Examples:
.. code-block:: python
import paddle
from paddle import fluid
paddle.enable_static()
import numpy as np
x = paddle.static.data(name='x', shape=[4, 1], dtype='float32')
y = paddle.static.data(name='y', shape=[8, 1],
dtype='float32', lod_level=1)
out = paddle.static.nn.sequence_expand(x=x, y=y, ref_level=0)
exe = paddle.static.Executor(fluid.CPUPlace())
place = paddle.CPUPlace()
np_data = np.array([[1], [2], [3], [4]]).astype('float32')
x_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2]], place)
print(x_lod_tensor)
#lod: [[0, 2, 4]]
# dim: 4, 1
# layout: NCHW
# dtype: float
# data: [1 2 3 4]
np_data = np.array([[1], [2], [3], [4], [5], [6], [7], [8]]).astype('float32')
y_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2], [3,3,1,1]], place)
print(y_lod_tensor)
#lod: [[0, 2, 4][0, 3, 6, 7, 8]]
# dim: 8, 1
# layout: NCHW
# dtype: int64_t
# data: [0 0 1 1 1 1 1 0]
out_main = exe.run(fluid.default_main_program(),
feed={'x': x_lod_tensor, 'y': y_lod_tensor},
fetch_list=[out], return_numpy=False)
print(out_main[0])
#lod: [[0, 2, 4, 6, 8]]
# dim: 8, 1
# layout: NCHW
# dtype: float
# data: [1 2 1 2 3 4 3 4]
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'sequence_expand')
helper = LayerHelper('sequence_expand', **locals())
dtype = helper.input_dtype(input_param_name='x')
tmp = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='sequence_expand',
inputs={
'X': x,
'Y': y
},
outputs={'Out': tmp},
attrs={'ref_level': ref_level})
return tmp
def sequence_expand_as(x, y, name=None):
r"""
:api_attr: Static Graph
Sequence Expand As Layer. This OP will expand the input variable ``x`` \
according to the zeroth level lod of ``y``. Current implementation requires \
the level number of ``y``'s lod must be 1, and the first dimension of \
``x`` should be equal to the size of ``y``'s zeroth level lod, thus \
the expanded LodTensor has the same lod info as ``y``. The expanded result \
has nothing to do with ``x``'s lod, so the lod of Input(X) is not considered.
Please note that the input ``x`` should be LodTensor or Tensor, \
and input ``y`` must be LodTensor.
Following examples will explain how sequence_expand_as works:
.. code-block:: text
Case 1:
Consider 4 sequences [a], [b], [c], [d], now we want to expand them to [a][a][a], [b][b][b], [c] and [d].
It's obvious that the lod info of expanded sequences is [0, 3, 6, 7, 8].
Given a 1-level LodTensor ``x``:
x.data = [[a], [b], [c], [d]]
x.dims = [4, 1]
and input ``y``
y.lod = [[3, 3, 1, 1]] #lod based on length may be easier to understand
then we get 1-level LoDTensor out:
Out.lod = [[0, 3, 6, 7, 8]] #based on offset
Out.data = [[a], [a], [a], [b], [b], [b], [c], [d]]
Out.dims = [8, 1]
Case 2:
Given a common Tensor ``x``:
x.data = [[a, b], [c, d], [e, f]]
x.dims = [3, 2]
and input ``y``:
y.lod = [[0, 2, 3, 6]]
then we get a 1-level LoDTensor:
out.lod = [[0, 2, 3, 6]]
out.data = [[a, b], [a, b] [c, d], [e, f], [e, f], [e, f]]
out.dims = [6, 2]
Args:
x (Variable): The input variable which is a Tensor or LoDTensor, with the \
dims ``[M, K]``. The data type should be float32, float64, int32 \
or int64.
y (Variable): The input variable which is a LoDTensor with 1-level lod.
name (str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually name is no need to set and \
None by default.
Returns: The expanded variable which is a LoDTensor with the dims ``[N, K]``. \
``N`` depends on the lod of ``y``, and the lod level must be 1. \
The data type is same as input.
Return Type: Variable
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
import numpy as np
x = paddle.static.data(name='x', shape=[4, 1], dtype='float32')
y = paddle.static.data(name='y', shape=[8, 1], dtype='float32', lod_level=1)
out = paddle.static.nn.sequence_expand_as(x=x, y=y)
exe = fluid.Executor(fluid.CPUPlace())
place = fluid.CPUPlace()
np_data = np.array([[1], [2], [3], [4]]).astype('float32')
x_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2]], place)
print(x_lod_tensor)
#lod: [[0, 2, 4]]
# dim: 4, 1
# layout: NCHW
# dtype: float
# data: [1 2 3 4]
np_data = np.array([[1], [2], [3], [4], [5], [6], [7], [8]]).astype('float32')
y_lod_tensor = fluid.create_lod_tensor(np_data, [[3,3,1,1]], place)
print(y_lod_tensor)
#lod: [[0, 3, 6, 7, 8]]
# dim: 8, 1
# layout: NCHW
# dtype: int64_t
# data: [0 0 1 0 1 1 1 0]
out_main = exe.run(fluid.default_main_program(),
feed={'x': x_lod_tensor, 'y': y_lod_tensor},
fetch_list=[out], return_numpy=False)
print(out_main[0])
#lod: [[0, 3, 6, 7, 8]]
# dim: 8, 1
# layout: NCHW
# dtype: float
# data: [1 1 1 2 2 2 3 4]
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'sequence_expand_as')
check_type(y, 'y', Variable, 'sequence_expand_as')
helper = LayerHelper('sequence_expand_as', **locals())
dtype = helper.input_dtype(input_param_name='x')
tmp = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='sequence_expand_as',
inputs={
'X': x,
'Y': y
},
outputs={'Out': tmp})
return tmp
def sequence_pad(x, pad_value, maxlen=None, name=None):
r"""
:api_attr: Static Graph
    This layer pads the sequences in the same batch to a common length (according
to ``maxlen``). The padding value is defined by ``pad_value``, and will be
appended to the tail of sequences. The result is a Python tuple ``(Out, Length)``:
the LodTensor ``Out`` is the padded sequences, and LodTensor ``Length`` is
    the length information of input sequences. For removing padding data (unpadding operation), see :ref:`api_fluid_layers_sequence_unpad`.
Please note that the input ``x`` should be LodTensor.
.. code-block:: text
Case 1:
Given input 1-level LoDTensor x:
x.lod = [[0, 2, 5]]
x.data = [[a],[b],[c],[d],[e]]
pad_value:
pad_value.data = [0]
maxlen = 4
the output tuple (Out, Length):
Out.data = [[[a],[b],[0],[0]],[[c],[d],[e],[0]]]
Length.data = [2, 3] #Original sequences length
Case 2:
Given input 1-level LoDTensor x:
x.lod = [[0, 2, 5]]
x.data = [[a1,a2],[b1,b2],[c1,c2],[d1,d2],[e1,e2]]
pad_value:
pad_value.data = [0]
default maxlen = None, (the virtual value is 3, according to the shape of x)
the output tuple (Out, Length):
Out.data = [[[a1,a2],[b1,b2],[0,0]],[[c1,c2],[d1,d2],[e1,e2]]]
Length.data = [2, 3]
Case 3:
Given input 1-level LoDTensor x:
x.lod = [[0, 2, 5]]
x.data = [[a1,a2],[b1,b2],[c1,c2],[d1,d2],[e1,e2]]
pad_value:
pad_value.data = [p1,p2]
default maxlen = None, (the virtual value is 3)
get tuple (Out, Length):
Out.data = [[[a1,a2],[b1,b2],[p1,p2]],[[c1,c2],[d1,d2],[e1,e2]]]
Length.data = [2, 3]
Args:
x (Variable): Input 1-level LodTensor with dims ``[M, K]``. The batch \
size is described by lod infor (the number of sequences ). \
The data type should be float32, float64, int8, int32 or int64.
pad_value (Variable): Padding value. It can be a scalar or a 1D tensor \
with length ``K``. If it's a scalar, it will be automatically broadcasted \
to a Tensor. The data type should be as same as ``x``.
maxlen (int, optional): The length of padded sequences, None by default. \
When it is None, all sequences will be padded up to the length of the \
            longest one among them; when it is a certain positive value, it must be \
greater than the length of the longest original sequence.
name (str, optional): For detailed information, please refer \
to :ref:`api_guide_Name`. Usually name is no need to set and \
None by default.
Returns: A Python tuple (Out, Length): the 1st is a 0 level LodTensor \
``Out``, with the shape ``[batch_size, maxlen, K]``; the second is the original \
        sequences length info ``Length``, which should be a 0-level 1D LodTensor. \
The size of ``Length`` is equal to batch size, and the data type is int64.
Return Type: tuple
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.fluid as fluid
import numpy
x = paddle.static.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
pad_value = paddle.assign(
numpy.array([0.0], dtype=numpy.float32))
out = paddle.static.nn.sequence_pad(x=x, pad_value=pad_value)
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_pad', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_pad')
check_variable_and_dtype(pad_value, 'pad_value',
['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_pad')
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
length = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
pad_value.stop_gradient = True
length.stop_gradient = True
if maxlen is None:
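        # -1 tells the sequence_pad op to pad every sequence up to the length
        # of the longest sequence in the batch (see the docstring above).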
maxlen = -1
helper.append_op(type='sequence_pad',
inputs={
'X': x,
'PadValue': pad_value
},
outputs={
'Out': out,
'Length': length
},
attrs={'padded_length': maxlen})
return out, length
def sequence_unpad(x, length, name=None):
"""
:api_attr: Static Graph
**Note**:
**The input of the OP is Tensor and the output is LoDTensor. For padding operation, See:** :ref:`api_fluid_layers_sequence_pad`
The OP removes the padding data from the input based on the length information and returns a LoDTensor.
.. code-block:: text
Case 1:
Given input Variable **x**:
x.data = [[ 1.0, 2.0, 3.0, 4.0, 5.0],
[ 6.0, 7.0, 8.0, 9.0, 10.0],
[11.0, 12.0, 13.0, 14.0, 15.0]],
in which there are 3 sequences padded to length 5, and the actual length
specified by input Variable **length**:
length.data = [2, 3, 4],
after unpadding, the output Variable will be:
out.data = [[1.0, 2.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0]]
out.lod = [[0, 2, 5, 9]]
Args:
x(Variable): A Tensor which contains padding data, and its shape size can not be less than 2.
Supported data types: float32, float64, int32, int64.
length(Variable): A 1D Tensor that stores the actual length of each sample, and the Tensor
            has the same size as the 0th dimension of x. Supported data types: int64.
name(str|None): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A LoDTensor whose recursive sequence length is consistent with the information of the length parameter and it has the same data type with input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.fluid as fluid
import numpy
# pad data
x = paddle.static.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
pad_value = paddle.assign(numpy.array([0.0], dtype=numpy.float32))
pad_data, len = paddle.static.nn.sequence_pad(x=x, pad_value=pad_value)
# unpad data
unpad_data = paddle.static.nn.sequence_unpad(x=pad_data, length=len)
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_unpad', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_unpad')
check_variable_and_dtype(length, 'length', ['int64'],
'fluid.layers.sequence_unpad')
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
length.stop_gradient = True
helper.append_op(type='sequence_unpad',
inputs={
'X': x,
'Length': length
},
outputs={'Out': out})
return out
def sequence_reshape(input, new_dim):
"""
:api_attr: Static Graph
**Notes: The Op only receives LoDTensor as input. If your input is Tensor, please use reshape Op.(fluid.layers.** :ref:`api_fluid_layers_reshape` ).
This operator only supports LoDTensor as input. Given :attr:`new_dim` ,
it will compute new shape according to original length of each sequence,
original dimensions and :attr:`new_dim` . Then it will output a new LoDTensor
    whose last dimension is :attr:`new_dim` . Currently it only supports 1-level LoDTensor.
Please make sure that (original length * original dimensions) can be divided
by the :attr:`new_dim` with no remainder for each sequence.
.. code-block:: text
input is a LoDTensor:
input.lod = [[0, 2, 6]]
input.data = [[1, 2], [3, 4],
[5, 6], [7, 8],
[9, 10], [11, 12]]
input.shape = [6, 2]
set new_dim = 4
out is a LoDTensor:
out.lod = [[0, 1, 3]]
out.data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
out.shape = [3, 4]
Args:
input (Variable): 1-level LoDTensor with shape :math:`[M, K]` . The data type should
be int32, int64, float32 or float64.
new_dim (int): New dimension that the input LoDTensor is reshaped to.
Returns:
Variable: Reshaped LoDTensor according to new dimension. The data type is same as input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[None, 16], dtype='float32', lod_level=1)
x_reshaped = paddle.static.nn.sequence_reshape(input=x, new_dim=4)
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_reshape', **locals())
check_variable_and_dtype(input, 'input',
['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_reshape')
out = helper.create_variable_for_type_inference(helper.input_dtype())
helper.append_op(type='sequence_reshape',
inputs={'X': [input]},
outputs={'Out': [out]},
attrs={'new_dim': new_dim})
return out
def sequence_scatter(input, index, updates, name=None):
"""
:api_attr: Static Graph
**Note**:
**The index and updates parameters of the OP must be LoDTensor.**
    Add the updates data to the corresponding input according to the index.
    The update algorithm is as follows: output[instance_index][index[pos]] = input[instance_index][index[pos]] + updates[pos],
    where instance_index is the sample in the batch that position pos belongs to.
The value of output[i][j] depends on whether j can be found in the i+1th interval of the index. If found,
    out[i][j] = input[i][j] + updates[m][n]; otherwise, out[i][j] = input[i][j].
For example, in the following example, the lod information for index is divided into three sequences. Among
them, because the element 0 can be found in the first interval of the index, it is updated with the value of
the corresponding position of the updates, out[0][0] = input[0][0]+updates[0][0] . Because element 1 cannot
be found in the third interval of index, out[2][1] = input[2][1].
.. code-block:: text
*Case 1:
Given:
input.data = [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]
input.dims = [3, 6]
index.data = [[0], [1], [2], [5], [4], [3], [2], [1], [3], [2], [5], [4]]
index.lod = [[0, 3, 8, 12]]
updates.data = [[0.3], [0.3], [0.4], [0.1], [0.2], [0.3], [0.4], [0.0], [0.2], [0.3], [0.1], [0.4]]
updates.lod = [[ 0, 3, 8, 12]]
Then:
out.data = [[1.3, 1.3, 1.4, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.4, 1.3, 1.2, 1.1],
[1.0, 1.0, 1.3, 1.2, 1.4, 1.1]]
out.dims = X.dims = [3, 6]
Args:
        input (Variable): A Tensor with shape :math:`[N, k_1... k_n]`. Supported data types: float32, float64, int32, int64.
        index (Variable): A LoDTensor containing index information. Its LoD level must be 1 and its data type can be int32 or int64.
        updates (Variable): A LoDTensor containing updates information. It has the same LoD level as index and the
            same data type as input. Supported data types: float32, float64, int32, int64.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Variable: A Tensor which has been updated. It has the same shape and data type with input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
input = paddle.static.data(name="x", shape=[None, 3, 6], dtype='float32' )
index = paddle.static.data(name='index', shape=[12, 1], dtype='int64', lod_level=1)
updates = paddle.static.data(name='updates', shape=[12, 1], dtype='float32', lod_level=1)
output = paddle.static.nn.sequence_scatter(input, index, updates)
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_scatter', **locals())
check_variable_and_dtype(input, 'input',
['float32', 'float64', 'int32', 'int64'],
'sequence_scatter')
check_variable_and_dtype(index, 'index', ['int32', 'int64'],
'sequence_scatter')
check_variable_and_dtype(updates, 'updates',
['float32', 'float64', 'int32', 'int64'],
'sequence_scatter')
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="sequence_scatter",
inputs={
"X": input,
"Ids": index,
"Updates": updates
},
outputs={"Out": out})
return out
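# Illustrative sketch, not part of the Paddle API: a plain-NumPy reproduction of the
# update rule documented above, with index/updates given as flat arrays plus 1-level
# LoD offsets (one interval per input row); all names are hypothetical.
def _sequence_scatter_reference(input_np, index_np, updates_np, lod_offsets):
    out = input_np.copy()
    for row, (start, end) in enumerate(zip(lod_offsets[:-1], lod_offsets[1:])):
        for pos in range(start, end):
            # out[i][index[pos]] = input[i][index[pos]] + updates[pos]
            out[row, int(index_np[pos])] += updates_np[pos]
    return out
# Feeding the Case 1 data above (index offsets [0, 3, 8, 12]) reproduces out.data as shown.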
def sequence_enumerate(input, win_size, pad_value=0, name=None):
r"""
:api_attr: Static Graph
    Generate a new sequence of shape ``[d_1, win_size]`` for the input index sequence \
        of shape ``[d_1, 1]``, which enumerates all the \
        sub-sequences of length ``win_size`` of the input, \
        padding with ``pad_value`` where necessary during generation.
    Please note that the `input` must be a LoDTensor.
.. code-block:: text
Input x:
x.lod = [[0, 3, 5]]
x.data = [[1], [2], [3], [4], [5]]
x.dims = [5, 1]
Attrs:
win_size = 2
pad_value = 0
Output:
out.lod = [[0, 3, 5]]
out.data = [[1, 2], [2, 3], [3, 0], [4, 5], [5, 0]]
out.dims = [5, 2]
Args:
        input (Variable): The input variable, which is an index sequence. \
            It should be a LoDTensor with shape ``[d_1, 1]`` and 1-level LoD info. \
            The data type should be int32 or int64.
        win_size (int): The window size for enumerating all sub-sequences.
        pad_value (int, optional): The padding value, default 0.
        name(str, optional): For detailed information, please refer \
            to :ref:`api_guide_Name`. Usually there is no need to set name, and it \
            is None by default.
    Returns: The enumerated sequence variable, which is a LoDTensor with \
        shape ``[d_1, win_size]`` and 1-level LoD info. \
        The data type is the same as ``input``.
Return Type: Variable
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[-1, 1], dtype='int32', lod_level=1)
out = paddle.static.nn.sequence_enumerate(input=x, win_size=3, pad_value=0)
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
check_variable_and_dtype(input, 'input', ['int32', 'int64'],
'sequence_enumerate')
helper = LayerHelper('sequence_enumerate', **locals())
out = helper.create_variable_for_type_inference(helper.input_dtype(),
stop_gradient=True)
helper.append_op(type='sequence_enumerate',
inputs={'X': input},
outputs={'Out': out},
attrs={
'win_size': win_size,
'pad_value': pad_value
})
return out
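# Illustrative sketch, not part of the Paddle API: a plain-Python reproduction of the
# enumeration described above for a single sequence taken out of the 1-level LoD input;
# the helper name is hypothetical.
def _enumerate_one_sequence(ids, win_size, pad_value=0):
    # ids: the index values of one sequence; one output row of width win_size per element
    return [
        [ids[i + j] if i + j < len(ids) else pad_value for j in range(win_size)]
        for i in range(len(ids))
    ]
# For the text case above, the first sequence [1, 2, 3] with win_size=2 gives
# [[1, 2], [2, 3], [3, 0]] and the second sequence [4, 5] gives [[4, 5], [5, 0]].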
def sequence_mask(x, maxlen=None, dtype='int64', name=None):
r"""
**SequenceMask Layer**
This layer outputs a mask according to the input :code:`x` and
:code:`maxlen` with data type of :code:`dtype`.
Supposing :code:`x` is a Tensor with shape [d_1, d_2, ..., d_n], the
:code:`y` is a mask with shape [d_1, d_2, ..., d_n, maxlen], where:
.. math::
y(i_1, i_2,..., i_n, j) = (j < x(i_1, i_2,..., i_n))
.. code-block:: text
Case:
Consider input:
x = [3, 1, 1, 0] max_len = 4
then we get out:
mask = [[1, 1, 1, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0]]
Args:
        x (Variable): Input tensor of the sequence_mask layer, \
            whose elements are integers less than :code:`maxlen`. \
            It is a Tensor or LoDTensor with shape [d_1, d_2, ..., d_n].
        maxlen (int, optional): Maximum length of the sequence. If :code:`maxlen` \
            is None, it will be replaced with :math:`max(x)`.
        dtype (np.dtype|paddle.dtype|str, optional): Data type of the output, \
            ``int64`` by default.
        name(str, optional): For detailed information, please refer \
            to :ref:`api_guide_Name`. Usually there is no need to set name, and it \
            is None by default.
Returns: The output sequence mask. Tensor with shape [d_1, d_2, ..., d_n, maxlen] \
and data type of :code:`dtype`. The data type should be bool, float32, float64, int8, \
int32 or int64.
Return Type: Tensor
Examples:
.. code-block:: python
import paddle
lengths = paddle.to_tensor([10, 9, 8])
mask = paddle.nn.functional.sequence_mask(lengths)
print(mask.numpy())
# [[1 1 1 1 1 1 1 1 1 1]
# [1 1 1 1 1 1 1 1 1 0]
# [1 1 1 1 1 1 1 1 0 0]]
"""
return paddle.nn.functional.sequence_mask(x, maxlen, dtype, name)
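# Illustrative sketch, not part of the Paddle API: the mask above is the broadcasted
# comparison j < x. A NumPy equivalent for 1-D lengths could look like the hypothetical
# helper below.
def _sequence_mask_reference(lengths, maxlen=None, dtype='int64'):
    import numpy as np
    lengths = np.asarray(lengths)
    maxlen = int(lengths.max()) if maxlen is None else maxlen
    return (np.arange(maxlen) < lengths[..., None]).astype(dtype)
# _sequence_mask_reference([3, 1, 1, 0], maxlen=4) reproduces the mask in the text case above.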
@templatedoc()
def sequence_reverse(x, name=None):
"""
    **Note: This OP only accepts LoDTensor as input. If your input is a Tensor, please use the reverse OP instead (fluid.layers.** :ref:`api_fluid_layers_reverse` ).
This operator only supports LoDTensor as input. It will reverse each sequence for input LoDTensor.
Currently it only supports 1-level LoDTensor. This operator is very useful when building a
reverse :ref:`api_fluid_layers_DynamicRNN` network.
.. code-block:: text
input(x) is a LoDTensor:
x.lod = [[0, 2, 5]]
x.data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13,14, 15, 16],
[17,18, 19, 20]]
x.shape = [5, 4]
output LoDTensor with same shape and LoD info:
out.lod = [[0, 2, 5]]
out.data = [[5, 6, 7, 8],
[1, 2, 3, 4],
[17,18, 19, 20],
[13,14, 15, 16],
[9, 10, 11, 12]]
out.shape = [5, 4]
Args:
x(Variable): LoDTensor with 1-level LoD info. Currently it only supports 1-level LoDTensor.
The data type should be float32, float64, int8, int32 or int64.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
        Variable: The LoDTensor reversed from the input. Its data type is the same as that of the input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
x_reversed = paddle.static.nn.sequence_reverse(x)
"""
assert not _non_static_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper("sequence_reverse", **locals())
check_variable_and_dtype(x, 'x',
['float32', 'float64', 'int8', 'int32', 'int64'],
'fluid.layers.sequence_reverse')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="sequence_reverse",
inputs={"X": x},
outputs={"Y": out},
attrs=dict())
return out
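# Illustrative sketch, not part of the Paddle API: per-sequence reversal as documented
# above, expressed over a NumPy array plus 1-level LoD offsets; all names are hypothetical.
def _sequence_reverse_reference(data, lod_offsets):
    out = data.copy()
    for start, end in zip(lod_offsets[:-1], lod_offsets[1:]):
        out[start:end] = data[start:end][::-1]  # reverse the rows of each sequence
    return out
# With the docstring example (lod [[0, 2, 5]]), rows 0-1 and rows 2-4 are reversed
# independently, reproducing out.data shown above.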
| 40.874486 | 162 | 0.545801 |
4a213abf2882edb3eab0db456d246f2f9011c0c2
| 685 |
py
|
Python
|
carpyncho2/webproject/migrations/0005_auto_20161025_2357.py
|
carpyncho/yeolde_carpyncho
|
fba72ebf9d4a3e4e4ea18160310058c6812a0457
|
[
"BSD-3-Clause"
] | null | null | null |
carpyncho2/webproject/migrations/0005_auto_20161025_2357.py
|
carpyncho/yeolde_carpyncho
|
fba72ebf9d4a3e4e4ea18160310058c6812a0457
|
[
"BSD-3-Clause"
] | 2 |
2020-06-05T19:37:26.000Z
|
2020-06-05T19:40:38.000Z
|
carpyncho2/webproject/migrations/0005_auto_20161025_2357.py
|
carpyncho/yeolde_carpyncho
|
fba72ebf9d4a3e4e4ea18160310058c6812a0457
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-10-25 23:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webproject', '0004_auto_20160228_2217'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='affiliation',
field=models.CharField(help_text=b'', max_length=500),
),
migrations.AlterField(
model_name='profile',
name='note',
field=models.TextField(help_text=b'Why do you want a Capyncho user? (or any other commentary)'),
),
]
| 26.346154 | 108 | 0.608759 |
4a213b966f2b5fb377bb724c4bb3f4bd04bcb57c
| 23,503 |
py
|
Python
|
tests/test_modules/test_ADCore/test_hdfwriterpart.py
|
thomascobb/pymalcolm
|
801f8fe6217c0c028b5edc87fa0aef9d60b91d9d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modules/test_ADCore/test_hdfwriterpart.py
|
thomascobb/pymalcolm
|
801f8fe6217c0c028b5edc87fa0aef9d60b91d9d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modules/test_ADCore/test_hdfwriterpart.py
|
thomascobb/pymalcolm
|
801f8fe6217c0c028b5edc87fa0aef9d60b91d9d
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
from xml.etree import ElementTree
import cothread
from mock import MagicMock, call, patch
from scanpointgenerator import CompoundGenerator, LineGenerator, SpiralGenerator
from malcolm.core import Context, Future, Process, TimeoutError
from malcolm.modules.ADCore.blocks import hdf_writer_block
from malcolm.modules.ADCore.infos import (
CalculatedNDAttributeDatasetInfo,
FilePathTranslatorInfo,
NDArrayDatasetInfo,
NDAttributeDatasetInfo,
)
from malcolm.modules.ADCore.parts import HDFWriterPart
from malcolm.modules.ADCore.parts.hdfwriterpart import greater_than_zero
from malcolm.modules.ADCore.util import AttributeDatasetType
from malcolm.modules.builtin.defines import tmp_dir
from malcolm.modules.scanning.controllers import RunnableController
from malcolm.modules.scanning.util import DatasetType
from malcolm.testutil import ChildTestCase
expected_xml = (
'<?xml version="1.0" ?>\n'
"<hdf5_layout>\n"
'<group name="entry">\n'
'<attribute name="NX_class" source="constant" type="string" value="NXentry" />\n'
'<group name="detector">\n'
'<attribute name="signal" source="constant" type="string" value="detector" />\n'
'<attribute name="axes" source="constant" type="string" '
'value="energy_set,.,.,." />\n'
'<attribute name="NX_class" source="constant" type="string" value="NXdata" />\n'
'<attribute name="energy_set_indices" source="constant" type="string" '
'value="0" />\n'
'<dataset name="energy_set" source="constant" type="float" value="13,15.2">\n'
'<attribute name="units" source="constant" type="string" value="kEv" />\n'
"</dataset>\n"
'<attribute name="x_set_indices" source="constant" type="string" value="1" />\n'
'<dataset name="x_set" source="constant" type="float" '
'value="0.473264298891,-1.28806365331,-1.11933765723,0.721339144968,2.26130106714,'
"2.3717213098,1.08574712174,-0.863941392256,-2.59791589857,-3.46951769442,"
"-3.22399679412,-1.98374931946,-0.132541097885,1.83482458567,3.45008680308,"
'4.36998121172,4.42670524204,3.63379270355,2.15784413199,0.269311496406">\n'
'<attribute name="units" source="constant" type="string" value="mm" />\n'
"</dataset>\n"
'<attribute name="y_set_indices" source="constant" type="string" value="1" />\n'
'<dataset name="y_set" source="constant" type="float" value="-0.64237113553,'
"-0.500750778455,1.38930992616,1.98393756064,0.784917470231,-1.17377831157,"
"-2.66405897615,-2.9669684623,-2.01825893141,-0.24129368636,1.72477821509,"
"3.27215424484,3.98722048131,3.71781556747,2.5610299588,0.799047653518,"
'-1.18858453138,-3.01284626565,-4.34725663835,-4.9755042398">\n'
'<attribute name="units" source="constant" type="string" value="mm" />\n'
"</dataset>\n"
'<dataset det_default="true" name="detector" source="detector">\n'
'<attribute name="NX_class" source="constant" type="string" value="SDS" />\n'
"</dataset>\n"
"</group>\n"
'<group name="sum">\n'
'<attribute name="signal" source="constant" type="string" value="sum" />\n'
'<attribute name="axes" source="constant" type="string" '
'value="energy_set,.,.,." />\n'
'<attribute name="NX_class" source="constant" type="string" value="NXdata" />\n'
'<attribute name="energy_set_indices" source="constant" type="string" '
'value="0" />\n'
'<hardlink name="energy_set" target="/entry/detector/energy_set" />\n'
'<attribute name="x_set_indices" source="constant" type="string" value="1" />\n'
'<hardlink name="x_set" target="/entry/detector/x_set" />\n'
'<attribute name="y_set_indices" source="constant" type="string" value="1" />\n'
'<hardlink name="y_set" target="/entry/detector/y_set" />\n'
'<dataset name="sum" ndattribute="StatsTotal" source="ndattribute" />\n'
"</group>\n"
'<group name="I0.data">\n'
'<attribute name="signal" source="constant" type="string" value="I0.data" />\n'
'<attribute name="axes" source="constant" type="string" '
'value="energy_set,.,.,." />\n'
'<attribute name="NX_class" source="constant" type="string" value="NXdata" />\n'
'<attribute name="energy_set_indices" source="constant" type="string" '
'value="0" />\n'
'<hardlink name="energy_set" target="/entry/detector/energy_set" />\n'
'<attribute name="x_set_indices" source="constant" type="string" value="1" />\n'
'<hardlink name="x_set" target="/entry/detector/x_set" />\n'
'<attribute name="y_set_indices" source="constant" type="string" value="1" />\n'
'<hardlink name="y_set" target="/entry/detector/y_set" />\n'
'<dataset name="I0.data" ndattribute="COUNTER1.COUNTER" source="ndattribute" />\n'
"</group>\n"
'<group name="It.data">\n'
'<attribute name="signal" source="constant" type="string" value="It.data" />\n'
'<attribute name="axes" source="constant" type="string" '
'value="energy_set,.,.,." />\n'
'<attribute name="NX_class" source="constant" type="string" value="NXdata" />\n'
'<attribute name="energy_set_indices" source="constant" type="string" '
'value="0" />\n'
'<hardlink name="energy_set" target="/entry/detector/energy_set" />\n'
'<attribute name="x_set_indices" source="constant" type="string" value="1" />\n'
'<hardlink name="x_set" target="/entry/detector/x_set" />\n'
'<attribute name="y_set_indices" source="constant" type="string" value="1" />\n'
'<hardlink name="y_set" target="/entry/detector/y_set" />\n'
'<dataset name="It.data" ndattribute="COUNTER2.COUNTER" source="ndattribute" />\n'
"</group>\n"
'<group name="t1x.value">\n'
'<attribute name="signal" source="constant" type="string" value="t1x.value" />\n'
'<attribute name="axes" source="constant" type="string" '
'value="energy_set,.,.,." />\n'
'<attribute name="NX_class" source="constant" type="string" value="NXdata" />\n'
'<attribute name="energy_set_indices" source="constant" type="string" '
'value="0" />\n'
'<hardlink name="energy_set" target="/entry/detector/energy_set" />\n'
'<attribute name="x_set_indices" source="constant" type="string" value="1" />\n'
'<hardlink name="x_set" target="/entry/detector/x_set" />\n'
'<attribute name="y_set_indices" source="constant" type="string" value="1" />\n'
'<hardlink name="y_set" target="/entry/detector/y_set" />\n'
'<dataset name="t1x.value" ndattribute="INENC1.VAL" source="ndattribute" />\n'
"</group>\n"
'<group name="NDAttributes" ndattr_default="true">\n'
'<attribute name="NX_class" source="constant" type="string" '
'value="NXcollection" />\n'
'<dataset name="NDArrayUniqueId" ndattribute="NDArrayUniqueId" '
'source="ndattribute" />\n'
'<dataset name="NDArrayTimeStamp" ndattribute="NDArrayTimeStamp" '
'source="ndattribute" />\n'
"</group>\n"
"</group>\n"
"</hdf5_layout>\n"
)
expected_xml_limited_attr = expected_xml.replace(
'<group name="NDAttributes" ndattr_default="true">',
'<group name="NDAttributes" ndattr_default="false">',
)
class TestHDFWriterPart(ChildTestCase):
maxDiff = None
def setUp(self):
self.process = Process("Process")
self.context = Context(self.process)
self.child = self.create_child_block(
hdf_writer_block, self.process, mri="BLOCK:HDF5", prefix="prefix"
)
self.config_dir = tmp_dir("config_dir")
self.process.start()
def tearDown(self):
self.process.stop(2)
shutil.rmtree(self.config_dir.value)
def test_init(self):
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
self.context.set_notify_dispatch_request(self.o.notify_dispatch_request)
c = RunnableController("mri", self.config_dir.value)
c.add_part(self.o)
self.process.add_controller(c)
b = c.block_view()
assert list(b.configure.meta.takes.elements) == [
"generator",
"fileDir",
"axesToMove",
"breakpoints",
"formatName",
"fileTemplate",
]
@patch("malcolm.modules.ADCore.parts.hdfwriterpart.check_driver_version")
def test_validate(self, check_mock):
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
# Version check should not be called with require_version None
self.o.require_version = None
self.set_attributes(self.child, driverVersion="1.1")
self.o.on_validate(self.context)
check_mock.assert_not_called()
# Test version check called if required_version not None
self.o.required_version = "1.0"
self.o.on_validate(self.context)
check_mock.assert_called_once_with("1.1", "1.0")
def configure_and_check_output(self, on_windows=False):
energy = LineGenerator("energy", "kEv", 13.0, 15.2, 2)
spiral = SpiralGenerator(["x", "y"], ["mm", "mm"], [0.0, 0.0], 5.0, scale=2.0)
generator = CompoundGenerator([energy, spiral], [], [], 0.1)
generator.prepare()
fileDir = "/tmp"
formatName = "xspress3"
fileTemplate = "thing-%s.h5"
completed_steps = 0
steps_to_do = 38
part_info = {
"DET": [NDArrayDatasetInfo(2)],
"PANDA": [
NDAttributeDatasetInfo.from_attribute_type(
"I0", AttributeDatasetType.DETECTOR, "COUNTER1.COUNTER"
),
NDAttributeDatasetInfo.from_attribute_type(
"It", AttributeDatasetType.MONITOR, "COUNTER2.COUNTER"
),
NDAttributeDatasetInfo.from_attribute_type(
"t1x", AttributeDatasetType.POSITION, "INENC1.VAL"
),
],
"STAT": [CalculatedNDAttributeDatasetInfo("sum", "StatsTotal")],
}
if on_windows:
part_info["WINPATH"] = [FilePathTranslatorInfo("Y", "/tmp", "")]
infos = self.o.on_configure(
self.context,
completed_steps,
steps_to_do,
part_info,
generator,
fileDir,
formatName,
fileTemplate,
)
assert len(infos) == 8
assert infos[0].name == "xspress3.data"
assert infos[0].filename == "thing-xspress3.h5"
assert infos[0].type == DatasetType.PRIMARY
assert infos[0].rank == 4
assert infos[0].path == "/entry/detector/detector"
assert infos[0].uniqueid == "/entry/NDAttributes/NDArrayUniqueId"
assert infos[1].name == "xspress3.sum"
assert infos[1].filename == "thing-xspress3.h5"
assert infos[1].type == DatasetType.SECONDARY
assert infos[1].rank == 2
assert infos[1].path == "/entry/sum/sum"
assert infos[1].uniqueid == "/entry/NDAttributes/NDArrayUniqueId"
assert infos[2].name == "I0.data"
assert infos[2].filename == "thing-xspress3.h5"
assert infos[2].type == DatasetType.PRIMARY
assert infos[2].rank == 2
assert infos[2].path == "/entry/I0.data/I0.data"
assert infos[2].uniqueid == "/entry/NDAttributes/NDArrayUniqueId"
assert infos[3].name == "It.data"
assert infos[3].filename == "thing-xspress3.h5"
assert infos[3].type == DatasetType.MONITOR
assert infos[3].rank == 2
assert infos[3].path == "/entry/It.data/It.data"
assert infos[3].uniqueid == "/entry/NDAttributes/NDArrayUniqueId"
assert infos[4].name == "t1x.value"
assert infos[4].filename == "thing-xspress3.h5"
assert infos[4].type == DatasetType.POSITION_VALUE
assert infos[4].rank == 2
assert infos[4].path == "/entry/t1x.value/t1x.value"
assert infos[4].uniqueid == "/entry/NDAttributes/NDArrayUniqueId"
assert infos[5].name == "energy.value_set"
assert infos[5].filename == "thing-xspress3.h5"
assert infos[5].type == DatasetType.POSITION_SET
assert infos[5].rank == 1
assert infos[5].path == "/entry/detector/energy_set"
assert infos[5].uniqueid == ""
assert infos[6].name == "x.value_set"
assert infos[6].filename == "thing-xspress3.h5"
assert infos[6].type == DatasetType.POSITION_SET
assert infos[6].rank == 1
assert infos[6].path == "/entry/detector/x_set"
assert infos[6].uniqueid == ""
assert infos[7].name == "y.value_set"
assert infos[7].filename == "thing-xspress3.h5"
assert infos[7].type == DatasetType.POSITION_SET
assert infos[7].rank == 1
assert infos[7].path == "/entry/detector/y_set"
assert infos[7].uniqueid == ""
expected_xml_filename_local = "/tmp/BLOCK_HDF5-layout.xml"
if on_windows:
expected_xml_filename_remote = "Y:\\BLOCK_HDF5-layout.xml"
expected_filepath = "Y:" + os.sep
else:
expected_xml_filename_remote = expected_xml_filename_local
expected_filepath = "/tmp" + os.sep
# Wait for the start_future so the post gets through to our child
# even on non-cothread systems
self.o.start_future.result(timeout=1)
assert self.child.handled_requests.mock_calls == [
call.put("positionMode", True),
call.put("arrayCounter", 0),
call.put("dimAttDatasets", True),
call.put("enableCallbacks", True),
call.put("fileName", "xspress3"),
call.put("filePath", expected_filepath),
call.put("fileTemplate", "%sthing-%s.h5"),
call.put("fileWriteMode", "Stream"),
call.put("lazyOpen", True),
call.put("storeAttr", True),
call.put("swmrMode", True),
call.put("extraDimSize3", 1),
call.put("extraDimSize4", 1),
call.put("extraDimSize5", 1),
call.put("extraDimSize6", 1),
call.put("extraDimSize7", 1),
call.put("extraDimSize8", 1),
call.put("extraDimSize9", 1),
call.put("extraDimSizeN", 20),
call.put("extraDimSizeX", 2),
call.put("extraDimSizeY", 1),
call.put("numExtraDims", 1),
call.put("posNameDim3", ""),
call.put("posNameDim4", ""),
call.put("posNameDim5", ""),
call.put("posNameDim6", ""),
call.put("posNameDim7", ""),
call.put("posNameDim8", ""),
call.put("posNameDim9", ""),
call.put("posNameDimN", "d1"),
call.put("posNameDimX", "d0"),
call.put("posNameDimY", ""),
call.put("flushAttrPerNFrames", 0),
call.put("flushDataPerNFrames", 38),
call.put("xmlLayout", expected_xml_filename_remote),
call.put("numCapture", 0),
call.post("start"),
call.when_value_matches("arrayCounterReadback", greater_than_zero, None),
]
with open(expected_xml_filename_local) as f:
actual_xml = f.read().replace(">", ">\n")
# Check the layout filename Malcolm uses for file creation
assert self.o.layout_filename == expected_xml_filename_local
return actual_xml
@staticmethod
def mock_xml_is_valid_check(part):
mock_xml_layout_value = MagicMock(name="mock_xml_layout_value")
mock_xml_layout_value.return_value = True
part._check_xml_is_valid = mock_xml_layout_value
def test_configure(self):
self.mock_when_value_matches(self.child)
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
self.mock_xml_is_valid_check(self.o)
self.context.set_notify_dispatch_request(self.o.notify_dispatch_request)
actual_xml = self.configure_and_check_output()
assert actual_xml == expected_xml
actual_tree = ElementTree.XML(actual_xml)
expected_tree = ElementTree.XML(expected_xml)
assert ElementTree.dump(actual_tree) == ElementTree.dump(expected_tree)
def test_honours_write_all_attributes_flag(self):
self.mock_when_value_matches(self.child)
self.o = HDFWriterPart(
name="m", mri="BLOCK:HDF5", write_all_nd_attributes=False
)
self.mock_xml_is_valid_check(self.o)
self.context.set_notify_dispatch_request(self.o.notify_dispatch_request)
actual_xml = self.configure_and_check_output()
actual_tree = ElementTree.XML(actual_xml)
expected_tree = ElementTree.XML(expected_xml)
assert ElementTree.dump(actual_tree) == ElementTree.dump(expected_tree)
def test_configure_windows(self):
self.mock_when_value_matches(self.child)
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5", runs_on_windows=True)
self.mock_xml_is_valid_check(self.o)
self.context.set_notify_dispatch_request(self.o.notify_dispatch_request)
actual_xml = self.configure_and_check_output(on_windows=True)
assert actual_xml == expected_xml
actual_tree = ElementTree.XML(actual_xml)
expected_tree = ElementTree.XML(expected_xml)
assert ElementTree.dump(actual_tree) == ElementTree.dump(expected_tree)
def test_run(self):
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
self.context.set_notify_dispatch_request(self.o.notify_dispatch_request)
self.o.done_when_captured = 38
# Need a registrar object or we get AssertionError
self.o.registrar = MagicMock()
# Run waits for this value, so say we have finished immediately
self.set_attributes(self.child, numCapturedReadback=self.o.done_when_captured)
self.mock_when_value_matches(self.child)
# Run
self.o.on_run(self.context)
# Check calls
assert self.child.handled_requests.mock_calls == [
call.when_value_matches("numCapturedReadback", 38, None)
]
        assert self.o.registrar.report.called
assert self.o.registrar.report.call_args_list[0][0][0].steps == 38
def test_run_and_flush(self):
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
def set_num_captured():
# Sleep for 2.5 seconds to ensure 2 flushes, and then set value to finish
cothread.Sleep(2.5)
self.set_attributes(
self.child, numCapturedReadback=self.o.done_when_captured
)
self.o.done_when_captured = 5
# Say that we're getting the first frame
self.o.first_array_future = Future(None)
self.o.first_array_future.set_result(None)
self.o.start_future = Future(None)
# Need a registrar object or we get AssertionError
self.o.registrar = MagicMock()
# Reduce frame timeout so we don't hang on this test for too long
self.o.frame_timeout = 5
# Spawn process to finish it after a few seconds
self.process.spawn(set_num_captured)
# Run
self.o.on_run(self.context)
# Check calls
assert self.child.handled_requests.mock_calls == [
call.post("flushNow"),
call.post("flushNow"),
]
        assert self.o.registrar.report.called
assert self.o.registrar.report.call_args_list[0][0][0].steps == 0
assert self.o.registrar.report.call_args_list[1][0][0].steps == 5
def test_run_raises_TimeoutError_for_stalled_writer(self):
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
self.context.set_notify_dispatch_request(self.o.notify_dispatch_request)
self.o.done_when_captured = 10
# Need a registrar object or we get AssertionError
self.o.registrar = MagicMock()
self.o.start_future = MagicMock()
# Mock the last update
self.o.last_capture_update = MagicMock()
self.o.last_capture_update.return_value = 0.0
# Set a short timeout for testing
self.o.frame_timeout = 0.1
# Now check the error is raised
self.assertRaises(TimeoutError, self.o.on_run, self.context)
def test_seek(self):
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
self.context.set_notify_dispatch_request(self.o.notify_dispatch_request)
# Num captured readback usually reads a bit higher than the completed steps
# after a pause is requested
completed_steps = 4
self.set_attributes(self.child, numCapturedReadback=6)
# Call the seek
steps_to_do = 3
self.o.on_seek(self.context, completed_steps, steps_to_do)
# We expect done when captured to be the current captured readback + steps to do
assert self.o.done_when_captured == 9
def test_post_run_ready(self):
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
self.context.set_notify_dispatch_request(self.o.notify_dispatch_request)
# Say that we've returned from start
self.o.start_future = Future(None)
self.o.start_future.set_result(None)
fname = "/tmp/test_filename"
with open(fname, "w") as f:
f.write("thing")
assert os.path.isfile(fname)
self.o.layout_filename = fname
self.o.on_post_run_ready(self.context)
assert self.child.handled_requests.mock_calls == []
assert os.path.isfile(fname)
self.o.on_reset(self.context)
assert not os.path.isfile(fname)
def test_post_run_ready_not_done_flush(self):
# Say that we've returned from start
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
self.o.start_future = Future(None)
fname = "/tmp/test_filename"
with open(fname, "w") as f:
f.write("thing")
assert os.path.isfile(fname)
self.o.layout_filename = fname
self.o.on_post_run_ready(self.context)
assert self.child.handled_requests.mock_calls == [call.post("flushNow")]
assert os.path.isfile(fname)
self.o.on_reset(self.context)
assert not os.path.isfile(fname)
def test_check_xml_is_valid_method_succeeds_for_valid_value(self):
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
child = MagicMock(name="child_mock")
child.xmlLayoutValid.value = True
try:
self.o._check_xml_is_valid(child)
except AssertionError:
self.fail("_check_xml_is_valid() threw unexpected AssertionError")
def test_check_xml_is_valid_method_throws_AssertionError_for_bad_value(self):
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
child = MagicMock(name="child_mock")
child.xmlLayoutValid.value = False
child.xmlErrorMsg.value = "XML description file cannot be opened"
self.assertRaises(AssertionError, self.o._check_xml_is_valid, child)
@patch("malcolm.modules.ADCore.parts.hdfwriterpart.time.time")
def test_has_file_writing_stalled(self, time_mock):
self.o = HDFWriterPart(name="m", mri="BLOCK:HDF5")
# First case - no last capture update so return False
assert self.o._has_file_writing_stalled() is False
# Set up the attributes and mock for the last two cases
self.o.last_capture_update = 10.0
self.o.frame_timeout = 60.0
time_mock.side_effect = [30.0, 71.0]
# Second case - last capture update is within frame timeout
assert self.o._has_file_writing_stalled() is False
# Final case - last capture update is outside frame timeout
assert self.o._has_file_writing_stalled() is True
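# Illustrative sketch only: test_has_file_writing_stalled above drives
# _has_file_writing_stalled() with a mocked time.time(). The real implementation lives in
# malcolm.modules.ADCore.parts.hdfwriterpart; the hypothetical reference below merely
# spells out the behaviour the three asserted cases imply (no capture update yet -> False,
# a recent update -> False, an update older than frame_timeout -> True).
def _has_file_writing_stalled_reference(last_capture_update, frame_timeout):
    import time
    if last_capture_update is None:
        return False
    return time.time() - last_capture_update > frame_timeout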
| 44.597723 | 88 | 0.643918 |
4a213b9f6526a9a193d0ca40c8a18de360095ba1
| 22,968 |
py
|
Python
|
python/ccxt/test/test_async.py
|
Dan-krm/ccxt
|
2ed8b7b8598e2934559822d81a8d14885b4d4ad3
|
[
"MIT"
] | 1 |
2021-11-16T15:45:34.000Z
|
2021-11-16T15:45:34.000Z
|
python/ccxt/test/test_async.py
|
Dan-krm/ccxt
|
2ed8b7b8598e2934559822d81a8d14885b4d4ad3
|
[
"MIT"
] | null | null | null |
python/ccxt/test/test_async.py
|
Dan-krm/ccxt
|
2ed8b7b8598e2934559822d81a8d14885b4d4ad3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
import asyncio
import json
# import logging
import os
import sys
import time # noqa: F401
from os import _exit
from traceback import format_tb
# ------------------------------------------------------------------------------
# logging.basicConfig(level=logging.INFO)
# ------------------------------------------------------------------------------
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root)
# ------------------------------------------------------------------------------
import ccxt.async_support as ccxt # noqa: E402
from test_trade import test_trade # noqa: E402
from test_order import test_order # noqa: E402
from test_ohlcv import test_ohlcv # noqa: E402
from test_position import test_position # noqa: E402
from test_transaction import test_transaction # noqa: E402
# ------------------------------------------------------------------------------
class Argv(object):
token_bucket = False
verbose = False
nonce = None
exchange = None
symbol = None
pass
argv = Argv()
parser = argparse.ArgumentParser()
parser.add_argument('--token_bucket', action='store_true', help='enable token bucket experimental test')
parser.add_argument('--verbose', action='store_true', help='enable verbose output')
parser.add_argument('--nonce', type=int, help='integer')
parser.add_argument('exchange', type=str, help='exchange id in lowercase', nargs='?')
parser.add_argument('symbol', type=str, help='symbol in uppercase', nargs='?')
parser.parse_args(namespace=argv)
exchanges = {}
# ------------------------------------------------------------------------------
path = os.path.dirname(ccxt.__file__)
if 'site-packages' in os.path.dirname(ccxt.__file__):
raise Exception("You are running test_async.py/test.py against a globally-installed version of the library! It was previously installed into your site-packages folder by pip or pip3. To ensure testing against the local folder uninstall it first with pip uninstall ccxt or pip3 uninstall ccxt")
# ------------------------------------------------------------------------------
# string coloring functions
def style(s, style):
return str(s) # style + str (s) + '\033[0m'
def green(s):
return style(s, '\033[92m')
def blue(s):
return style(s, '\033[94m')
def yellow(s):
return style(s, '\033[93m')
def red(s):
return style(s, '\033[91m')
def pink(s):
return style(s, '\033[95m')
def bold(s):
return style(s, '\033[1m')
def underline(s):
return style(s, '\033[4m')
# print a colored string
def dump(*args):
print(' '.join([str(arg) for arg in args]))
# print an error string
def dump_error(*args):
string = ' '.join([str(arg) for arg in args])
print(string)
sys.stderr.write(string + "\n")
sys.stderr.flush()
# ------------------------------------------------------------------------------
def handle_all_unhandled_exceptions(type, value, traceback):
dump_error(yellow(type), yellow(value), '\n\n' + yellow('\n'.join(format_tb(traceback))))
_exit(1) # unrecoverable crash
sys.excepthook = handle_all_unhandled_exceptions
# ------------------------------------------------------------------------------
async def test_order_book(exchange, symbol):
method = 'fetchOrderBook'
if exchange.has[method]:
delay = int(exchange.rateLimit / 1000)
await asyncio.sleep(delay)
# dump(green(exchange.id), green(symbol), 'fetching order book...')
orderbook = await getattr(exchange, method)(symbol)
dump(
green(exchange.id),
green(symbol),
'order book',
orderbook['datetime'],
'bid: ' + str(orderbook['bids'][0][0] if len(orderbook['bids']) else 'N/A'),
'bidVolume: ' + str(orderbook['bids'][0][1] if len(orderbook['bids']) else 'N/A'),
'ask: ' + str(orderbook['asks'][0][0] if len(orderbook['asks']) else 'N/A'),
'askVolume: ' + str(orderbook['asks'][0][1] if len(orderbook['asks']) else 'N/A'))
else:
dump(yellow(exchange.id), method + '() is not supported')
# ------------------------------------------------------------------------------
async def test_ohlcvs(exchange, symbol):
method = 'fetchOHLCV'
ignored_exchanges = [
'cex', # CEX can return historical candles for a certain date only
'okex', # okex fetchOHLCV counts "limit" candles from current time backwards
'okcoinusd', # okex base class
]
if exchange.id in ignored_exchanges:
return
if exchange.has[method]:
delay = int(exchange.rateLimit / 1000)
await asyncio.sleep(delay)
timeframes = exchange.timeframes if exchange.timeframes else {'1d': '1d'}
timeframe = list(timeframes.keys())[0]
limit = 10
duration = exchange.parse_timeframe(timeframe)
since = exchange.milliseconds() - duration * limit * 1000 - 1000
ohlcvs = await getattr(exchange, method)(symbol, timeframe, since, limit)
for ohlcv in ohlcvs:
test_ohlcv(exchange, ohlcv, symbol, int(time.time() * 1000))
dump(green(exchange.id), 'fetched', green(len(ohlcvs)), 'OHLCVs')
else:
dump(yellow(exchange.id), method + '() is not supported')
# ------------------------------------------------------------------------------
async def test_tickers(exchange, symbol):
method = 'fetchTickers'
ignored_exchanges = [
'digifinex', # requires apiKey to call v2 tickers
]
if exchange.id in ignored_exchanges:
return
if exchange.has[method]:
delay = int(exchange.rateLimit / 1000)
await asyncio.sleep(delay)
tickers = None
try:
# dump(green(exchange.id), 'fetching all tickers at once...')
tickers = await getattr(exchange, method)()
dump(green(exchange.id), 'fetched all', green(len(list(tickers.keys()))), 'tickers')
except Exception as e:
dump(green(exchange.id), 'failed to fetch all tickers, fetching multiple tickers at once...')
tickers = await exchange.fetch_tickers([symbol])
dump(green(exchange.id), 'fetched', green(len(list(tickers.keys()))), 'tickers')
elif argv.token_bucket:
await test_tickers_async(exchange)
if argv.token_bucket:
await test_l2_order_books_async(exchange)
# ------------------------------------------------------------------------------
def get_active_symbols(exchange):
return [symbol for symbol in exchange.symbols if is_active_symbol(exchange, symbol)]
def is_active_symbol(exchange, symbol):
return ('.' not in symbol) and (('active' not in exchange.markets[symbol]) or (exchange.markets[symbol]['active']))
async def test_tickers_async(exchange):
print('Activated here')
dump(green(exchange.id), 'fetching all tickers by simultaneous multiple concurrent requests')
symbols_to_load = get_active_symbols(exchange)
input_coroutines = [exchange.fetch_ticker(symbol) for symbol in symbols_to_load]
tickers = await asyncio.gather(*input_coroutines, return_exceptions=True)
for ticker, symbol in zip(tickers, symbols_to_load):
if not isinstance(ticker, dict):
dump_error(red('[Error with symbol loading ticker]'),
' Symbol failed to load: {0}, ERROR: {1}'.format(symbol, ticker))
dump(green(exchange.id), 'fetched', green(len(list(tickers))), 'tickers')
async def test_l2_order_books_async(exchange):
dump(green(exchange.id), 'fetching all order books by simultaneous multiple concurrent requests')
symbols_to_load = get_active_symbols(exchange)
input_coroutines = [exchange.fetch_l2_order_book(symbol) for symbol in symbols_to_load]
orderbooks = await asyncio.gather(*input_coroutines, return_exceptions=True)
for orderbook, symbol in zip(orderbooks, symbols_to_load):
if not isinstance(orderbook, dict):
dump_error(red('[Error with symbol loading l2 order book]'),
' Symbol failed to load: {0}, ERROR: {1}'.format(symbol, orderbook))
dump(green(exchange.id), 'fetched', green(len(list(orderbooks))), 'order books')
# ------------------------------------------------------------------------------
async def test_ticker(exchange, symbol):
method = 'fetchTicker'
ignored_exchanges = [
'digifinex', # requires apiKey to call v2 tickers
]
if exchange.id in ignored_exchanges:
return
if exchange.has[method]:
delay = int(exchange.rateLimit / 1000)
await asyncio.sleep(delay)
ticker = await getattr(exchange, method)(symbol)
dump(
green(exchange.id),
green(symbol),
'ticker',
ticker['datetime'],
'high: ' + str(ticker['high']),
'low: ' + str(ticker['low']),
'bid: ' + str(ticker['bid']),
'ask: ' + str(ticker['ask']),
'volume: ' + str(ticker['quoteVolume']))
else:
dump(green(exchange.id), green(symbol), method + '() is not supported')
# ------------------------------------------------------------------------------
async def test_trades(exchange, symbol):
method = 'fetchTrades'
if exchange.has[method]:
delay = int(exchange.rateLimit / 1000)
await asyncio.sleep(delay)
# dump(green(exchange.id), green(symbol), 'fetching trades...')
trades = await getattr(exchange, method)(symbol)
if trades:
test_trade(exchange, trades[0], symbol, int(time.time() * 1000))
dump(green(exchange.id), green(symbol), 'fetched', green(len(trades)), 'trades')
else:
dump(green(exchange.id), green(symbol), method + '() is not supported')
# ------------------------------------------------------------------------------
async def test_orders(exchange, symbol):
method = 'fetchOrders'
if exchange.has[method]:
skipped_exchanges = [
'bitmart',
'rightbtc',
]
if exchange.id in skipped_exchanges:
dump(green(exchange.id), green(symbol), method + '() skipped')
return
delay = int(exchange.rateLimit / 1000)
await asyncio.sleep(delay)
# dump(green(exchange.id), green(symbol), 'fetching orders...')
orders = await exchange.fetch_orders(symbol)
for order in orders:
test_order(exchange, order, symbol, int(time.time() * 1000))
dump(green(exchange.id), green(symbol), 'fetched', green(len(orders)), 'orders')
else:
dump(green(exchange.id), green(symbol), method + '() is not supported')
# ------------------------------------------------------------------------------
async def test_positions(exchange, symbol):
method = 'fetchPositions'
if exchange.has[method]:
skipped_exchanges = [
]
if exchange.id in skipped_exchanges:
dump(green(exchange.id), green(symbol), method + '() skipped')
return
delay = int(exchange.rateLimit / 1000)
await asyncio.sleep(delay)
# without symbol
dump(green(exchange.id), 'fetching positions...')
positions = await getattr(exchange, method)()
for position in positions:
test_position(exchange, position, None, int(time.time() * 1000))
dump(green(exchange.id), 'fetched', green(len(positions)), 'positions')
# with symbol
dump(green(exchange.id), green(symbol), 'fetching positions...')
positions = await getattr(exchange, method)([symbol])
for position in positions:
test_position(exchange, position, symbol, int(time.time() * 1000))
dump(green(exchange.id), green(symbol), 'fetched', green(len(positions)), 'positions')
else:
dump(green(exchange.id), green(symbol), method + '() is not supported')
# ------------------------------------------------------------------------------
async def test_closed_orders(exchange, symbol):
method = 'fetchClosedOrders'
if exchange.has[method]:
delay = int(exchange.rateLimit / 1000)
await asyncio.sleep(delay)
# dump(green(exchange.id), green(symbol), 'fetching orders...')
orders = await getattr(exchange, method)(symbol)
for order in orders:
test_order(exchange, order, symbol, int(time.time() * 1000))
assert order['status'] == 'closed' or order['status'] == 'canceled'
dump(green(exchange.id), green(symbol), 'fetched', green(len(orders)), 'closed orders')
else:
dump(green(exchange.id), green(symbol), method + '() is not supported')
# ------------------------------------------------------------------------------
async def test_open_orders(exchange, symbol):
method = 'fetchOpenOrders'
if exchange.has[method]:
delay = int(exchange.rateLimit / 1000)
await asyncio.sleep(delay)
# dump(green(exchange.id), green(symbol), 'fetching orders...')
orders = await getattr(exchange, method)(symbol)
for order in orders:
test_order(exchange, order, symbol, int(time.time() * 1000))
assert order['status'] == 'open'
dump(green(exchange.id), green(symbol), 'fetched', green(len(orders)), 'open orders')
else:
dump(green(exchange.id), green(symbol), method + '() is not supported')
# ------------------------------------------------------------------------------
async def test_transactions(exchange, code):
method = 'fetchTransactions'
if exchange.has[method]:
delay = int(exchange.rateLimit / 1000)
await asyncio.sleep(delay)
transactions = await getattr(exchange, method)(code)
for transaction in transactions:
test_transaction(exchange, transaction, code, int(time.time() * 1000))
dump(green(exchange.id), green(code), 'fetched', green(len(transactions)), 'transactions')
else:
dump(green(exchange.id), green(code), method + '() is not supported')
# ------------------------------------------------------------------------------
async def test_balance(exchange):
method = 'fetchBalance'
if exchange.has[method]:
delay = int(exchange.rateLimit / 1000)
await asyncio.sleep(delay)
await getattr(exchange, method)()
dump(green(exchange.id), 'fetched balance')
else:
dump(green(exchange.id), method + '() is not supported')
# ------------------------------------------------------------------------------
async def test_symbol(exchange, symbol, code):
dump(green('SYMBOL: ' + symbol))
dump(green('CODE: ' + code))
dump('Testing fetch_ticker:' + symbol)
await test_ticker(exchange, symbol)
dump('Testing fetch_tickers:' + symbol)
await test_tickers(exchange, symbol)
dump('Testing fetch_ohlcv:' + symbol)
await test_ohlcvs(exchange, symbol)
if exchange.id == 'coinmarketcap':
response = await exchange.fetchGlobal()
dump(green(response))
else:
dump('Testing fetch_order_book:' + symbol)
await test_order_book(exchange, symbol)
dump('Testing fetch_trades:' + symbol)
await test_trades(exchange, symbol)
if (not hasattr(exchange, 'apiKey') or (len(exchange.apiKey) < 1)):
return
method = 'signIn'
if exchange.has[method]:
dump('Testing ' + method + '()')
await getattr(exchange, method)()
dump('Testing fetch_orders:' + symbol)
await test_orders(exchange, symbol)
dump('Testing fetch_open_orders:' + symbol)
await test_open_orders(exchange, symbol)
dump('Testing fetch_closed_orders:' + symbol)
await test_closed_orders(exchange, symbol)
dump('Testing fetch_transactions:' + code)
await test_transactions(exchange, code)
dump('Testing fetch_balance')
await test_balance(exchange)
dump('Testing fetch_positions:' + symbol)
await test_positions(exchange, symbol)
# ------------------------------------------------------------------------------
async def load_exchange(exchange):
await exchange.load_markets()
def get_test_symbol(exchange, symbols):
symbol = None
for s in symbols:
market = exchange.safe_value(exchange.markets, s)
if market is not None:
active = exchange.safe_value(market, 'active')
if active or (active is None):
symbol = s
break
return symbol
async def test_exchange(exchange, symbol=None):
dump(green('EXCHANGE: ' + exchange.id))
# delay = 2
# ..........................................................................
# public API
codes = [
'BTC',
'ETH',
'XRP',
'LTC',
'BCH',
'EOS',
'BNB',
'BSV',
'USDT',
'ATOM',
'BAT',
'BTG',
'DASH',
'DOGE',
'ETC',
'IOTA',
'LSK',
'MKR',
'NEO',
'PAX',
'QTUM',
'TRX',
'TUSD',
'USD',
'USDC',
'WAVES',
'XEM',
'XMR',
'ZEC',
'ZRX',
]
code = codes[0]
for i in range(0, len(codes)):
if codes[i] in exchange.currencies:
code = codes[i]
if not symbol:
symbol = get_test_symbol(exchange, [
'BTC/USD',
'BTC/USDT',
'BTC/CNY',
'BTC/EUR',
'BTC/ETH',
'ETH/BTC',
'ETH/USDT',
'BTC/JPY',
'LTC/BTC',
'USD/SLL',
'EUR/USD',
])
if symbol is None:
for code in codes:
markets = list(exchange.markets.values())
activeMarkets = [market for market in markets if market['base'] == code]
if len(activeMarkets):
activeSymbols = [market['symbol'] for market in activeMarkets]
symbol = get_test_symbol(exchange, activeSymbols)
break
if symbol is None:
markets = list(exchange.markets.values())
activeMarkets = [market for market in markets if market['base'] in codes]
activeSymbols = [market['symbol'] for market in activeMarkets]
symbol = get_test_symbol(exchange, activeSymbols)
if symbol is None:
markets = list(exchange.markets.values())
activeMarkets = [market for market in markets if not exchange.safe_value(market, 'active', False)]
activeSymbols = [market['symbol'] for market in activeMarkets]
symbol = get_test_symbol(exchange, activeSymbols)
if symbol is None:
symbol = get_test_symbol(exchange, exchange.symbols)
if symbol is None:
symbol = exchange.symbols[0]
if symbol.find('.d') < 0:
await test_symbol(exchange, symbol, code)
# ..........................................................................
# private API
# move to testnet/sandbox if possible before accessing the balance if possible
# if 'test' in exchange.urls:
# exchange.urls['api'] = exchange.urls['test']
# await asyncio.sleep(exchange.rateLimit / 1000)
# time.sleep(delay)
# amount = 1
# price = 0.0161
# marketBuy = exchange.create_market_buy_order(symbol, amount)
# print(marketBuy)
# time.sleep(delay)
# marketSell = exchange.create_market_sell_order(symbol, amount)
# print(marketSell)
# time.sleep(delay)
# limitBuy = exchange.create_limit_buy_order(symbol, amount, price)
# print(limitBuy)
# time.sleep(delay)
# limitSell = exchange.create_limit_sell_order(symbol, amount, price)
# print(limitSell)
# time.sleep(delay)
# ------------------------------------------------------------------------------
async def try_all_proxies(exchange, proxies=['']):
current_proxy = 0
max_retries = len(proxies)
if exchange.proxy in proxies:
current_proxy = proxies.index(exchange.proxy)
for num_retries in range(0, max_retries):
try:
exchange.proxy = proxies[current_proxy]
dump(green(exchange.id), 'using proxy', '`' + exchange.proxy + '`')
current_proxy = (current_proxy + 1) % len(proxies)
await load_exchange(exchange)
await test_exchange(exchange)
except (ccxt.RequestTimeout, ccxt.AuthenticationError, ccxt.NotSupported, ccxt.DDoSProtection, ccxt.ExchangeNotAvailable, ccxt.ExchangeError) as e:
print({'type': type(e).__name__, 'num_retries': num_retries, 'max_retries': max_retries}, str(e)[0:200])
if (num_retries + 1) == max_retries:
dump_error(yellow('[' + type(e).__name__ + ']'), str(e)[0:200])
else:
# no exception
return True
# exception
return False
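# Illustrative sketch only, not part of the ccxt test suite: the proxy rotation above can
# also be reused for a single ad-hoc run; the exchange id 'binance' is just an example.
async def run_single_exchange_example(exchange_id='binance'):
    exchange = getattr(ccxt, exchange_id)({'verbose': False})
    try:
        return await try_all_proxies(exchange, proxies)
    finally:
        await exchange.close()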
# ------------------------------------------------------------------------------
proxies = [
'',
'https://cors-anywhere.herokuapp.com/',
]
# prefer local testing keys to global keys
keys_folder = os.path.dirname(root)
keys_global = os.path.join(keys_folder, 'keys.json')
keys_local = os.path.join(keys_folder, 'keys.local.json')
keys_file = keys_local if os.path.exists(keys_local) else keys_global
# load the api keys from config
with open(keys_file, encoding='utf8') as file:
config = json.load(file)
# instantiate all exchanges
for id in ccxt.exchanges:
if id == 'theocean':
continue
exchange = getattr(ccxt, id)
exchange_config = {'verbose': argv.verbose}
if sys.version_info[0] < 3:
exchange_config.update()
if id in config:
exchange_config = ccxt.Exchange.deep_extend(exchange_config, config[id])
exchanges[id] = exchange(exchange_config)
# ------------------------------------------------------------------------------
async def main():
if argv.exchange:
if argv.exchange != 'theocean':
exchange = exchanges[argv.exchange]
symbol = argv.symbol
if hasattr(exchange, 'skip') and exchange.skip:
dump(green(exchange.id), 'skipped')
else:
if symbol:
await load_exchange(exchange)
                    # test_symbol() requires a currency code as well; use the base of the requested symbol
                    await test_symbol(exchange, symbol, symbol.split('/')[0])
else:
await try_all_proxies(exchange, proxies)
else:
for exchange in sorted(exchanges.values(), key=lambda x: x.id):
if hasattr(exchange, 'skip') and exchange.skip:
dump(green(exchange.id), 'skipped')
else:
await try_all_proxies(exchange, proxies)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
asyncio.run(main())
| 34.958904 | 297 | 0.563871 |
4a213c29113e5e23af2caf7fbcb807be3d0166d2
| 14,286 |
py
|
Python
|
python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py
|
jichangjichang/Paddle
|
4fa3cee5499c6df0ad6043b0cfa220d09f2034e8
|
[
"Apache-2.0"
] | 9 |
2017-12-04T02:58:01.000Z
|
2020-12-03T14:46:30.000Z
|
python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py
|
jichangjichang/Paddle
|
4fa3cee5499c6df0ad6043b0cfa220d09f2034e8
|
[
"Apache-2.0"
] | 7 |
2017-12-05T20:29:08.000Z
|
2018-10-15T08:57:40.000Z
|
python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py
|
jichangjichang/Paddle
|
4fa3cee5499c6df0ad6043b0cfa220d09f2034e8
|
[
"Apache-2.0"
] | 6 |
2018-03-19T22:38:46.000Z
|
2019-11-01T22:28:27.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from functools import partial
import paddle.fluid.core as core
from op_test import OpTest
# TestFusedElementwiseActivationOp
# TestFusedElementwiseActivationOp_scalar
# TestFusedElementwiseActivationOp_scalar2
# TestFusedElementwiseActivationOp_Vector
# TestFusedElementwiseActivationOp_broadcast_0
# TestFusedElementwiseActivationOp_broadcast_1
# TestFusedElementwiseActivationOp_broadcast_2
# TestFusedElementwiseActivationOp_broadcast_3
# TestFusedElementwiseActivationOp_broadcast_4
# TestFusedElementwiseActivationOp_rowwise_add_0
# TestFusedElementwiseActivationOp_rowwise_add_1
# TestFusedElementwiseActivationOp_channelwise_add
def create_test_class(test_case, callback, attrs):
class TestFusedElementwiseActivationOp_base(OpTest):
def setUp(self):
self.op_type = "fused_elemwise_activation"
self.dtype = np.float32
self.axis = -1
self.init_input()
self.init_output()
self.init_attr()
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
}
if self.attrs["keep_intermediate_value"]:
self.outputs = {
'Out': self.out,
"IntermediateOut": self.intermediate_out
}
else:
self.outputs = {'Out': self.out}
def init_input(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.axis = -1
def init_output(self):
self.x, self.y, self.intermediate_out, self.out = \
callback(self.x, self.y, self.x, self.y)
def init_attr(self):
self.attrs = {'axis': self.axis, }
for key in attrs.keys():
self.attrs[key] = attrs[key]
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
if self.attrs["keep_intermediate_value"]:
self.check_grad(
['X', 'Y'], ['Out', 'IntermediateOut'],
max_relative_error=0.005,
sum_outputs=['Out'])
else:
self.check_grad(['X', 'Y'], ['Out'], max_relative_error=0.005)
def test_check_grad_ingore_x(self):
if self.attrs["keep_intermediate_value"]:
self.check_grad(
['Y'], ['Out', 'IntermediateOut'],
max_relative_error=0.005,
no_grad_set=set("X"),
sum_outputs=['Out'])
else:
self.check_grad(
['Y'], ['Out'],
max_relative_error=0.005,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if self.attrs["keep_intermediate_value"]:
self.check_grad(
['X'], ['Out', 'IntermediateOut'],
max_relative_error=0.005,
no_grad_set=set("Y"),
sum_outputs=['Out'])
else:
self.check_grad(
['X'], ['Out'],
max_relative_error=0.005,
no_grad_set=set("Y"))
class TestFusedElementwiseActivationOp_scalar(
TestFusedElementwiseActivationOp_base):
def init_input(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1).astype(self.dtype)
class TestFusedElementwiseActivationOp_scalar2(
TestFusedElementwiseActivationOp_base):
def init_input(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1, 1).astype(self.dtype)
class TestFusedElementwiseActivationOp_Vector(
TestFusedElementwiseActivationOp_base):
def init_input(self):
self.x = np.random.random((32, )).astype(self.dtype)
self.y = np.random.random((32, )).astype(self.dtype)
class TestFusedElementwiseActivationOp_broadcast_0(
TestFusedElementwiseActivationOp_base):
def init_input(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(2).astype(self.dtype)
self.axis = 0
def init_output(self):
self.x, self.y, self.intermediate_out, self.out = \
callback(self.x, self.y, self.x, self.y.reshape(2, 1, 1))
class TestFusedElementwiseActivationOp_broadcast_1(
TestFusedElementwiseActivationOp_base):
def init_input(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(3).astype(self.dtype)
self.axis = 1
def init_output(self):
self.x, self.y, self.intermediate_out, self.out = \
callback(self.x, self.y, self.x, self.y.reshape(1, 3, 1))
class TestFusedElementwiseActivationOp_broadcast_2(
TestFusedElementwiseActivationOp_base):
def init_input(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(4).astype(self.dtype)
def init_output(self):
self.x, self.y, self.intermediate_out, self.out = \
callback(self.x, self.y, self.x, self.y.reshape(1, 1, 4))
class TestFusedElementwiseActivationOp_broadcast_3(
TestFusedElementwiseActivationOp_base):
def init_input(self):
self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype)
self.y = np.random.rand(3, 4).astype(self.dtype)
self.axis = 1
def init_output(self):
self.x, self.y, self.intermediate_out, self.out = \
callback(self.x, self.y, self.x, self.y.reshape(1, 3, 4, 1))
class TestFusedElementwiseActivationOp_broadcast_4(
TestFusedElementwiseActivationOp_base):
def init_input(self):
self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype)
self.y = np.random.rand(2, 1).astype(self.dtype)
self.axis = 0
def init_output(self):
self.x, self.y, self.intermediate_out, self.out = \
callback(self.x, self.y, self.x, self.y.reshape(2, 1, 1, 1))
class TestFusedElementwiseActivationOp_rowwise_add_0(
TestFusedElementwiseActivationOp_base):
def init_input(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(3, 4).astype(self.dtype)
self.axis = 1
def init_output(self):
self.x, self.y, self.intermediate_out, self.out = \
callback(self.x, self.y, self.x, self.y.reshape(1, 3, 4))
class TestFusedElementwiseActivationOp_rowwise_add_1(
TestFusedElementwiseActivationOp_base):
def init_input(self):
self.x = np.random.rand(2, 1).astype(self.dtype)
self.y = np.random.rand(1).astype(self.dtype)
self.axis = 1
def init_output(self):
self.x, self.y, self.intermediate_out, self.out = \
callback(self.x, self.y, self.x, self.y.reshape(1, 1))
class TestFusedElementwiseActivationOp_channelwise_add(
TestFusedElementwiseActivationOp_base):
def init_input(self):
self.x = np.random.rand(3, 20, 20).astype(self.dtype)
self.y = np.random.rand(3, 1, 1).astype(self.dtype)
TestFusedElementwiseActivationOp_base.__name__ = test_case + "_base"
TestFusedElementwiseActivationOp_scalar.__name__ = test_case + "_scalar"
TestFusedElementwiseActivationOp_scalar2.__name__ = test_case + "_scalar2"
TestFusedElementwiseActivationOp_Vector.__name__ = test_case + "_Vector"
TestFusedElementwiseActivationOp_broadcast_0.__name__ = test_case + "_broadcast_0"
TestFusedElementwiseActivationOp_broadcast_1.__name__ = test_case + "_broadcast_1"
TestFusedElementwiseActivationOp_broadcast_2.__name__ = test_case + "_broadcast_2"
TestFusedElementwiseActivationOp_broadcast_3.__name__ = test_case + "_broadcast_3"
TestFusedElementwiseActivationOp_broadcast_4.__name__ = test_case + "_broadcast_4"
TestFusedElementwiseActivationOp_rowwise_add_0.__name__ = test_case + "_rowwise_add_0"
TestFusedElementwiseActivationOp_rowwise_add_1.__name__ = test_case + "_rowwise_add_1"
TestFusedElementwiseActivationOp_channelwise_add.__name__ = test_case + "_channelwise_add"
globals()[test_case + "_base"] = TestFusedElementwiseActivationOp_base
globals()[test_case + "_scalar"] = TestFusedElementwiseActivationOp_scalar
globals()[test_case + "_scalar2"] = TestFusedElementwiseActivationOp_scalar2
globals()[test_case + "_Vector"] = TestFusedElementwiseActivationOp_Vector
globals()[test_case +
"_broadcast_0"] = TestFusedElementwiseActivationOp_broadcast_0
globals()[test_case +
"_broadcast_1"] = TestFusedElementwiseActivationOp_broadcast_1
globals()[test_case +
"_broadcast_2"] = TestFusedElementwiseActivationOp_broadcast_2
globals()[test_case +
"_broadcast_3"] = TestFusedElementwiseActivationOp_broadcast_3
globals()[test_case +
"_broadcast_4"] = TestFusedElementwiseActivationOp_broadcast_4
globals()[test_case +
"_rowwise_add_0"] = TestFusedElementwiseActivationOp_rowwise_add_0
globals()[test_case +
"_rowwise_add_1"] = TestFusedElementwiseActivationOp_rowwise_add_1
    globals()[test_case +
              "_channelwise_add"] = TestFusedElementwiseActivationOp_channelwise_add
def scale_add_func(x, y, x_bcast, y_bcast, scale, mode=0):
if mode == 0:
return x, y, (x_bcast + y_bcast), (x_bcast + y_bcast) * scale
else:
return y, x, (x_bcast + y_bcast), (x_bcast + y_bcast) * scale
def add_scale_func(x, y, x_bcast, y_bcast, scale, mode=0):
if mode == 0:
return x, y, y * scale, x_bcast + y_bcast * scale
else:
return y, x, x * scale, y_bcast + x_bcast * scale
def add_relu_func(x, y, x_bcast, y_bcast, mode=0):
    # Copied from test_activation_op.py.
    # Because we use delta = 0.005 when computing the numeric gradient,
    # if x is too small (such as 0.002), x_neg will be -0.003 and
    # x_pos will be 0.007, so the numeric gradient is inaccurate.
    # We should avoid this.
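    # Worked example (illustrative numbers, not taken from the test data):
    # with delta = 0.005 and y = 0.002, the perturbed points are
    # y_neg = 0.002 - 0.005 = -0.003 and y_pos = 0.002 + 0.005 = 0.007,
    # which straddle the ReLU kink at 0, so the central difference
    # (relu(0.007) - relu(-0.003)) / (2 * 0.005) = 0.7 instead of the
    # analytic gradient 1. Shifting such values to 0.02 keeps both
    # perturbed points on the same side of the kink.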
if mode == 0:
y[np.abs(y) < 0.005] = 0.02
y_bcast[np.abs(y_bcast) < 0.005] = 0.02
return x, y, np.maximum(y, 0), x_bcast + np.maximum(y_bcast, 0)
else:
x[np.abs(x) < 0.005] = 0.02
x_bcast[np.abs(x_bcast) < 0.005] = 0.02
return y, x, np.maximum(x, 0), y_bcast + np.maximum(x_bcast, 0)
def relu_add_func(x, y, x_bcast, y_bcast, mode=0):
intermediate_out = x_bcast + y_bcast
out = np.maximum(intermediate_out, 0)
out[np.abs(out) < 0.005] = 0.02
if mode == 0:
return x, y, intermediate_out, out
else:
return y, x, intermediate_out, out
def mul_scale_func(x, y, x_bcast, y_bcast, scale, mode=0):
if mode == 0:
return x, y, y * scale, x_bcast * (y_bcast * scale)
else:
return y, x, x * scale, y_bcast * (x_bcast * scale)
scale = 0.1
scale_add_func = partial(scale_add_func, scale=scale)
add_scale_func = partial(add_scale_func, scale=scale)
mul_scale_func = partial(mul_scale_func, scale=scale)
for mode in {0, 1}:
scale_add_func = partial(scale_add_func, mode=mode)
add_scale_func = partial(add_scale_func, mode=mode)
mul_scale_func = partial(mul_scale_func, mode=mode)
relu_add_func = partial(relu_add_func, mode=mode)
add_relu_func = partial(add_relu_func, mode=mode)
for recomputation in {True, False}:
for keep_intermediate_value in {True, False}:
suffix = ("_keep_intermediate_value" if keep_intermediate_value else "") \
+ ("_recomputation" if recomputation else "") \
+ ("_mode_"+ str(mode))
create_test_class('scale_add' + suffix, scale_add_func, {
'scale': scale,
'functor_list': ["scale", "elementwise_add"],
'keep_intermediate_value': keep_intermediate_value,
'recomputation': recomputation
})
create_test_class('add_scale' + suffix, add_scale_func, {
'scale': scale,
'functor_list': ["elementwise_add", "scale"],
'keep_intermediate_value': keep_intermediate_value,
'recomputation': recomputation
})
create_test_class('add_relu' + suffix, add_relu_func, {
'functor_list': ["elementwise_add", "relu"],
'keep_intermediate_value': keep_intermediate_value,
'recomputation': recomputation
})
create_test_class('relu_add' + suffix, relu_add_func, {
'functor_list': ["relu", "elementwise_add"],
'keep_intermediate_value': keep_intermediate_value,
'recomputation': recomputation
})
create_test_class('mul_scale' + suffix, mul_scale_func, {
'scale': scale,
'functor_list': ["elementwise_mul", "scale"],
'keep_intermediate_value': keep_intermediate_value,
'recomputation': recomputation
})
if __name__ == '__main__':
unittest.main()
| 41.77193 | 94 | 0.628937 |
4a213d13ddf9d37a0d3d1ff753d588d277f2dd24
| 19,467 |
py
|
Python
|
tests/rest/media/v1/test_media_storage.py
|
lukaslihotzki/synapse
|
1dfdc87b9bb07cc3c958dde7f41f2af4322477e5
|
[
"Apache-2.0"
] | 2 |
2021-07-07T10:21:41.000Z
|
2021-12-28T00:13:20.000Z
|
tests/rest/media/v1/test_media_storage.py
|
lukaslihotzki/synapse
|
1dfdc87b9bb07cc3c958dde7f41f2af4322477e5
|
[
"Apache-2.0"
] | 2 |
2021-12-17T21:45:54.000Z
|
2021-12-29T20:12:09.000Z
|
tests/rest/media/v1/test_media_storage.py
|
lukaslihotzki/synapse
|
1dfdc87b9bb07cc3c958dde7f41f2af4322477e5
|
[
"Apache-2.0"
] | 1 |
2021-06-16T23:25:48.000Z
|
2021-06-16T23:25:48.000Z
|
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
from binascii import unhexlify
from io import BytesIO
from typing import Optional
from unittest.mock import Mock
from urllib import parse
import attr
from parameterized import parameterized_class
from PIL import Image as Image
from twisted.internet import defer
from twisted.internet.defer import Deferred
from synapse.logging.context import make_deferred_yieldable
from synapse.rest import admin
from synapse.rest.client.v1 import login
from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.filepath import MediaFilePaths
from synapse.rest.media.v1.media_storage import MediaStorage
from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend
from tests import unittest
from tests.server import FakeSite, make_request
from tests.utils import default_config
class MediaStorageTests(unittest.HomeserverTestCase):
needs_threadpool = True
def prepare(self, reactor, clock, hs):
self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-")
self.addCleanup(shutil.rmtree, self.test_dir)
self.primary_base_path = os.path.join(self.test_dir, "primary")
self.secondary_base_path = os.path.join(self.test_dir, "secondary")
hs.config.media_store_path = self.primary_base_path
storage_providers = [FileStorageProviderBackend(hs, self.secondary_base_path)]
self.filepaths = MediaFilePaths(self.primary_base_path)
self.media_storage = MediaStorage(
hs, self.primary_base_path, self.filepaths, storage_providers
)
def test_ensure_media_is_in_local_cache(self):
media_id = "some_media_id"
test_body = "Test\n"
# First we create a file that is in a storage provider but not in the
# local primary media store
rel_path = self.filepaths.local_media_filepath_rel(media_id)
secondary_path = os.path.join(self.secondary_base_path, rel_path)
os.makedirs(os.path.dirname(secondary_path))
with open(secondary_path, "w") as f:
f.write(test_body)
# Now we run ensure_media_is_in_local_cache, which should copy the file
# to the local cache.
file_info = FileInfo(None, media_id)
# This uses a real blocking threadpool so we have to wait for it to be
# actually done :/
x = defer.ensureDeferred(
self.media_storage.ensure_media_is_in_local_cache(file_info)
)
# Hotloop until the threadpool does its job...
self.wait_on_thread(x)
local_path = self.get_success(x)
self.assertTrue(os.path.exists(local_path))
# Asserts the file is under the expected local cache directory
self.assertEquals(
os.path.commonprefix([self.primary_base_path, local_path]),
self.primary_base_path,
)
with open(local_path) as f:
body = f.read()
self.assertEqual(test_body, body)
@attr.s(slots=True, frozen=True)
class _TestImage:
"""An image for testing thumbnailing with the expected results
Attributes:
data: The raw image to thumbnail
content_type: The type of the image as a content type, e.g. "image/png"
extension: The extension associated with the format, e.g. ".png"
expected_cropped: The expected bytes from cropped thumbnailing, or None if
test should just check for success.
expected_scaled: The expected bytes from scaled thumbnailing, or None if
test should just check for a valid image returned.
expected_found: True if the file should exist on the server, or False if
a 404 is expected.
"""
data = attr.ib(type=bytes)
content_type = attr.ib(type=bytes)
extension = attr.ib(type=bytes)
expected_cropped = attr.ib(type=Optional[bytes], default=None)
expected_scaled = attr.ib(type=Optional[bytes], default=None)
expected_found = attr.ib(default=True, type=bool)
@parameterized_class(
("test_image",),
[
        # small png
(
_TestImage(
unhexlify(
b"89504e470d0a1a0a0000000d4948445200000001000000010806"
b"0000001f15c4890000000a49444154789c63000100000500010d"
b"0a2db40000000049454e44ae426082"
),
b"image/png",
b".png",
unhexlify(
b"89504e470d0a1a0a0000000d4948445200000020000000200806"
b"000000737a7af40000001a49444154789cedc101010000008220"
b"ffaf6e484001000000ef0610200001194334ee0000000049454e"
b"44ae426082"
),
unhexlify(
b"89504e470d0a1a0a0000000d4948445200000001000000010806"
b"0000001f15c4890000000d49444154789c636060606000000005"
b"0001a5f645400000000049454e44ae426082"
),
),
),
# small png with transparency.
(
_TestImage(
unhexlify(
b"89504e470d0a1a0a0000000d49484452000000010000000101000"
b"00000376ef9240000000274524e5300010194fdae0000000a4944"
b"4154789c636800000082008177cd72b60000000049454e44ae426"
b"082"
),
b"image/png",
b".png",
# Note that we don't check the output since it varies across
# different versions of Pillow.
),
),
# small lossless webp
(
_TestImage(
unhexlify(
b"524946461a000000574542505650384c0d0000002f0000001007"
b"1011118888fe0700"
),
b"image/webp",
b".webp",
),
),
# an empty file
(
_TestImage(
b"",
b"image/gif",
b".gif",
expected_found=False,
),
),
],
)
class MediaRepoTests(unittest.HomeserverTestCase):
hijack_auth = True
user_id = "@test:user"
def make_homeserver(self, reactor, clock):
self.fetches = []
def get_file(destination, path, output_stream, args=None, max_size=None):
"""
Returns tuple[int,dict,str,int] of file length, response headers,
absolute URI, and response code.
"""
def write_to(r):
data, response = r
output_stream.write(data)
return response
d = Deferred()
d.addCallback(write_to)
self.fetches.append((d, destination, path, args))
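            # Each test resolves this deferred later via
            # self.fetches[0][0].callback(...) with the fake remote response.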
return make_deferred_yieldable(d)
client = Mock()
client.get_file = get_file
self.storage_path = self.mktemp()
self.media_store_path = self.mktemp()
os.mkdir(self.storage_path)
os.mkdir(self.media_store_path)
config = self.default_config()
config["media_store_path"] = self.media_store_path
config["max_image_pixels"] = 2000000
provider_config = {
"module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend",
"store_local": True,
"store_synchronous": False,
"store_remote": True,
"config": {"directory": self.storage_path},
}
config["media_storage_providers"] = [provider_config]
hs = self.setup_test_homeserver(config=config, federation_http_client=client)
return hs
def prepare(self, reactor, clock, hs):
media_resource = hs.get_media_repository_resource()
self.download_resource = media_resource.children[b"download"]
self.thumbnail_resource = media_resource.children[b"thumbnail"]
self.store = hs.get_datastore()
self.media_repo = hs.get_media_repository()
self.media_id = "example.com/12345"
def _req(self, content_disposition):
channel = make_request(
self.reactor,
FakeSite(self.download_resource),
"GET",
self.media_id,
shorthand=False,
await_result=False,
)
self.pump()
# We've made one fetch, to example.com, using the media URL, and asking
# the other server not to do a remote fetch
self.assertEqual(len(self.fetches), 1)
self.assertEqual(self.fetches[0][1], "example.com")
self.assertEqual(
self.fetches[0][2], "/_matrix/media/r0/download/" + self.media_id
)
self.assertEqual(self.fetches[0][3], {"allow_remote": "false"})
headers = {
b"Content-Length": [b"%d" % (len(self.test_image.data))],
b"Content-Type": [self.test_image.content_type],
}
if content_disposition:
headers[b"Content-Disposition"] = [content_disposition]
self.fetches[0][0].callback(
(self.test_image.data, (len(self.test_image.data), headers))
)
self.pump()
self.assertEqual(channel.code, 200)
return channel
def test_disposition_filename_ascii(self):
"""
If the filename is filename=<ascii> then Synapse will decode it as an
ASCII string, and use filename= in the response.
"""
channel = self._req(b"inline; filename=out" + self.test_image.extension)
headers = channel.headers
self.assertEqual(
headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
)
self.assertEqual(
headers.getRawHeaders(b"Content-Disposition"),
[b"inline; filename=out" + self.test_image.extension],
)
def test_disposition_filenamestar_utf8escaped(self):
"""
If the filename is filename=*utf8''<utf8 escaped> then Synapse will
correctly decode it as the UTF-8 string, and use filename* in the
response.
"""
filename = parse.quote("\u2603".encode("utf8")).encode("ascii")
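        # "\u2603" (SNOWMAN) is UTF-8 percent-encoded to b"%E2%98%83".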
channel = self._req(
b"inline; filename*=utf-8''" + filename + self.test_image.extension
)
headers = channel.headers
self.assertEqual(
headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
)
self.assertEqual(
headers.getRawHeaders(b"Content-Disposition"),
[b"inline; filename*=utf-8''" + filename + self.test_image.extension],
)
def test_disposition_none(self):
"""
If there is no filename, one isn't passed on in the Content-Disposition
        of the response.
"""
channel = self._req(None)
headers = channel.headers
self.assertEqual(
headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
)
self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None)
def test_thumbnail_crop(self):
"""Test that a cropped remote thumbnail is available."""
self._test_thumbnail(
"crop", self.test_image.expected_cropped, self.test_image.expected_found
)
def test_thumbnail_scale(self):
"""Test that a scaled remote thumbnail is available."""
self._test_thumbnail(
"scale", self.test_image.expected_scaled, self.test_image.expected_found
)
def test_invalid_type(self):
"""An invalid thumbnail type is never available."""
self._test_thumbnail("invalid", None, False)
@unittest.override_config(
{"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]}
)
def test_no_thumbnail_crop(self):
"""
Override the config to generate only scaled thumbnails, but request a cropped one.
"""
self._test_thumbnail("crop", None, False)
@unittest.override_config(
{"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]}
)
def test_no_thumbnail_scale(self):
"""
Override the config to generate only cropped thumbnails, but request a scaled one.
"""
self._test_thumbnail("scale", None, False)
def test_thumbnail_repeated_thumbnail(self):
"""Test that fetching the same thumbnail works, and deleting the on disk
thumbnail regenerates it.
"""
self._test_thumbnail(
"scale", self.test_image.expected_scaled, self.test_image.expected_found
)
if not self.test_image.expected_found:
return
# Fetching again should work, without re-requesting the image from the
# remote.
params = "?width=32&height=32&method=scale"
channel = make_request(
self.reactor,
FakeSite(self.thumbnail_resource),
"GET",
self.media_id + params,
shorthand=False,
await_result=False,
)
self.pump()
self.assertEqual(channel.code, 200)
if self.test_image.expected_scaled:
self.assertEqual(
channel.result["body"],
self.test_image.expected_scaled,
channel.result["body"],
)
# Deleting the thumbnail on disk then re-requesting it should work as
# Synapse should regenerate missing thumbnails.
origin, media_id = self.media_id.split("/")
info = self.get_success(self.store.get_cached_remote_media(origin, media_id))
file_id = info["filesystem_id"]
thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir(
origin, file_id
)
shutil.rmtree(thumbnail_dir, ignore_errors=True)
channel = make_request(
self.reactor,
FakeSite(self.thumbnail_resource),
"GET",
self.media_id + params,
shorthand=False,
await_result=False,
)
self.pump()
self.assertEqual(channel.code, 200)
if self.test_image.expected_scaled:
self.assertEqual(
channel.result["body"],
self.test_image.expected_scaled,
channel.result["body"],
)
def _test_thumbnail(self, method, expected_body, expected_found):
params = "?width=32&height=32&method=" + method
channel = make_request(
self.reactor,
FakeSite(self.thumbnail_resource),
"GET",
self.media_id + params,
shorthand=False,
await_result=False,
)
self.pump()
headers = {
b"Content-Length": [b"%d" % (len(self.test_image.data))],
b"Content-Type": [self.test_image.content_type],
}
self.fetches[0][0].callback(
(self.test_image.data, (len(self.test_image.data), headers))
)
self.pump()
if expected_found:
self.assertEqual(channel.code, 200)
if expected_body is not None:
self.assertEqual(
channel.result["body"], expected_body, channel.result["body"]
)
else:
# ensure that the result is at least some valid image
Image.open(BytesIO(channel.result["body"]))
else:
# A 404 with a JSON body.
self.assertEqual(channel.code, 404)
self.assertEqual(
channel.json_body,
{
"errcode": "M_NOT_FOUND",
"error": "Not found [b'example.com', b'12345']",
},
)
def test_x_robots_tag_header(self):
"""
Tests that the `X-Robots-Tag` header is present, which informs web crawlers
to not index, archive, or follow links in media.
"""
channel = self._req(b"inline; filename=out" + self.test_image.extension)
headers = channel.headers
self.assertEqual(
headers.getRawHeaders(b"X-Robots-Tag"),
[b"noindex, nofollow, noarchive, noimageindex"],
)
class TestSpamChecker:
"""A spam checker module that rejects all media that includes the bytes
`evil`.
"""
def __init__(self, config, api):
self.config = config
self.api = api
def parse_config(config):
return config
async def check_event_for_spam(self, foo):
return False # allow all events
async def user_may_invite(self, inviter_userid, invitee_userid, room_id):
return True # allow all invites
async def user_may_create_room(self, userid):
return True # allow all room creations
async def user_may_create_room_alias(self, userid, room_alias):
return True # allow all room aliases
async def user_may_publish_room(self, userid, room_id):
return True # allow publishing of all rooms
async def check_media_file_for_spam(self, file_wrapper, file_info) -> bool:
buf = BytesIO()
await file_wrapper.write_chunks_to(buf.write)
return b"evil" in buf.getvalue()
class SpamCheckerTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
admin.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.user = self.register_user("user", "pass")
self.tok = self.login("user", "pass")
# Allow for uploading and downloading to/from the media repo
self.media_repo = hs.get_media_repository_resource()
self.download_resource = self.media_repo.children[b"download"]
self.upload_resource = self.media_repo.children[b"upload"]
def default_config(self):
config = default_config("test")
config.update(
{
"spam_checker": [
{
"module": TestSpamChecker.__module__ + ".TestSpamChecker",
"config": {},
}
]
}
)
return config
def test_upload_innocent(self):
"""Attempt to upload some innocent data that should be allowed."""
image_data = unhexlify(
b"89504e470d0a1a0a0000000d4948445200000001000000010806"
b"0000001f15c4890000000a49444154789c63000100000500010d"
b"0a2db40000000049454e44ae426082"
)
self.helper.upload_media(
self.upload_resource, image_data, tok=self.tok, expect_code=200
)
def test_upload_ban(self):
"""Attempt to upload some data that includes bytes "evil", which should
get rejected by the spam checker.
"""
data = b"Some evil data"
self.helper.upload_media(
self.upload_resource, data, tok=self.tok, expect_code=400
)
| 33.738302 | 90 | 0.612216 |
4a213d85cc6b7a2b7712bd07675947f5d25a6f42
| 42,958 |
py
|
Python
|
tests/core/test_playback.py
|
queyenth/mopidy
|
c010d5d53a00bbc1d576e78433607a14211795e8
|
[
"Apache-2.0"
] | null | null | null |
tests/core/test_playback.py
|
queyenth/mopidy
|
c010d5d53a00bbc1d576e78433607a14211795e8
|
[
"Apache-2.0"
] | null | null | null |
tests/core/test_playback.py
|
queyenth/mopidy
|
c010d5d53a00bbc1d576e78433607a14211795e8
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
import unittest
import mock
import pykka
from mopidy import backend, core
from mopidy.internal import deprecation
from mopidy.internal.models import PlaybackState
from mopidy.models import Track
from tests import dummy_audio
class TestPlaybackProvider(backend.PlaybackProvider):
def __init__(self, audio, backend):
super(TestPlaybackProvider, self).__init__(audio, backend)
self._call_limit = 10
self._call_count = 0
self._call_onetime = False
def reset_call_limit(self):
self._call_count = 0
self._call_onetime = False
def is_call_limit_reached(self):
return self._call_count > self._call_limit
def _translate_uri_call_limit(self, uri):
self._call_count += 1
if self._call_count > self._call_limit:
# return any url (not 'None') to stop the endless loop
return 'assert: call limit reached'
if 'limit_never' in uri:
# unplayable
return None
elif 'limit_one' in uri:
# one time playable
if self._call_onetime:
return None
self._call_onetime = True
return uri
def translate_uri(self, uri):
if 'error' in uri:
raise Exception(uri)
elif 'unplayable' in uri:
return None
elif 'limit' in uri:
return self._translate_uri_call_limit(uri)
else:
return uri
# TODO: Replace this with dummy_backend now that it uses a real playback
# provider. Since we rely on our DummyAudio to actually emit events, we need a
# "real" backend and not a mock, so the right calls make it through to audio.
class TestBackend(pykka.ThreadingActor, backend.Backend):
uri_schemes = ['dummy']
def __init__(self, config, audio):
super(TestBackend, self).__init__()
self.playback = TestPlaybackProvider(audio=audio, backend=self)
class BaseTest(unittest.TestCase):
config = {'core': {'max_tracklist_length': 10000}}
tracks = [Track(uri='dummy:a', length=1234, name='foo'),
Track(uri='dummy:b', length=1234),
Track(uri='dummy:c', length=1234)]
def setUp(self): # noqa: N802
# TODO: use create_proxy helpers.
self.audio = dummy_audio.DummyAudio.start().proxy()
self.backend = TestBackend.start(
audio=self.audio, config=self.config).proxy()
self.core = core.Core(
audio=self.audio, backends=[self.backend], config=self.config)
self.playback = self.core.playback
# We don't have a core actor running, so call about to finish directly.
self.audio.set_about_to_finish_callback(
self.playback._on_about_to_finish)
with deprecation.ignore('core.tracklist.add:tracks_arg'):
self.core.tracklist.add(self.tracks)
self.events = []
self.patcher = mock.patch('mopidy.audio.listener.AudioListener.send')
self.send_mock = self.patcher.start()
def send(event, **kwargs):
self.events.append((event, kwargs))
self.send_mock.side_effect = send
def tearDown(self): # noqa: N802
pykka.ActorRegistry.stop_all()
self.patcher.stop()
def replay_events(self, until=None):
while self.events:
if self.events[0][0] == until:
break
event, kwargs = self.events.pop(0)
self.core.on_event(event, **kwargs)
def trigger_about_to_finish(self, replay_until=None):
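        # Simulates gapless playback: replay any queued audio events, invoke
        # the audio layer's about-to-finish callback as if the current track
        # were ending, then replay the resulting events (optionally stopping
        # at `replay_until`).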
self.replay_events()
callback = self.audio.get_about_to_finish_callback().get()
callback()
self.replay_events(until=replay_until)
class TestPlayHandling(BaseTest):
def test_get_current_tl_track_play(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.assertEqual(
self.core.playback.get_current_tl_track(), tl_tracks[0])
def test_get_current_track_play(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.assertEqual(
self.core.playback.get_current_track(), self.tracks[0])
def test_get_current_tlid_play(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.assertEqual(
self.core.playback.get_current_tlid(), tl_tracks[0].tlid)
def test_play_skips_to_next_on_unplayable_track(self):
"""Checks that we handle backend.change_track failing."""
tl_tracks = self.core.tracklist.get_tl_tracks()
self.audio.trigger_fake_playback_failure(tl_tracks[0].track.uri)
self.core.playback.play(tl_tracks[0])
self.replay_events()
current_tl_track = self.core.playback.get_current_tl_track()
self.assertEqual(tl_tracks[1], current_tl_track)
def test_resume_skips_to_next_on_unplayable_track(self):
"""Checks that we handle backend.change_track failing when
resuming playback."""
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.core.playback.pause()
self.audio.trigger_fake_playback_failure(tl_tracks[1].track.uri)
self.core.playback.next()
self.core.playback.resume()
self.replay_events()
current_tl_track = self.core.playback.get_current_tl_track()
self.assertEqual(tl_tracks[2], current_tl_track)
def test_play_tlid(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tlid=tl_tracks[1].tlid)
self.replay_events()
current_tl_track = self.core.playback.get_current_tl_track()
self.assertEqual(tl_tracks[1], current_tl_track)
class TestNextHandling(BaseTest):
def test_get_current_tl_track_next(self):
self.core.playback.play()
self.replay_events()
self.core.playback.next()
self.replay_events()
tl_tracks = self.core.tracklist.get_tl_tracks()
current_tl_track = self.core.playback.get_current_tl_track()
self.assertEqual(current_tl_track, tl_tracks[1])
def test_get_pending_tl_track_next(self):
self.core.playback.play()
self.replay_events()
self.core.playback.next()
tl_tracks = self.core.tracklist.get_tl_tracks()
self.assertEqual(self.core.playback._pending_tl_track, tl_tracks[1])
def test_get_current_track_next(self):
self.core.playback.play()
self.replay_events()
self.core.playback.next()
self.replay_events()
current_track = self.core.playback.get_current_track()
self.assertEqual(current_track, self.tracks[1])
def test_next_keeps_finished_track_in_tracklist(self):
tl_track = self.core.tracklist.get_tl_tracks()[0]
self.core.playback.play(tl_track)
self.replay_events()
self.core.playback.next()
self.replay_events()
self.assertIn(tl_track, self.core.tracklist.tl_tracks)
def test_next_skips_over_unplayable_track(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.audio.trigger_fake_playback_failure(tl_tracks[1].track.uri)
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.core.playback.next()
self.replay_events()
assert self.core.playback.get_current_tl_track() == tl_tracks[2]
def test_next_skips_over_change_track_error(self):
# Trigger an exception in translate_uri.
track = Track(uri='dummy:error', length=1234)
self.core.tracklist.add(tracks=[track], at_position=1)
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play()
self.replay_events()
self.core.playback.next()
self.replay_events()
assert self.core.playback.get_current_tl_track() == tl_tracks[2]
def test_next_skips_over_change_track_unplayable(self):
# Make translate_uri return None.
track = Track(uri='dummy:unplayable', length=1234)
self.core.tracklist.add(tracks=[track], at_position=1)
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play()
self.replay_events()
self.core.playback.next()
self.replay_events()
assert self.core.playback.get_current_tl_track() == tl_tracks[2]
class TestPreviousHandling(BaseTest):
# TODO Test previous() more
def test_get_current_tl_track_prev(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[1])
self.core.playback.previous()
self.replay_events()
self.assertEqual(
self.core.playback.get_current_tl_track(), tl_tracks[0])
def test_get_current_track_prev(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[1])
self.core.playback.previous()
self.replay_events()
self.assertEqual(
self.core.playback.get_current_track(), self.tracks[0])
def test_previous_keeps_finished_track_in_tracklist(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[1])
self.core.playback.previous()
self.replay_events()
self.assertIn(tl_tracks[1], self.core.tracklist.tl_tracks)
def test_previous_keeps_finished_track_even_in_consume_mode(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[1])
self.core.tracklist.consume = True
self.core.playback.previous()
self.replay_events()
self.assertIn(tl_tracks[1], self.core.tracklist.tl_tracks)
def test_previous_skips_over_unplayable_track(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.audio.trigger_fake_playback_failure(tl_tracks[1].track.uri)
self.core.playback.play(tl_tracks[2])
self.replay_events()
self.core.playback.previous()
self.replay_events()
assert self.core.playback.get_current_tl_track() == tl_tracks[0]
def test_previous_skips_over_change_track_error(self):
# Trigger an exception in translate_uri.
track = Track(uri='dummy:error', length=1234)
self.core.tracklist.add(tracks=[track], at_position=1)
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[2])
self.replay_events()
self.core.playback.previous()
self.replay_events()
assert self.core.playback.get_current_tl_track() == tl_tracks[0]
def test_previous_skips_over_change_track_unplayable(self):
# Makes translate_uri return None.
track = Track(uri='dummy:unplayable', length=1234)
self.core.tracklist.add(tracks=[track], at_position=1)
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[2])
self.replay_events()
self.core.playback.previous()
self.replay_events()
assert self.core.playback.get_current_tl_track() == tl_tracks[0]
class TestOnAboutToFinish(BaseTest):
def test_on_about_to_finish_keeps_finished_track_in_tracklist(self):
tl_track = self.core.tracklist.get_tl_tracks()[0]
self.core.playback.play(tl_track)
self.trigger_about_to_finish()
self.assertIn(tl_track, self.core.tracklist.tl_tracks)
def test_on_about_to_finish_skips_over_change_track_error(self):
# Trigger an exception in translate_uri.
track = Track(uri='dummy:error', length=1234)
self.core.tracklist.add(tracks=[track], at_position=1)
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.trigger_about_to_finish()
assert self.core.playback.get_current_tl_track() == tl_tracks[2]
def test_on_about_to_finish_skips_over_change_track_unplayable(self):
# Makes translate_uri return None.
track = Track(uri='dummy:unplayable', length=1234)
self.core.tracklist.add(tracks=[track], at_position=1)
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.trigger_about_to_finish()
assert self.core.playback.get_current_tl_track() == tl_tracks[2]
class TestConsumeHandling(BaseTest):
def test_next_in_consume_mode_removes_finished_track(self):
tl_track = self.core.tracklist.get_tl_tracks()[0]
self.core.playback.play(tl_track)
self.core.tracklist.set_consume(True)
self.replay_events()
self.core.playback.next()
self.replay_events()
self.assertNotIn(tl_track, self.core.tracklist.get_tl_tracks())
def test_next_in_consume_mode_removes_unplayable_track(self):
last_playable_tl_track = self.core.tracklist.get_tl_tracks()[-2]
unplayable_tl_track = self.core.tracklist.get_tl_tracks()[-1]
self.audio.trigger_fake_playback_failure(unplayable_tl_track.track.uri)
self.core.playback.play(last_playable_tl_track)
self.core.tracklist.set_consume(True)
self.core.playback.next()
self.replay_events()
self.assertNotIn(
unplayable_tl_track, self.core.tracklist.get_tl_tracks())
def test_on_about_to_finish_in_consume_mode_removes_finished_track(self):
tl_track = self.core.tracklist.get_tl_tracks()[0]
self.core.playback.play(tl_track)
self.core.tracklist.consume = True
self.trigger_about_to_finish()
self.assertNotIn(tl_track, self.core.tracklist.get_tl_tracks())
def test_next_in_consume_and_repeat_mode_returns_none_on_last_track(self):
self.core.playback.play()
self.core.tracklist.set_consume(True)
self.core.tracklist.set_repeat(True)
self.replay_events()
for track in self.core.tracklist.get_tl_tracks():
self.core.playback.next()
self.replay_events()
self.core.playback.next()
self.replay_events()
self.assertEqual(self.playback.get_state(), 'stopped')
class TestCurrentAndPendingTlTrack(BaseTest):
def test_get_current_tl_track_none(self):
self.assertEqual(
self.core.playback.get_current_tl_track(), None)
def test_get_current_tlid_none(self):
self.assertEqual(self.core.playback.get_current_tlid(), None)
def test_pending_tl_track_is_none(self):
self.core.playback.play()
self.replay_events()
self.assertEqual(self.playback._pending_tl_track, None)
def test_pending_tl_track_after_about_to_finish(self):
self.core.playback.play()
self.replay_events()
self.trigger_about_to_finish(replay_until='stream_changed')
self.assertEqual(self.playback._pending_tl_track.track.uri, 'dummy:b')
def test_pending_tl_track_after_stream_changed(self):
self.trigger_about_to_finish()
self.assertEqual(self.playback._pending_tl_track, None)
def test_current_tl_track_after_about_to_finish(self):
self.core.playback.play()
self.replay_events()
self.trigger_about_to_finish(replay_until='stream_changed')
self.assertEqual(self.playback.current_tl_track.track.uri, 'dummy:a')
def test_current_tl_track_after_stream_changed(self):
self.core.playback.play()
self.replay_events()
self.trigger_about_to_finish()
self.assertEqual(self.playback.current_tl_track.track.uri, 'dummy:b')
def test_current_tl_track_after_end_of_stream(self):
self.core.playback.play()
self.replay_events()
self.trigger_about_to_finish()
self.trigger_about_to_finish()
self.trigger_about_to_finish() # EOS
self.assertEqual(self.playback.current_tl_track, None)
@mock.patch(
'mopidy.core.playback.listener.CoreListener', spec=core.CoreListener)
class EventEmissionTest(BaseTest):
maxDiff = None # noqa: N815
def test_play_when_stopped_emits_events(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.assertListEqual(
[
mock.call(
'playback_state_changed',
old_state='stopped', new_state='playing'),
mock.call(
'track_playback_started', tl_track=tl_tracks[0]),
],
listener_mock.send.mock_calls)
def test_play_when_paused_emits_events(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.core.playback.pause()
self.replay_events()
listener_mock.reset_mock()
self.core.playback.play(tl_tracks[1])
self.replay_events()
self.assertListEqual(
[
mock.call(
'track_playback_ended',
tl_track=tl_tracks[0], time_position=mock.ANY),
mock.call(
'playback_state_changed',
old_state='paused', new_state='playing'),
mock.call(
'track_playback_started', tl_track=tl_tracks[1]),
],
listener_mock.send.mock_calls)
def test_play_when_playing_emits_events(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
listener_mock.reset_mock()
self.core.playback.play(tl_tracks[2])
self.replay_events()
self.assertListEqual(
[
mock.call(
'track_playback_ended',
tl_track=tl_tracks[0], time_position=mock.ANY),
mock.call(
'playback_state_changed', old_state='playing',
new_state='playing'),
mock.call(
'track_playback_started', tl_track=tl_tracks[2]),
],
listener_mock.send.mock_calls)
def test_pause_emits_events(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.core.playback.seek(1000)
listener_mock.reset_mock()
self.core.playback.pause()
self.assertListEqual(
[
mock.call(
'playback_state_changed',
old_state='playing', new_state='paused'),
mock.call(
'track_playback_paused',
tl_track=tl_tracks[0], time_position=1000),
],
listener_mock.send.mock_calls)
def test_resume_emits_events(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.core.playback.pause()
self.core.playback.seek(1000)
listener_mock.reset_mock()
self.core.playback.resume()
self.assertListEqual(
[
mock.call(
'playback_state_changed',
old_state='paused', new_state='playing'),
mock.call(
'track_playback_resumed',
tl_track=tl_tracks[0], time_position=1000),
],
listener_mock.send.mock_calls)
def test_stop_emits_events(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.core.playback.seek(1000)
self.replay_events()
listener_mock.reset_mock()
self.core.playback.stop()
self.replay_events()
self.assertListEqual(
[
mock.call(
'playback_state_changed',
old_state='playing', new_state='stopped'),
mock.call(
'track_playback_ended',
tl_track=tl_tracks[0], time_position=1000),
],
listener_mock.send.mock_calls)
def test_next_emits_events(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.core.playback.seek(1000)
self.replay_events()
listener_mock.reset_mock()
self.core.playback.next()
self.replay_events()
self.assertListEqual(
[
mock.call(
'track_playback_ended',
tl_track=tl_tracks[0], time_position=mock.ANY),
mock.call(
'playback_state_changed',
old_state='playing', new_state='playing'),
mock.call(
'track_playback_started', tl_track=tl_tracks[1]),
],
listener_mock.send.mock_calls)
def test_next_emits_events_when_consume_mode_is_enabled(
self,
listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.tracklist.set_consume(True)
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.core.playback.seek(1000)
self.replay_events()
listener_mock.reset_mock()
self.core.playback.next()
self.replay_events()
self.assertListEqual(
[
mock.call(
'tracklist_changed'),
mock.call(
'track_playback_ended',
tl_track=tl_tracks[0], time_position=mock.ANY),
mock.call(
'playback_state_changed',
old_state='playing', new_state='playing'),
mock.call(
'track_playback_started', tl_track=tl_tracks[1]),
],
listener_mock.send.mock_calls)
def test_gapless_track_change_emits_events(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
listener_mock.reset_mock()
self.trigger_about_to_finish()
self.assertListEqual(
[
mock.call(
'track_playback_ended',
tl_track=tl_tracks[0], time_position=mock.ANY),
mock.call(
'playback_state_changed',
old_state='playing', new_state='playing'),
mock.call(
'track_playback_started', tl_track=tl_tracks[1]),
],
listener_mock.send.mock_calls)
def test_seek_emits_seeked_event(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
listener_mock.reset_mock()
self.core.playback.seek(1000)
self.replay_events()
listener_mock.send.assert_called_once_with(
'seeked', time_position=1000)
def test_seek_past_end_of_track_emits_events(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
listener_mock.reset_mock()
self.core.playback.seek(self.tracks[0].length * 5)
self.replay_events()
self.assertListEqual(
[
mock.call(
'track_playback_ended',
tl_track=tl_tracks[0], time_position=mock.ANY),
mock.call(
'playback_state_changed',
old_state='playing', new_state='playing'),
mock.call(
'track_playback_started', tl_track=tl_tracks[1]),
],
listener_mock.send.mock_calls)
def test_seek_race_condition_emits_events(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.trigger_about_to_finish(replay_until='stream_changed')
self.replay_events()
listener_mock.reset_mock()
self.core.playback.seek(1000)
self.replay_events()
        # When we trigger a seek after an about-to-finish, the code that emits
        # track stopped/started and playback state changed events also gets
        # triggered, because we have to switch back to the previous track.
        # The correct behavior would be to only emit seeked.
self.assertListEqual(
[mock.call('seeked', time_position=1000)],
listener_mock.send.mock_calls)
def test_previous_emits_events(self, listener_mock):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[1])
self.replay_events()
listener_mock.reset_mock()
self.core.playback.previous()
self.replay_events()
self.assertListEqual(
[
mock.call(
'track_playback_ended',
tl_track=tl_tracks[1], time_position=mock.ANY),
mock.call(
'playback_state_changed',
old_state='playing', new_state='playing'),
mock.call(
'track_playback_started', tl_track=tl_tracks[0]),
],
listener_mock.send.mock_calls)
class TestUnplayableURI(BaseTest):
tracks = [
Track(uri='unplayable://'),
Track(uri='dummy:b'),
]
def setUp(self): # noqa: N802
super(TestUnplayableURI, self).setUp()
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback._set_current_tl_track(tl_tracks[0])
def test_play_skips_to_next_if_track_is_unplayable(self):
self.core.playback.play()
self.replay_events()
current_track = self.core.playback.get_current_track()
self.assertEqual(current_track, self.tracks[1])
def test_pause_changes_state_even_if_track_is_unplayable(self):
self.core.playback.pause()
self.assertEqual(self.core.playback.state, core.PlaybackState.PAUSED)
def test_resume_does_nothing_if_track_is_unplayable(self):
self.core.playback.state = core.PlaybackState.PAUSED
self.core.playback.resume()
self.assertEqual(self.core.playback.state, core.PlaybackState.PAUSED)
def test_stop_changes_state_even_if_track_is_unplayable(self):
self.core.playback.state = core.PlaybackState.PAUSED
self.core.playback.stop()
self.assertEqual(self.core.playback.state, core.PlaybackState.STOPPED)
def test_time_position_returns_0_if_track_is_unplayable(self):
result = self.core.playback.time_position
self.assertEqual(result, 0)
def test_seek_fails_for_unplayable_track(self):
self.core.playback.state = core.PlaybackState.PLAYING
success = self.core.playback.seek(1000)
self.assertFalse(success)
class SeekTest(BaseTest):
def test_seek_normalizes_negative_positions_to_zero(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.core.playback.seek(-100) # Dummy audio doesn't progress time.
self.assertEqual(0, self.core.playback.get_time_position())
def test_seek_fails_for_track_without_duration(self):
track = self.tracks[0].replace(length=None)
self.core.tracklist.clear()
self.core.tracklist.add([track])
self.core.playback.play()
self.replay_events()
self.assertFalse(self.core.playback.seek(1000))
self.assertEqual(0, self.core.playback.get_time_position())
def test_seek_play_stay_playing(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.core.playback.seek(1000)
self.assertEqual(self.core.playback.state, core.PlaybackState.PLAYING)
def test_seek_paused_stay_paused(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.core.playback.pause()
self.replay_events()
self.core.playback.seek(1000)
self.assertEqual(self.core.playback.state, core.PlaybackState.PAUSED)
def test_seek_race_condition_after_about_to_finish(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.trigger_about_to_finish(replay_until='stream_changed')
self.core.playback.seek(1000)
self.replay_events()
current_tl_track = self.core.playback.get_current_tl_track()
self.assertEqual(current_tl_track, tl_tracks[0])
class TestStream(BaseTest):
def test_get_stream_title_before_playback(self):
self.assertEqual(self.playback.get_stream_title(), None)
def test_get_stream_title_during_playback(self):
self.core.playback.play()
self.replay_events()
self.assertEqual(self.playback.get_stream_title(), None)
def test_get_stream_title_during_playback_with_tags_change(self):
self.core.playback.play()
self.audio.trigger_fake_tags_changed({'title': ['foobar']}).get()
self.replay_events()
self.assertEqual(self.playback.get_stream_title(), 'foobar')
def test_get_stream_title_during_playback_with_tags_unchanged(self):
self.core.playback.play()
self.audio.trigger_fake_tags_changed({'title': ['foo']}).get()
self.replay_events()
assert self.playback.get_stream_title() is None
def test_get_stream_title_after_next(self):
self.core.playback.play()
self.audio.trigger_fake_tags_changed({'title': ['foobar']}).get()
self.replay_events()
self.core.playback.next()
self.replay_events()
self.assertEqual(self.playback.get_stream_title(), None)
def test_get_stream_title_after_next_with_tags_change(self):
self.core.playback.play()
self.audio.trigger_fake_tags_changed({'title': ['foo']}).get()
self.replay_events()
self.core.playback.next()
self.audio.trigger_fake_tags_changed({'title': ['bar']}).get()
self.replay_events()
self.assertEqual(self.playback.get_stream_title(), 'bar')
def test_get_stream_title_after_stop(self):
self.core.playback.play()
self.audio.trigger_fake_tags_changed({'title': ['foobar']}).get()
self.replay_events()
self.core.playback.stop()
self.replay_events()
self.assertEqual(self.playback.get_stream_title(), None)
class TestBackendSelection(unittest.TestCase):
def setUp(self): # noqa: N802
config = {
'core': {
'max_tracklist_length': 10000,
}
}
self.backend1 = mock.Mock()
self.backend1.uri_schemes.get.return_value = ['dummy1']
self.playback1 = mock.Mock(spec=backend.PlaybackProvider)
self.backend1.playback = self.playback1
self.backend2 = mock.Mock()
self.backend2.uri_schemes.get.return_value = ['dummy2']
self.playback2 = mock.Mock(spec=backend.PlaybackProvider)
self.backend2.playback = self.playback2
self.tracks = [
Track(uri='dummy1:a', length=40000),
Track(uri='dummy2:a', length=40000),
]
self.core = core.Core(config, mixer=None, backends=[
self.backend1, self.backend2])
self.tl_tracks = self.core.tracklist.add(self.tracks)
def trigger_stream_changed(self):
pending = self.core.playback._pending_tl_track
if pending:
self.core.stream_changed(uri=pending.track.uri)
else:
self.core.stream_changed(uri=None)
def test_play_selects_dummy1_backend(self):
self.core.playback.play(self.tl_tracks[0])
self.trigger_stream_changed()
self.playback1.prepare_change.assert_called_once_with()
self.playback1.change_track.assert_called_once_with(self.tracks[0])
self.playback1.play.assert_called_once_with()
self.assertFalse(self.playback2.play.called)
def test_play_selects_dummy2_backend(self):
self.core.playback.play(self.tl_tracks[1])
self.trigger_stream_changed()
self.assertFalse(self.playback1.play.called)
self.playback2.prepare_change.assert_called_once_with()
self.playback2.change_track.assert_called_once_with(self.tracks[1])
self.playback2.play.assert_called_once_with()
def test_pause_selects_dummy1_backend(self):
self.core.playback.play(self.tl_tracks[0])
self.trigger_stream_changed()
self.core.playback.pause()
self.playback1.pause.assert_called_once_with()
self.assertFalse(self.playback2.pause.called)
def test_pause_selects_dummy2_backend(self):
self.core.playback.play(self.tl_tracks[1])
self.trigger_stream_changed()
self.core.playback.pause()
self.assertFalse(self.playback1.pause.called)
self.playback2.pause.assert_called_once_with()
def test_resume_selects_dummy1_backend(self):
self.core.playback.play(self.tl_tracks[0])
self.trigger_stream_changed()
self.core.playback.pause()
self.core.playback.resume()
self.playback1.resume.assert_called_once_with()
self.assertFalse(self.playback2.resume.called)
def test_resume_selects_dummy2_backend(self):
self.core.playback.play(self.tl_tracks[1])
self.trigger_stream_changed()
self.core.playback.pause()
self.core.playback.resume()
self.assertFalse(self.playback1.resume.called)
self.playback2.resume.assert_called_once_with()
def test_stop_selects_dummy1_backend(self):
self.core.playback.play(self.tl_tracks[0])
self.trigger_stream_changed()
self.core.playback.stop()
self.trigger_stream_changed()
self.playback1.stop.assert_called_once_with()
self.assertFalse(self.playback2.stop.called)
def test_stop_selects_dummy2_backend(self):
self.core.playback.play(self.tl_tracks[1])
self.trigger_stream_changed()
self.core.playback.stop()
self.trigger_stream_changed()
self.assertFalse(self.playback1.stop.called)
self.playback2.stop.assert_called_once_with()
def test_seek_selects_dummy1_backend(self):
self.core.playback.play(self.tl_tracks[0])
self.trigger_stream_changed()
self.core.playback.seek(10000)
self.playback1.seek.assert_called_once_with(10000)
self.assertFalse(self.playback2.seek.called)
def test_seek_selects_dummy2_backend(self):
self.core.playback.play(self.tl_tracks[1])
self.trigger_stream_changed()
self.core.playback.seek(10000)
self.assertFalse(self.playback1.seek.called)
self.playback2.seek.assert_called_once_with(10000)
def test_time_position_selects_dummy1_backend(self):
self.core.playback.play(self.tl_tracks[0])
self.trigger_stream_changed()
self.core.playback.time_position
self.playback1.get_time_position.assert_called_once_with()
self.assertFalse(self.playback2.get_time_position.called)
def test_time_position_selects_dummy2_backend(self):
self.core.playback.play(self.tl_tracks[1])
self.trigger_stream_changed()
self.core.playback.time_position
self.assertFalse(self.playback1.get_time_position.called)
self.playback2.get_time_position.assert_called_once_with()
class TestCorePlaybackWithOldBackend(unittest.TestCase):
def test_type_error_from_old_backend_does_not_crash_core(self):
config = {
'core': {
'max_tracklist_length': 10000,
}
}
b = mock.Mock()
b.actor_ref.actor_class.__name__ = 'DummyBackend'
b.uri_schemes.get.return_value = ['dummy1']
b.playback = mock.Mock(spec=backend.PlaybackProvider)
b.playback.play.side_effect = TypeError
b.library.lookup.return_value.get.return_value = [
Track(uri='dummy1:a', length=40000)]
c = core.Core(config, mixer=None, backends=[b])
c.tracklist.add(uris=['dummy1:a'])
c.playback.play() # No TypeError == test passed.
b.playback.play.assert_called_once_with()
class TestBug1177Regression(unittest.TestCase):
def test(self):
config = {
'core': {
'max_tracklist_length': 10000,
}
}
b = mock.Mock()
b.uri_schemes.get.return_value = ['dummy']
b.playback = mock.Mock(spec=backend.PlaybackProvider)
b.playback.change_track.return_value.get.return_value = True
b.playback.play.return_value.get.return_value = True
track1 = Track(uri='dummy:a', length=40000)
track2 = Track(uri='dummy:b', length=40000)
c = core.Core(config, mixer=None, backends=[b])
c.tracklist.add([track1, track2])
c.playback.play()
b.playback.change_track.assert_called_once_with(track1)
b.playback.change_track.reset_mock()
c.playback.pause()
c.playback.next()
b.playback.change_track.assert_called_once_with(track2)
class TestCorePlaybackSaveLoadState(BaseTest):
def test_save(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[1])
self.replay_events()
state = PlaybackState(
time_position=0, state='playing', tlid=tl_tracks[1].tlid)
value = self.core.playback._save_state()
self.assertEqual(state, value)
def test_load(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.stop()
self.replay_events()
self.assertEqual('stopped', self.core.playback.get_state())
state = PlaybackState(
time_position=0, state='playing', tlid=tl_tracks[2].tlid)
coverage = ['play-last']
self.core.playback._load_state(state, coverage)
self.replay_events()
self.assertEqual('playing', self.core.playback.get_state())
self.assertEqual(tl_tracks[2],
self.core.playback.get_current_tl_track())
def test_load_not_covered(self):
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.stop()
self.replay_events()
self.assertEqual('stopped', self.core.playback.get_state())
state = PlaybackState(
time_position=0, state='playing', tlid=tl_tracks[2].tlid)
coverage = ['other']
self.core.playback._load_state(state, coverage)
self.replay_events()
self.assertEqual('stopped', self.core.playback.get_state())
self.assertEqual(None,
self.core.playback.get_current_tl_track())
def test_load_invalid_type(self):
with self.assertRaises(TypeError):
self.core.playback._load_state(11, None)
def test_load_none(self):
self.core.playback._load_state(None, None)
class TestBug1352Regression(BaseTest):
tracks = [
Track(uri='dummy:a', length=40000),
Track(uri='dummy:b', length=40000),
]
def test_next_when_paused_updates_history(self):
self.core.history._add_track = mock.Mock()
self.core.tracklist._mark_playing = mock.Mock()
tl_tracks = self.core.tracklist.get_tl_tracks()
self.playback.play()
self.replay_events()
self.core.history._add_track.assert_called_once_with(self.tracks[0])
self.core.tracklist._mark_playing.assert_called_once_with(tl_tracks[0])
self.core.history._add_track.reset_mock()
self.core.tracklist._mark_playing.reset_mock()
self.playback.pause()
self.playback.next()
self.replay_events()
self.core.history._add_track.assert_called_once_with(self.tracks[1])
self.core.tracklist._mark_playing.assert_called_once_with(tl_tracks[1])
class TestEndlessLoop(BaseTest):
tracks_play = [
Track(uri='dummy:limit_never:a'),
Track(uri='dummy:limit_never:b')
]
tracks_other = [
Track(uri='dummy:limit_never:a'),
Track(uri='dummy:limit_one'),
Track(uri='dummy:limit_never:b')
]
def test_play(self):
self.core.tracklist.clear()
self.core.tracklist.add(self.tracks_play)
self.backend.playback.reset_call_limit().get()
self.core.tracklist.set_repeat(True)
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[0])
self.replay_events()
self.assertFalse(self.backend.playback.is_call_limit_reached().get())
def test_next(self):
self.core.tracklist.clear()
self.core.tracklist.add(self.tracks_other)
self.backend.playback.reset_call_limit().get()
self.core.tracklist.set_repeat(True)
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[1])
self.replay_events()
self.core.playback.next()
self.replay_events()
self.assertFalse(self.backend.playback.is_call_limit_reached().get())
def test_previous(self):
self.core.tracklist.clear()
self.core.tracklist.add(self.tracks_other)
self.backend.playback.reset_call_limit().get()
self.core.tracklist.set_repeat(True)
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[1])
self.replay_events()
self.core.playback.previous()
self.replay_events()
self.assertFalse(self.backend.playback.is_call_limit_reached().get())
def test_on_about_to_finish(self):
self.core.tracklist.clear()
self.core.tracklist.add(self.tracks_other)
self.backend.playback.reset_call_limit().get()
self.core.tracklist.set_repeat(True)
tl_tracks = self.core.tracklist.get_tl_tracks()
self.core.playback.play(tl_tracks[1])
self.replay_events()
self.trigger_about_to_finish()
self.assertFalse(self.backend.playback.is_call_limit_reached().get())
| 32.842508 | 79 | 0.654127 |
4a213da75f8561f233d4f33fc4b9f7dfc94097a8
| 2,403 |
py
|
Python
|
openGaussBase/testcase/GUC/CONNECTIONAUTHENTICATION/Opengauss_Function_Guc_Connectionauthentication_Case0147.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/GUC/CONNECTIONAUTHENTICATION/Opengauss_Function_Guc_Connectionauthentication_Case0147.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/GUC/CONNECTIONAUTHENTICATION/Opengauss_Function_Guc_Connectionauthentication_Case0147.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : GUC
Case Name : auth_iteration_count参数使用gs_guc set设置为空值
Description : 1、查看auth_iteration_count默认值;
source /opt/opengauss810/env
gs_guc check -D {cluster/dn1} -c auth_iteration_count
2、使用设置gs_guc set设置auth_iteration_count为空值
gs_guc set -D {cluster/dn1} -c "auth_iteration_count=' '"
gs_guc set -N all -D {cluster/dn1} -c "auth_iteration_count=' '"
Expect : 1、显示默认值;
2、参数修改失败;
History :
"""
import unittest
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
COMMONSH = CommonSH('PrimaryDbUser')
class GucTest(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.constant = Constant()
        self.log.info('==Guc_Connectionauthentication_Case0147 start==')
status = COMMONSH.get_db_cluster_status()
self.assertTrue("Normal" in status or "Degraded" in status)
def test_startdb(self):
        self.log.info("Check the default value of the parameter")
result = COMMONSH.execute_gsguc(
'check', '10000', 'auth_iteration_count')
self.assertTrue(result)
        self.log.info("Set auth_iteration_count to an empty value and restart to take effect")
result = COMMONSH.execute_gsguc('set', self.constant.TPCC_ERROR,
f'auth_iteration_count=\' \'')
self.assertTrue(result)
def tearDown(self):
        self.log.info("Restore the default value")
result = COMMONSH.execute_gsguc('set', self.constant.GSGUC_SUCCESS_MSG,
f'auth_iteration_count=10000')
self.log.info(result)
COMMONSH.restart_db_cluster()
status = COMMONSH.get_db_cluster_status()
self.assertTrue("Normal" in status or "Degraded" in status)
        self.log.info('==Guc_Connectionauthentication_Case0147 finished==')
| 35.338235 | 84 | 0.667915 |
4a213ddc075250f7922f857fe5cfe4f8195b39d7 | 1,876 | py | Python | docs/source/neo/example/sample4.py | BSathvik/neo-python | 90eddde0128f8ba41207d88fd68041682e307315 | ["MIT"] | 387 | 2017-07-17T18:25:54.000Z | 2021-11-18T06:19:47.000Z | docs/source/neo/example/sample4.py | BSathvik/neo-python | 90eddde0128f8ba41207d88fd68041682e307315 | ["MIT"] | 967 | 2017-08-19T15:48:03.000Z | 2021-06-01T21:42:39.000Z | docs/source/neo/example/sample4.py | BSathvik/neo-python | 90eddde0128f8ba41207d88fd68041682e307315 | ["MIT"] | 286 | 2017-07-17T03:44:36.000Z | 2021-11-18T06:19:32.000Z |
from boa.interop.Neo.Runtime import GetTrigger,CheckWitness
from boa.interop.Neo.Storage import Get,Put,Delete,GetContext
from boa.interop.Neo.TriggerType import Application, Verification
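# 20-byte script hash of the contract owner; checked with CheckWitness() below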
OWNER = b'\x03\x19\xe0)\xb9%\x85w\x90\xe4\x17\x85\xbe\x9c\xce\xc6\xca\xb1\x98\x96'
def Main(operation, addr, value):
print("Running Sample v4")
trigger = GetTrigger()
print(trigger)
# This determines that the SC is runnning in Verification mode
# This determines whether the TX will be relayed to the rest of the network
# The `Verification` portion of SC is *read-only*, so calls to `Storage.Put` will fail.
# You can, however, use `Storage.Get`
if trigger == Verification():
print("Running Verification!")
# This routine is: if the invoker ( or the Address that signed the contract ) is not OWNER,
# Then we return False, and the TX will not be relayed to the network
# Otherwise, we know the owner address signed the TX and return True
is_owner = CheckWitness(OWNER)
if is_owner:
print("Is Owner!")
return True
print("Not Owner")
return False
elif trigger == Application():
print("Running Application!")
if not is_valid_addr(addr):
print("Not Valid Address")
return False
ctx = GetContext()
if operation == 'add':
balance = Get(ctx, addr)
new_balance = balance + value
Put(ctx, addr, new_balance)
return new_balance
elif operation == 'remove':
balance = Get(ctx, addr)
Put(ctx, addr, balance - value)
return balance - value
elif operation == 'balance':
return Get(ctx, addr)
return False
def is_valid_addr(addr):
if len(addr) == 20:
return True
return False
| 28 | 99 | 0.624733 |
4a213edb3ecbd1f0d886d4a2085a829c062eef39 | 6,135 | py | Python | tests/test_parameters.py | pywr/pywr-next | b3f95da7b57a3ee62bb9320f6ceaa9bbadb0548c | ["Apache-2.0"] | null | null | null | tests/test_parameters.py | pywr/pywr-next | b3f95da7b57a3ee62bb9320f6ceaa9bbadb0548c | ["Apache-2.0"] | 5 | 2021-07-29T21:51:17.000Z | 2022-02-07T13:15:03.000Z | tests/test_parameters.py | pywr/pywr-next | b3f95da7b57a3ee62bb9320f6ceaa9bbadb0548c | ["Apache-2.0"] | 1 | 2021-02-06T18:54:38.000Z | 2021-02-06T18:54:38.000Z |
import pytest
import numpy as np
from pywr.model import Model
@pytest.fixture()
def simple_data():
return {
"timestepper": {"start": "2020-01-01", "end": "2020-12-31", "timestep": 1},
"nodes": [
{"name": "input1", "type": "input"},
{"name": "link1", "type": "link"},
{
"name": "output1",
"type": "output",
"cost": -10.0,
"max_flow": 10.0,
},
],
"edges": [
{"from_node": "input1", "to_node": "link1"},
{"from_node": "link1", "to_node": "output1"},
],
"parameters": [],
}
@pytest.fixture()
def simple_storage_data():
return {
"timestepper": {"start": "2020-01-01", "end": "2020-01-31", "timestep": 1},
"nodes": [
{"name": "input1", "type": "input"},
{
"name": "storage1",
"type": "storage",
"max_volume": 20,
"initial_volume": 20,
},
{
"name": "output1",
"type": "output",
"cost": -10.0,
"max_flow": 1.0,
},
],
"edges": [
{"from_node": "input1", "to_node": "storage1"},
{"from_node": "storage1", "to_node": "output1"},
],
"parameters": [],
}
class TestAggregatedParameter:
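    # Maps the "agg_func" name used in the model definition to the numpy
    # function used to compute the expected aggregated value in each test.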
__test_funcs__ = {
"sum": np.sum,
"product": np.product,
"mean": np.mean,
"max": np.max,
"min": np.min,
}
@pytest.mark.parametrize("agg_func", ["sum", "product", "mean", "max", "min"])
def test_two_parameters(self, simple_data, agg_func):
"""Test an aggregated node with two parameters."""
test_func = self.__test_funcs__[agg_func]
simple_data["parameters"] = [
{"name": "p1", "type": "constant", "value": 10.0},
{"name": "p2", "type": "constant", "value": 10.0},
{
"name": "agg",
"type": "aggregated",
"agg_func": agg_func,
"parameters": ["p1", "p2"],
},
]
model = Model(**simple_data)
model.recorders.add(
**{
"name": "assert",
"type": "assertion",
"component": "agg",
"metric": "parameter",
"values": [test_func([10.0, 10.0])] * 366,
}
)
assert len(model.parameters) == 3
model.run()
def test_ordering(self, simple_data):
"""Test that a model loads if the aggregated parameter is defined before its dependencies."""
simple_data["parameters"] = [
{
"name": "agg",
"type": "aggregated",
"agg_func": "sum",
"parameters": ["p1", "p2"],
},
{"name": "p1", "type": "constant", "value": 10.0},
{"name": "p2", "type": "constant", "value": 10.0},
]
model = Model(**simple_data)
assert len(model.parameters) == 3
model.run()
def test_cycle_error(self, simple_data):
"""Test that a cycle in parameter dependencies does not load."""
simple_data["parameters"] = [
{
"name": "agg1",
"type": "aggregated",
"agg_func": "sum",
"parameters": ["p1", "agg2"],
},
{"name": "p1", "type": "constant", "value": 10.0},
{
"name": "agg2",
"type": "aggregated",
"agg_func": "sum",
"parameters": ["p1", "agg1"],
},
]
model = Model(**simple_data)
assert len(model.parameters) == 3
with pytest.raises(RuntimeError):
model.run()
class TestControlCurvePiecewiseInterpolatedParameter:
def test_basic(self, simple_storage_data):
"""Basic functional test of `ControlCurvePiecewiseInterpolatedParameter`"""
simple_storage_data["parameters"] = [
{"name": "cc1", "type": "constant", "value": 0.8},
{"name": "cc2", "type": "constant", "value": 0.5},
{
"name": "cc_interp",
"type": "ControlCurvePiecewiseInterpolated",
"storage_node": "storage1",
"control_curves": ["cc1", "cc2"],
"values": [
[10.0, 1.0],
[0.0, 0.0],
[-1.0, -10.0],
],
},
]
model = Model(**simple_storage_data)
model.recorders.add(
**{
"name": "assert",
"type": "assertion",
"component": "cc_interp",
"metric": "parameter",
"values": [
10.0, # 20 Ml (full)
1.0 + 9.0 * 0.15 / 0.2, # 19 Ml (95%)
1.0 + 9.0 * 0.10 / 0.2, # 18 Ml (90%)
1.0 + 9.0 * 0.05 / 0.2, # 17 Ml (85%)
0.0, # 16 Ml (80%)
0.0, # 15 Ml (75%)
0.0, # 14 Ml (70%)
0.0, # 13 Ml (65%)
0.0, # 12 Ml (60%)
0.0, # 11 Ml (55%)
-1.0, # 10 Ml (50%)
-1.0 - 9.0 * 0.05 / 0.5, # 09 Ml (45%)
-1.0 - 9.0 * 0.10 / 0.5, # 09 Ml (40%)
-1.0 - 9.0 * 0.15 / 0.5, # 09 Ml (35%)
-1.0 - 9.0 * 0.20 / 0.5, # 09 Ml (30%)
-1.0 - 9.0 * 0.25 / 0.5, # 09 Ml (25%)
-1.0 - 9.0 * 0.30 / 0.5, # 09 Ml (20%)
-1.0 - 9.0 * 0.35 / 0.5, # 09 Ml (15%)
-1.0 - 9.0 * 0.40 / 0.5, # 09 Ml (10%)
-1.0 - 9.0 * 0.45 / 0.5, # 09 Ml (05%)
-10.0, # 09 Ml (00%)
]
+ [-10.0] * 10,
}
)
assert len(model.parameters) == 3
model.run()
| 31.623711 | 101 | 0.392502 |
4a213fb447debf40c7d79b75d3d397c623221d32 | 6,159 | py | Python | riiid/validation.py | fabien-vavrand/kaggle-riiid | 3302955980e0d4bb2dbc72bcd369000b0724f1e7 | ["MIT"] | null | null | null | riiid/validation.py | fabien-vavrand/kaggle-riiid | 3302955980e0d4bb2dbc72bcd369000b0724f1e7 | ["MIT"] | null | null | null | riiid/validation.py | fabien-vavrand/kaggle-riiid | 3302955980e0d4bb2dbc72bcd369000b0724f1e7 | ["MIT"] | null | null | null |
import os
import numpy as np
import pandas as pd
import itertools
import logging
class StartPicker:
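    # Chooses a random start index for placing a user's n consecutive rows
    # among N batches. Start positions near either end of the valid range get
    # a higher weight (sqrt scaling), presumably to offset edge effects.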
def __init__(self, N, n):
self.N = N
self.n = n
self.length = N - n + 1
def get_prob(self, i):
if i < self.n:
return np.sqrt(self.n / (i + 1))
elif i > self.length - self.n - 1:
return np.sqrt(self.n / (self.length - i))
else:
return 1
def get_probs(self):
p = [self.get_prob(i) for i in range(self.length)]
psum = np.sum(p)
p = [prob / psum for prob in p]
return p
def random_start(self):
choices = list(range(self.length))
p = self.get_probs()
return np.random.choice(choices, size=1, replace=False, p=p)[0]
def generate_test(X, size, N=10000, seed=0):
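    # Builds a test split of at most `size` rows: each user gets a random
    # global start time, the latest task containers overall are selected, and
    # the result is spread over N sequential batches via generate_batches().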
np.random.seed(seed)
users = X.groupby('user_id')['timestamp'].max().reset_index()
users.columns = ['user_id', 'duration']
users['duration'] = users['duration'] / (1000 * 60 * 60 * 24)
# we pick a random initial timestamp for each user, so that their full period is within the riiid time period
total_duration = np.ceil(users['duration'].max())
users['random_period'] = total_duration - users['duration']
users['random'] = np.random.random(len(users))
users['appearance'] = users['random'] * users['random_period']
users['appearance'] = users['appearance'] * (1000 * 60 * 60 * 24)
users['initial_timestamp'] = np.round(users['appearance'], 0).astype(np.int64)
# We then compute the global timestamp for each task
X = pd.merge(X, users[['user_id', 'initial_timestamp']], on=['user_id'])
X['global_timestamp'] = X['initial_timestamp'] + X['timestamp']
# We pick the last "size" rows sorted on this global timestamp
test = X.groupby(['user_id', 'task_container_id', 'global_timestamp'], sort=False).size()
test = test.reset_index()
test.rename(columns={0: 'n'}, inplace=True)
test = test.sort_values('global_timestamp', ascending=False)
test['cumn'] = test['n'].rolling(len(test), min_periods=1).sum()
test = test[test['cumn'] <= size]
test = test.sort_values('global_timestamp')
test = test.drop(columns=['n', 'cumn'])
test = generate_batches(test, N)
return test
def generate_batches(test, N):
# We build more or less equal size batches
groups = test.groupby('user_id')
batches = [[] for _ in range(N)]
for user_id, user_test in groups:
n = len(user_test)
i = StartPicker(N, n).random_start()
for j, row in enumerate(user_test.itertuples(index=False)):
batches[i + j].append({
'batch_id': i + j,
'user_id': row.user_id,
'task_container_id': row.task_container_id
})
batches = itertools.chain.from_iterable(batches)
return pd.DataFrame(batches)
def build_test_batches(X):
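    # Splits the selected test rows into per-batch DataFrames, threading the
    # previous batch's answers through 'prior_group_answers_correct' and
    # 'prior_group_responses', which appears to mirror the iterative
    # competition test API format.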
# Expected columns for tests:
COLUMNS = ['row_id', 'timestamp', 'user_id', 'content_id', 'content_type_id',
'task_container_id', 'prior_question_elapsed_time',
'prior_question_had_explanation', 'prior_group_answers_correct',
'prior_group_responses', 'answered_correctly']
X = X[-pd.isnull(X['batch_id'])].copy().reset_index(drop=True)
batches = X.groupby('batch_id')
batches = [batch.copy() for _, batch in batches]
for i, batch in enumerate(batches):
if i == 0:
prior_user_answer = []
prior_answered_correctly = []
else:
prior_user_answer = list(batches[i - 1]['user_answer'].values)
prior_answered_correctly = list(batches[i - 1]['answered_correctly'].values)
batches[i - 1] = batches[i - 1][COLUMNS]
batch['row_id'] = 0
batch.reset_index(drop=True, inplace=True)
batch['prior_group_answers_correct'] = [str(prior_answered_correctly)] + [np.nan] * (len(batch) - 1)
batch['prior_group_responses'] = [str(prior_user_answer)] + [np.nan] * (len(batch) - 1)
if i == len(batches) - 1:
batches[i] = batches[i][COLUMNS]
return batches
def build_train(X):
X = X[pd.isnull(X['batch_id'])].copy().reset_index(drop=True)
X = X.drop(columns=['batch_id'])
return X
def merge_test(train, test, validation_ratio=None, return_batches=True):
X = pd.merge(train, test, on=['user_id', 'task_container_id'], how='left')
if validation_ratio is not None:
batches = sorted(test['batch_id'].unique())
n_validation_batches = int(len(batches) * validation_ratio)
validation_batches = batches[-n_validation_batches:]
validation = X[X['batch_id'].isin(validation_batches)]
X = X[-X['batch_id'].isin(validation_batches)].copy()
train = X[pd.isnull(X['batch_id'])]
test = X[-pd.isnull(X['batch_id'])]
train_size = len(train)
test_size = len(test)
test_ratio = test_size / (train_size + test_size)
logging.info(f'Train size = {train_size}, test size = {test_size} [{test_ratio:.1%}]')
if validation_ratio is not None:
validation_size = len(validation)
logging.info(f'Validation size = {validation_size}')
users = set(X['user_id'].values)
train_users = set(train['user_id'].values)
test_users = set(test['user_id'].values)
known_users = test_users.intersection(train_users)
new_users = test_users.difference(train_users)
logging.info(f'{len(users)} users, o/w {len(train_users)} in train and {len(test_users)} in test ({len(known_users)} existing, {len(new_users)} new)')
if validation_ratio is not None:
train_test_users = set(X['user_id'].values)
validation_users = set(validation['user_id'].values)
known_users = validation_users.intersection(train_test_users)
new_users = validation_users.difference(train_test_users)
logging.info(f'{len(validation_users)} users in validation ({len(known_users)} existing, {len(new_users)} new)')
if validation_ratio is None:
return X
else:
if return_batches:
validation = build_test_batches(validation)
return X, validation
| 37.554878 | 154 | 0.637279 |
4a21400c0b3a35af0d4545fa2ae1537a7d197a4e | 1,023 | py | Python | synd.py | easybe/synd | 895a0958b5e50d6166ff28b32c7549bad7ebfff0 | ["MIT"] | null | null | null | synd.py | easybe/synd | 895a0958b5e50d6166ff28b32c7549bad7ebfff0 | ["MIT"] | null | null | null | synd.py | easybe/synd | 895a0958b5e50d6166ff28b32c7549bad7ebfff0 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# SPDX-License-Identifier: MIT
"""Tiny Synology DiskStation daemon
The service will turn off the blinking LED at startup and shut the
system down when the power button is pressed.
"""
import os
import signal
import sys
from serial import Serial
UART_PORT = "/dev/ttyS1"
POWER_BUTTON_PRESSED = b'0'
CMD_LED_POWER_BLINK = b'5'
CMD_LED_POWER_OFF = b'6'
CMD_RCPOWERON = b'q'
def sigterm_handler(_signo, _stack_frame):
sys.exit(0)
def wait_for_button_press(uart):
while True:
in_byte = uart.read(1)
if in_byte == POWER_BUTTON_PRESSED:
print("Triggering system shutdown...")
os.system('/usr/sbin/poweroff')
if __name__ == '__main__':
signal.signal(signal.SIGTERM, sigterm_handler)
uart = Serial(UART_PORT, 9600, timeout=1)
try:
uart.write(CMD_LED_POWER_OFF)
uart.write(CMD_RCPOWERON)
wait_for_button_press(uart)
finally:
if uart:
uart.write(CMD_LED_POWER_BLINK)
uart.close()
| 22.23913 | 66 | 0.682307 |
4a21415032cb89e4ed3d24ff9cc7beb0cd14960c | 58,874 | py | Python | infoblox_netmri/api/broker/v3_7_0/auth_service_broker.py | NastyaArslanova/infoblox-netmri | 399d904399ba7958262c6f107fa3b0efdd55019b | ["Apache-2.0"] | null | null | null | infoblox_netmri/api/broker/v3_7_0/auth_service_broker.py | NastyaArslanova/infoblox-netmri | 399d904399ba7958262c6f107fa3b0efdd55019b | ["Apache-2.0"] | null | null | null | infoblox_netmri/api/broker/v3_7_0/auth_service_broker.py | NastyaArslanova/infoblox-netmri | 399d904399ba7958262c6f107fa3b0efdd55019b | ["Apache-2.0"] | null | null | null |
from ..broker import Broker
class AuthServiceBroker(Broker):
controller = "auth_services"
def show(self, **kwargs):
"""Shows the details for the specified auth service.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The id of the authentication service.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_service: The auth service identified by the specified id.
:rtype auth_service: AuthService
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available auth services. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The id of the authentication service.
:type id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AuthService. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_services: An array of the AuthService objects that match the specified input criteria.
:rtype auth_services: Array of AuthService
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available auth services matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param auth_method: The Authentication method of the service. One of (local, radius, tacacs, ldap, activedirectory).
:type auth_method: Array of String
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param context_params_json: Additional specific authentication method parameters are stored in this field using a json format. (such as 'base_dn' for LDAP method, Vendor Specific Attribute ID for Radius,...).
:type context_params_json: Array of String
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param created_at: The date and time the record was initially created in NetMRI.
:type created_at: Array of DateTime
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: Description/comment about this authentication service
:type description: Array of String
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param enabled_authz_ind: A flag indicating whether this service is used for computing privileges for the remote users.
:type enabled_authz_ind: Array of Boolean
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param enabled_ind: A flag indicating whether the authentication service settings is enabled or disabled.
:type enabled_ind: Array of Boolean
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The id of the authentication service.
:type id: Array of Integer
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param priority: The priority assigned to this Authentication Service.
:type priority: Array of Integer
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param service_name: The name of the Authentication Service.
:type service_name: Array of String
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timeout: The timeout interval of the service authentication servers.
:type timeout: Array of Integer
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AuthService. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against auth services, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: auth_method, context_params_json, created_at, description, enabled_authz_ind, enabled_ind, id, priority, service_name, timeout, updated_at.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_services: An array of the AuthService objects that match the specified input criteria.
:rtype auth_services: Array of AuthService
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available auth services matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: auth_method, context_params_json, created_at, description, enabled_authz_ind, enabled_ind, id, priority, service_name, timeout, updated_at.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_auth_method: The operator to apply to the field auth_method. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. auth_method: The Authentication method of the service. One of (local, radius, tacacs, ldap, activedirectory). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_auth_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_auth_method: If op_auth_method is specified, the field named in this input will be compared to the value in auth_method using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_auth_method must be specified if op_auth_method is specified.
:type val_f_auth_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_auth_method: If op_auth_method is specified, this value will be compared to the value in auth_method using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_auth_method must be specified if op_auth_method is specified.
:type val_c_auth_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_context_params_json: The operator to apply to the field context_params_json. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. context_params_json: Additional specific authentication method parameters are stored in this field using a json format. (such as 'base_dn' for LDAP method, Vendor Specific Attribute ID for Radius,...). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_context_params_json: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_context_params_json: If op_context_params_json is specified, the field named in this input will be compared to the value in context_params_json using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_context_params_json must be specified if op_context_params_json is specified.
:type val_f_context_params_json: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_context_params_json: If op_context_params_json is specified, this value will be compared to the value in context_params_json using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_context_params_json must be specified if op_context_params_json is specified.
:type val_c_context_params_json: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_description: The operator to apply to the field description. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. description: Description/comment about this authentication service For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_description: If op_description is specified, the field named in this input will be compared to the value in description using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_description must be specified if op_description is specified.
:type val_f_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_description: If op_description is specified, this value will be compared to the value in description using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_description must be specified if op_description is specified.
:type val_c_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_enabled_authz_ind: The operator to apply to the field enabled_authz_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. enabled_authz_ind: A flag indicating whether this service is used for computing privileges for the remote users. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_enabled_authz_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_enabled_authz_ind: If op_enabled_authz_ind is specified, the field named in this input will be compared to the value in enabled_authz_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_enabled_authz_ind must be specified if op_enabled_authz_ind is specified.
:type val_f_enabled_authz_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_enabled_authz_ind: If op_enabled_authz_ind is specified, this value will be compared to the value in enabled_authz_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_enabled_authz_ind must be specified if op_enabled_authz_ind is specified.
:type val_c_enabled_authz_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_enabled_ind: The operator to apply to the field enabled_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. enabled_ind: A flag indicating whether the authentication service settings is enabled or disabled. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_enabled_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_enabled_ind: If op_enabled_ind is specified, the field named in this input will be compared to the value in enabled_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_enabled_ind must be specified if op_enabled_ind is specified.
:type val_f_enabled_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_enabled_ind: If op_enabled_ind is specified, this value will be compared to the value in enabled_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_enabled_ind must be specified if op_enabled_ind is specified.
:type val_c_enabled_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The id of the authentication service. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_priority: The operator to apply to the field priority. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. priority: The priority assigned to this Authentication Service. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_priority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_priority: If op_priority is specified, the field named in this input will be compared to the value in priority using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_priority must be specified if op_priority is specified.
:type val_f_priority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_priority: If op_priority is specified, this value will be compared to the value in priority using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_priority must be specified if op_priority is specified.
:type val_c_priority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_service_name: The operator to apply to the field service_name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. service_name: The name of the Authentication Service. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_service_name: If op_service_name is specified, the field named in this input will be compared to the value in service_name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_service_name must be specified if op_service_name is specified.
:type val_f_service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_service_name: If op_service_name is specified, this value will be compared to the value in service_name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_service_name must be specified if op_service_name is specified.
:type val_c_service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_timeout: The operator to apply to the field timeout. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. timeout: The timeout interval of the service authentication servers. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_timeout: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_timeout: If op_timeout is specified, the field named in this input will be compared to the value in timeout using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_timeout must be specified if op_timeout is specified.
:type val_f_timeout: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_timeout: If op_timeout is specified, this value will be compared to the value in timeout using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_timeout must be specified if op_timeout is specified.
:type val_c_timeout: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AuthService. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_services: An array of the AuthService objects that match the specified input criteria.
:rtype auth_services: Array of AuthService
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def destroy(self, **kwargs):
"""Deletes the specified auth service from NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The id of the authentication service.
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy"), kwargs)
def create(self, **kwargs):
"""Create a new Authentication Service
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param service_name: The name of the authentication service, must be unique
:type service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param auth_method: The authentication method of the service. One of (local, radius, tacacs, ldap, activedirectory).
:type auth_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param priority: The priority assigned to this authentication service
:type priority: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: Description/comment about this authentication service
:type description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` True
:param enabled_ind: A flag indicating whether the authentication service settings is enabled or disabled
:type enabled_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param enabled_authz_ind: A flag indicating whether this service is used for computing privileges for the remote users
:type enabled_authz_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 5
:param timeout: The timeout interval of the service authentication servers
:type timeout: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param auth_domain: Authentication Active Directory Domain name or LDAP BaseDN to use for search. (required for LDAP & Active Directory methods)
:type auth_domain: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` cn
:param user_attr: User attribute ID for LDAP authentication method (required for LDAP methods).
:type user_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` memberOf
:param group_attr: Group membership attribute ID for LDAP authentication method. The LDAP server has to be configured to use memberOf Overlay.
:type group_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param auth_type: A flag indicating whether the search request to the LDAP server is performed anonymously or needs authentication using a privileged bind User and Password. (values: 'true' for Authenticated, 'false' for anonymous')
:type auth_type: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param bind_user: The bind User complete 'dn' in case LDAP Authenticated connection is needed to request search to find user
:type bind_user: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param bind_passwd: The bind User password in case LDAP Authenticated connection is needed to request search to find user
:type bind_passwd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` subtree
:param search_scope: Specify the scope of the LDAP search.<br>
- 'base': Search only the base object.<br> - 'one': Search the entries immediately below the base object.<br> - 'subtree': Search the whole tree below (and including) the base object. This is the default.
:type search_scope: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` infoblox
:param tacacs_service: The name of the defined custom service for Infoblox.<br> group = GR_1{<br> service = infoblox { na-group-info = MY_GROUP_1 }<br> }
:type tacacs_service: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` na-group-info
:param tacacs_group_attr: The name of the group attribute defined in the Tacacs+ server to retrieve the user's groups list.<br> group = GR_1{<br> service = infoblox { na-group-info = MY_GROUP_1 }<br> }
:type tacacs_group_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 7779
:param radius_vendor_id: The Infoblox Vendor ID defined in the radius dictionary.<br> VENDOR infoblox 7779
:type radius_vendor_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 10
:param radius_vsa_id: The Vendor Specific Attribute ID as defined in the radius server dictionary to retrieve the user's groups list.<br> ATTRIBUTE na-group-info 10 string infoblox
:type radius_vsa_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_id: The new authentication service id.
:rtype service_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_name: The new authentication service name.
:rtype service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_description: The new authentication service description.
:rtype service_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_entity_id: SAML Entity ID
:rtype saml_entity_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_metadata: SAML Metadata URL
:rtype saml_metadata: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert: SAML Certificate
:rtype saml_cacert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_fn: SAML Certificate file name
:rtype saml_cacert_fn: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_key: SAML Key
:rtype saml_cacert_key: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_key_fn: SAML Key file name
:rtype saml_cacert_key_fn: String
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
def update(self, **kwargs):
"""Update an existing Authentication Service
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The id of the authentication service to modify
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param service_name: The name of the authentication service, must be unique
:type service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param auth_method: The authentication method of the service. One of (local, radius, tacacs, ldap, activedirectory).
:type auth_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param priority: The priority assigned to this authentication service
:type priority: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: Description/comment of this authentication service
:type description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param enabled_ind: A flag indicating whether the authentication service settings is enabled or disabled
:type enabled_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param enabled_authz_ind: A flag indicating whether this service is used for computing privileges for the remote users
:type enabled_authz_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timeout: The timeout interval of the service authentication servers
:type timeout: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param auth_domain: Authentication Active Directory Domain name or LDAP BaseDN to use for search. (required for LDAP & Active Directory methods)
:type auth_domain: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param user_attr: User attribute ID for LDAP authentication method (required for LDAP methods).
:type user_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param group_attr: Group membership attribute ID for LDAP authentication method. The LDAP server has to be configured to use memberOf Overlay.
:type group_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param auth_type: A flag indicating whether the search request to the LDAP server is performed anonymously or needs authentication using a privileged bind User and Password. (values: 'true' for Authenticated, 'false' for anonymous')
:type auth_type: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param bind_user: The bind User complete 'dn' in case LDAP Authenticated connection is needed to request search to find user
:type bind_user: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param bind_passwd: The bind User password in case LDAP Authenticated connection is needed to request search to find user
:type bind_passwd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param search_scope: Specify the scope of the LDAP search.<br>
- 'base': Search only the base object.<br> - 'one': Search the entries immediately below the base object.<br> - 'subtree': Search the whole tree below (and including) the base object. This is the default.
:type search_scope: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param tacacs_service: The name of the defined custom service for Infoblox.<br> group = GR_1{<br> service = infoblox { na-group-info = MY_GROUP_1 }<br> }
:type tacacs_service: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param tacacs_group_attr: The name of the group attribute defined in the Tacacs+ server to retrieve the user's groups list.<br> group = GR_1{<br> service = infoblox { na-group-info = MY_GROUP_1 }<br> }
:type tacacs_group_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param radius_vendor_id: The Infoblox Vendor ID defined in the radius dictionary.<br> VENDOR infoblox 7779
:type radius_vendor_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param radius_vsa_id: The Vendor Specific Attribute ID as defined in the radius server dictionary to retrieve the user's groups list.<br> ATTRIBUTE na-group-info 10 string infoblox
:type radius_vsa_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_id: The new authentication service id.
:rtype service_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_name: The new authentication service name.
:rtype service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_description: The new authentication service description.
:rtype service_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_entity_id: SAML Entity ID
:rtype saml_entity_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_metadata: SAML Metadata URL
:rtype saml_metadata: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert: SAML Certificate
:rtype saml_cacert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_fn: SAML Certificate file name
:rtype saml_cacert_fn: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_key: SAML Key
:rtype saml_cacert_key: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_key_fn: SAML Key file name
:rtype saml_cacert_key_fn: String
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
def duplicate(self, **kwargs):
"""Duplicate an authentication service.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the authentication service.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_id: The new authentication service id.
:rtype service_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_name: The new authentication service name.
:rtype service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_description: The new authentication service description.
:rtype service_description: String
"""
return self.api_request(self._get_method_fullname("duplicate"), kwargs)
def auth_servers(self, **kwargs):
"""List all servers defined for the requested Authentication Service
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The id of the authentication service to list the servers for
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("auth_servers"), kwargs)
def auth_test_creds(self, **kwargs):
"""Test credentials for this service Authentication Servers that are stored in db.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param service_id: The id of the authentication service to test
:type service_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param username: The login of the user to test
:type username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param password: The password of the user to test
:type password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param nohtml: Convert output to simple text
:type nohtml: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The id of the session output file to display.
:type id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param read: Offset in bytes from the start of the file.
:type read: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the session output file.
:rtype id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return read: Offset in bytes from the start of the file, to be used in the next auth_test_creds call, in order to retrieve the next lines of the output.
:rtype read: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return output: Result of the credential test.
:rtype output: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: Status of the remaining output data to dump:
<br><dd> 0: if the dump of the credential test output is completed.
<br><dd> 1: if there is still output data to dump.
:rtype status: Integer
"""
return self.api_request(self._get_method_fullname("auth_test_creds"), kwargs)
| 45.995313 | 551 | 0.579356 |
4a2141af21d8d1c6eba0ab39b2245625303abc85
| 15,143 |
py
|
Python
|
microbepy/correlation/mutation_cooccurrence.py
|
ScienceStacks/MicrobEPy
|
704435e66c58677bab24f27820458870092924e2
|
[
"MIT"
] | 1 |
2019-05-04T00:31:05.000Z
|
2019-05-04T00:31:05.000Z
|
microbepy/correlation/mutation_cooccurrence.py
|
ScienceStacks/MicrobEPy
|
704435e66c58677bab24f27820458870092924e2
|
[
"MIT"
] | null | null | null |
microbepy/correlation/mutation_cooccurrence.py
|
ScienceStacks/MicrobEPy
|
704435e66c58677bab24f27820458870092924e2
|
[
"MIT"
] | null | null | null |
"""
Analyzes the co-occurrence of mutations in isolates.
Produces Combination Statistics - CoStatistic.
A combination is a group
of Isolates. Combination statistics are statistics about
a combination that summarize
- mutations common to the combination
- the average value of dependent variable(s)
- the range of values of dependent variable(s)
The CoStatistic dataframes are organized by keys, each key
is associated with a different dataframe:
cn.MIN
cn.ISOLATES - isolates in group
cn.MUTATIONS - list of mutations in common
cn.COUNT_MUTATIONS - number of mutations in common
cn.COUNT_ISOLATES - number of isolates
cn.MIN - minimum of the rate and yield
cn.MAX - maximum of the rate and yield
cn.RATE, cn.YIELD
cn.ISOLATES - isolates in group
cn.MUTATIONS - list of mutations in common
cn.COUNT_MUTATIONS - number of mutations in common
cn.COUNT_ISOLATES - number of isolates
cn.AVG - average value in units of standard deviation
cn.RNG - range in units of standard deviation
The CoStatistic dataframes are constructed from Sample dataframes
that are indexed by isolate pair and have the following columns:
cn.GROUP, cn.RATE, cn.YIELD, mutations (with binary values)
"""
from microbepy.common import constants as cn
from microbepy.data.model_data_provider \
import ModelDataDualProvider, ModelDataProvider
from microbepy.common.study_context import nextStudyContext
from microbepy.common.range_constraint import RangeConstraint
from microbepy.common import util
import collections
import itertools
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
NUM_REPLICATIONS = int(1e4)
XAXIS_VALUES = [cn.RATE, cn.YIELD, cn.MIN, cn.MAX]
class CoStatistic(object):
# Container for Cooccurrence Statistics
SCHEMA = {
cn.MIN: [cn.ISOLATES, cn.MUTATIONS,
cn.COUNT_MUTATIONS, cn.MIN, cn.MAX],
cn.RATE: [cn.ISOLATES, cn.MUTATIONS,
cn.COUNT_MUTATIONS, cn.AVG, cn.RNG],
cn.YIELD: [cn.ISOLATES, cn.MUTATIONS,
cn.COUNT_MUTATIONS, cn.AVG, cn.RNG],
}
ATTRIBUTES = SCHEMA.keys()
def __init__(self):
self.dfs = {}
for key in self.__class__.SCHEMA.keys():
self.dfs[key] = pd.DataFrame()
def get(self, name):
return self.dfs[name]
def set(self, name, df):
self.dfs[name] = df
def values(self):
return self.dfs.values()
def concat(self, other):
"""
The current CoStatistic is concatenated with another CoStatistic.
:param CoStatistic other:
"""
for key in self.__class__.SCHEMA.keys():
self.dfs[key] = pd.concat([self.dfs[key], other.dfs[key]],
sort=True)
RegressionResult = collections.namedtuple('RegressionResult',
['predictions', 'rsq', 'slope'])
class MutationCooccurrence(object):
def __init__(self, mutation_column=cn.GGENE_ID,
provider=None, constraints=None, is_plot=True):
"""
:param ModelDataDualProvider provider: if specified, has invoked do()
:param bool is_plot: plots if True
"""
self._mutation_column = mutation_column
self._is_plot = is_plot
if provider is None:
provider = ModelDataDualProvider(self._mutation_column,
constraints=constraints)
provider.do(
transform_type=cn.TRANSFORM_ONLY_LOW_FREQUENCY_ISOLATES)
self.df_X = provider.df_X
self.df_ys = provider.df_ys
self.df_y_stds = provider.df_y_stds
self.isolate_dict = ModelDataProvider.getIsolatesFromIndices(
self.df_X.index)
@staticmethod
def _combineIsolates(isolate_dict):
isolates = []
[isolates.extend(v) for v in isolate_dict.values()]
return isolates
def findWithRangeConstraints(self, rc_vector):
"""
Finds the set of mutations common to isolates satisfying
constraints on rate and yield.
:param RangeConstraintVector rc_vector:
has keys cn.RATE, cn.YIELD
:return dict, list-str: isolate_dict, mutations
dictionary keyed by cn.KEY_ISOLATE_DVH, cn.KEY_ISOLATE_MMP
"""
cls = self.__class__
#
# Select rows that abide by the RangeConstraints
df = pd.DataFrame({
cn.RATE: self.df_ys[cn.RATE][cn.VALUE],
cn.YIELD: self.df_ys[cn.YIELD][cn.VALUE],
})
indices = rc_vector.findSatisfiedRows(df)
isolate_dict = ModelDataProvider.getIsolatesFromIndices(indices)
superset_isolates = cls._combineIsolates(isolate_dict)
#
isolates, mutations = self.find(
superset_isolates=superset_isolates)
return isolates, mutations
def find(self,
superset_isolates=None, superset_mutations=None):
"""
Finds co-occurring mutations for a collection of isolates
:param list-str superset_isolates: set of isolates considered
:param list-str superset_mutations: set of mutations considered
:return list-str, list-str:
isolates satisfying the constraints that are present in data
list of mutations shared by isolates
Notes:
1. Includes the mutations of paired isolates
"""
cls = self.__class__
#
if superset_isolates is None:
superset_isolates = self.isolate_dict[cn.KEY_ISOLATE_DVH]
superset_isolates.extend(self.isolate_dict[cn.KEY_ISOLATE_MMP])
if superset_mutations is None:
superset_mutations = self.df_X.columns.tolist()
# Select the rows
sel = [(i[0] in superset_isolates) or (i[1] in superset_isolates)
for i in self.df_X.index]
df_X = self.df_X[sel]
isolate_dict = ModelDataProvider.getIsolatesFromIndices(
df_X.index)
isolates = cls._combineIsolates(isolate_dict)
# Select the columns
columns = [c for c in df_X.columns if c in superset_mutations]
df_X_final = df_X[columns]
    # Find the common mutations
ser = df_X_final.product()
mutations = [i for i, r in ser.items() if ser[i] == 1]
#
return isolates, mutations
def _makeCoStatisticFromSampleDF(self, df_sample):
"""
Computes statistics for aggregations of isolate groups.
:param pd.DataFrame df_sample: indexed by isolates
cn.GROUP, Mutation Columns, cn.RATE, cn.YIELD
:return CoStatistic:
"""
#
df_group = df_sample.groupby(cn.GROUP)
groups = df_group.groups
#
df_cooccur = df_group.prod()
for col in [cn.RATE, cn.YIELD]:
del df_cooccur[col]
    ser_cooccur = df_cooccur.sum(axis=1)  # Number of mutations common to all isolates in each group
# Find the common mutations for each group
mutation_stgs = []
for group in groups.keys():
df = df_cooccur[df_cooccur.index == group]
mutations = [m for m in df_cooccur.columns
if df.loc[group, m] == 1]
mutations.sort()
mutation_stgs.append(str(mutations))
# Calculate the isolate strings
isolates = []
for key, values in groups.items():
size = len(groups[key])
isolate_stg = [str(v) for v in values]
      isolate_stg.sort()
isolates.append(str(isolate_stg))
# Compute common data
df_min = df_sample.groupby(cn.GROUP).min()
df_max = df_sample.groupby(cn.GROUP).max()
df_avg = df_sample.groupby(cn.GROUP).mean()
# Compute RATE, YIELD
def makeDF(depvar):
return pd.DataFrame({
cn.COUNT_MUTATIONS: ser_cooccur,
cn.MUTATIONS: mutation_stgs,
cn.ISOLATES: isolates,
cn.AVG: df_avg[depvar],
cn.RNG: df_max[depvar] - df_min[depvar],
})
#
df_result_rate = makeDF(cn.RATE)
df_result_yield = makeDF(cn.YIELD)
# Compute cn.MIN dataframe
df_result_min = pd.DataFrame({
cn.COUNT_MUTATIONS: ser_cooccur,
cn.MUTATIONS: mutation_stgs,
cn.ISOLATES: isolates,
})
df_result_min[cn.MAX] = pd.concat(
[df_result_rate[cn.RNG], df_result_yield[cn.RNG]],
axis=1, sort=True).max(axis=1)
df_result_min[cn.MIN] = pd.concat(
[df_result_rate[cn.RNG], df_result_yield[cn.RNG]],
axis=1, sort=True).min(axis=1)
result = CoStatistic()
result.set(cn.RATE, df_result_rate)
result.set(cn.YIELD, df_result_yield)
result.set(cn.MIN, df_result_min)
# Add the count of isolates
for df in result.values():
df[cn.COUNT_ISOLATES] = df[cn.ISOLATES].apply(
lambda v: len(eval(v)))
#
return result
def makeCoStatistic(self, size, is_resample=False, **kwargs):
"""
Chooses sets of isolates of the specified size.
Provides the distribution of the number of mutations in
common in the randomly chosen sets.
:param int size: set size
:param dict **kwargs: arguments for resample
:param bool is_resample: always use resampling
:return CoStatistic:
"""
if is_resample or (len(self.df_X)**size > 1e6):
df_sample = self._makeResampleSampleDF(size,
**kwargs)
else:
df_sample = self._makeExplicitSampleDF(size)
return self._makeCoStatisticFromSampleDF(df_sample)
def _makeExplicitSampleDF(self, size):
"""
Constructs a dataframe of samples by explicitly constructing
the possible sets of a desired size.
:param int size: set size
:return pd.DataFrame: Sample DataFrame
"""
df_X = self.df_X.copy()
isolates = df_X.index.tolist()
isolate_indices = range(len(isolates))
df_X[cn.INDEX] = isolate_indices
df_X[cn.YIELD] = self.df_ys[cn.YIELD][cn.VALUE]
df_X[cn.RATE] = self.df_ys[cn.RATE][cn.VALUE]
df_X = df_X.reset_index()
df_X[cn.INDEX] = isolate_indices
combination_iterator = itertools.combinations(isolate_indices,
size)
indices = []
groups = []
group_num = 0
for combination in combination_iterator:
indices.extend(list(combination))
groups.extend([group_num] * size)
group_num += 1
df_index = pd.DataFrame({
cn.GROUP: groups,
cn.INDEX: indices,
})
df_result = df_index.merge(df_X, on=cn.INDEX, how='inner')
df_result = df_result.sort_values(cn.GROUP)
tuples = [(r[cn.KEY_ISOLATE_DVH], r[cn.KEY_ISOLATE_MMP])
for _,r in df_result.iterrows()]
del df_result[cn.INDEX]
df_result.index = tuples
return df_result
def _makeResampleSampleDF(self, size,
num_replications=NUM_REPLICATIONS):
"""
Constructs a dataframe of samples by resampling.
:param int size: set size
:param int num_replications: number of replications
:return pd.DataFrame: Sample DataFrame
"""
# Construct the replicated dataframe
df_base = self.df_X.copy()
length = len(df_base)
df_base[cn.RATE] = self.df_ys[cn.RATE][cn.VALUE]
df_base[cn.YIELD] = self.df_ys[cn.YIELD][cn.VALUE]
df_sample = pd.concat([df_base] * num_replications, sort=True)
groups = []
[groups.extend([n] * length) for n in range(num_replications)]
df_sample[cn.GROUP] = groups
# Add the sort number
df_sample[cn.SORT] = np.random.uniform(
0, 1, length*num_replications) + df_sample[cn.GROUP]
df_sample = df_sample.sort_values(cn.SORT)
del df_sample[cn.SORT]
# Take the first elements of each group
sel_base = [[True] * size, [False] * (length - size)]
sel_lists = sel_base * num_replications
sel = []
[sel.extend(v) for v in sel_lists]
return df_sample[sel]
def plot(self, size,
columns=[cn.RATE, cn.YIELD, cn.MIN, cn.MAX],
title=""):
"""
Scatter plot that is overlayed with a regression line.
:param int size: Set size to plot
:param list-str columns: Columns in co-occurrence dataframe
that are to be plotted.
:param str title: plot title
:return pd.DataFrame:
cn.RSQ, cn.SLOPE, cn.VALUE (size), cn.XAXIS
"""
def regress(x, y):
"""
:return RegressionResult:
"""
X = pd.DataFrame({
'ones': [1] * len(x),
'x': x
})
lr = linear_model.LinearRegression()
lr.fit(X, y)
return RegressionResult(
predictions=lr.predict(X),
rsq=lr.score(X,y),
slope=lr.coef_[1],
)
#
result = self.makeCoStatistic(size)
title = "Set size: %d, %s" % (size, title)
col_dict = {
cn.RATE: cn.RNG,
cn.YIELD: cn.RNG,
cn.MIN: cn.MIN,
cn.MAX: cn.MAX,
}
rsqs = []
slopes = []
for key, col in col_dict.items():
if key == cn.MAX:
new_key = cn.MIN
else:
new_key = key
df = result.get(new_key)
df = df.sort_values(col)
plt.scatter(df[col], df[cn.COUNT_MUTATIONS])
regression_result = regress(df[col], df[cn.COUNT_MUTATIONS])
rsqs.append(regression_result.rsq)
slopes.append(regression_result.slope)
plt.plot(df[col], regression_result.predictions, c='r')
plt.xlabel(key)
plt.ylabel("Count")
new_title = "%s, RSQ=%1.3f, slope=%1.3f" % (
title, regression_result.rsq, regression_result.slope)
plt.title(new_title)
if self._is_plot:
plt.show()
df = pd.DataFrame({
cn.RSQ: rsqs,
cn.SLOPE: slopes,
cn.XAXIS: col_dict.keys(),
})
df[cn.VALUE] = size
return df
@classmethod
def makeSlopeDF(cls, lines=None, mutation_columns=None, set_sizes=None):
"""
Constructs dataframes with slopes for lines.
:return pd.DataFrame:
cn.LINE, cn.RSQ, cn.SLOPE, cn.VALUE (set size),
cn.MUTATION_COLUMN,
cn.XAXIS (cn.RATE, cn.YIELD, cn.MIN, cn.MAX)
"""
if lines is None:
lines = [cn.LINE_HA2, cn.LINE_HR2, cn.LINE_UE3]
if mutation_columns is None:
mutation_columns = cn.MUTATION_COLUMNS
if set_sizes is None:
set_sizes = range(3, 9)
specification = {
cn.LINE: lines,
cn.MUTATION_COLUMN: mutation_columns,
cn.VALUE: set_sizes,
}
dfs = []
for context in nextStudyContext(specification):
constraints = [lambda r: r[cn.LINE] == context.line]
cooccur = cls(context.mutation_column,
constraints=constraints, is_plot=False)
df = cooccur.plot(context.value)
df[cn.LINE] = context.line
df[cn.MUTATION_COLUMN] = context.mutation_column
dfs.append(df)
return pd.concat(dfs, sort=True)
@classmethod
def makeLineCoStatistic(cls, study_context, rc_vector=None):
"""
Makes statistics for the line and the range constraint vector
for the range of sizes of isolates present.
:param StudyContext study_context: specifies
line, mutation_column
:param RangeConstraintVector rc_vector:
:return CoStatistic:
"""
if study_context.line == cn.LINE_ALL:
constraints = None
else:
constraints = [lambda r: r[cn.LINE] == study_context.line]
provider = ModelDataDualProvider(study_context.mutation_column,
constraints=constraints, rc_vector=rc_vector)
provider.do()
m_c = cls(mutation_column=study_context.mutation_column,
provider=provider)
#
max_size = len(provider.df_X) # Number of isolates present
result = CoStatistic()
for size in range(2, max_size+1):
result.concat(m_c.makeCoStatistic(size))
return result
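# Editor's sketch (not part of the original module): a minimal usage example,
# assuming a configured microbepy installation with populated data tables.
#
#   cooccur = MutationCooccurrence(mutation_column=cn.GGENE_ID, is_plot=False)
#   isolates, mutations = cooccur.find()      # mutations shared by all isolate pairs
#   statistic = cooccur.makeCoStatistic(3)    # combination statistics for groups of 3
#   print(statistic.get(cn.MIN).head())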
| 33.651111 | 74 | 0.66506 |
4a2141cfe7cd995686735b6a729ed68a9af47ea5
| 455 |
py
|
Python
|
ams/ticket/migrations/0010_auto_20190117_1750.py
|
magnuspedro/ams
|
72ef810d14d9a4724e781489d081140be6674d60
|
[
"MIT"
] | null | null | null |
ams/ticket/migrations/0010_auto_20190117_1750.py
|
magnuspedro/ams
|
72ef810d14d9a4724e781489d081140be6674d60
|
[
"MIT"
] | null | null | null |
ams/ticket/migrations/0010_auto_20190117_1750.py
|
magnuspedro/ams
|
72ef810d14d9a4724e781489d081140be6674d60
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.4 on 2019-01-17 17:50
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('ticket', '0009_auto_20190117_1740'),
]
operations = [
migrations.AlterField(
model_name='ticket',
name='code',
field=models.CharField(default=uuid.UUID('e07b8b85-9e6e-4e85-b2e1-596a9beb39d9'), max_length=255),
),
]
| 22.75 | 110 | 0.628571 |
4a214243064041a51b686d022ed84586e5237b26
| 4,027 |
py
|
Python
|
test/functional/feature_notifications.py
|
Real-E-Coin/REC
|
148063bd6afe431c565f3ae3e75f010b11b3d4e8
|
[
"MIT"
] | 1 |
2021-12-30T23:58:45.000Z
|
2021-12-30T23:58:45.000Z
|
test/functional/feature_notifications.py
|
Real-E-Coin/REC
|
148063bd6afe431c565f3ae3e75f010b11b3d4e8
|
[
"MIT"
] | null | null | null |
test/functional/feature_notifications.py
|
Real-E-Coin/REC
|
148063bd6afe431c565f3ae3e75f010b11b3d4e8
|
[
"MIT"
] | 1 |
2022-01-10T22:13:20.000Z
|
2022-01-10T22:13:20.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -alertnotify, -blocknotify and -walletnotify options."""
import os
from test_framework.test_framework import Real_E_CoinTestFramework
from test_framework.util import (
assert_equal,
wait_until,
connect_nodes,
)
class NotificationsTest(Real_E_CoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt")
self.tx_filename = os.path.join(self.options.tmpdir, "transactions.txt")
# -alertnotify and -blocknotify on node0, walletnotify on node1
self.extra_args = [["-blockversion=4",
"-alertnotify=echo %%s >> %s" % self.alert_filename,
"-blocknotify=echo %%s >> %s" % self.block_filename],
["-blockversion=211",
"-rescan",
"-walletnotify=echo %%s >> %s" % self.tx_filename]]
super().setup_network()
def run_test(self):
self.log.info("test -blocknotify")
block_count = 10
blocks = self.nodes[1].generate(block_count)
# wait at most 10 seconds for expected file size before reading the content
wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated blocks hashes
with open(self.block_filename, 'r') as f:
assert_equal(sorted(blocks), sorted(f.read().splitlines()))
self.log.info("test -walletnotify")
# wait at most 10 seconds for expected file size before reading the content
wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
with open(self.tx_filename, 'r') as f:
assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
os.remove(self.tx_filename)
self.log.info("test -walletnotify after rescan")
# restart node to rescan to force wallet notifications
self.restart_node(1)
connect_nodes(self.nodes[0], 1)
wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
with open(self.tx_filename, 'r') as f:
assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
        # Mine another 51 up-version blocks. -alertnotify should trigger on the 51st.
self.log.info("test -alertnotify")
self.nodes[1].generate(51)
self.sync_all()
# Give real_e_coind 10 seconds to write the alert notification
wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10)
with open(self.alert_filename, 'r', encoding='utf8') as f:
alert_text = f.read()
# Mine more up-version blocks, should not get more alerts:
self.nodes[1].generate(2)
self.sync_all()
with open(self.alert_filename, 'r', encoding='utf8') as f:
alert_text2 = f.read()
self.log.info("-alertnotify should not continue notifying for more unknown version blocks")
assert_equal(alert_text, alert_text2)
if __name__ == '__main__':
NotificationsTest().main()
| 44.252747 | 138 | 0.656072 |
4a21433e78f95509efeb11a344379828b876af20
| 1,826 |
py
|
Python
|
docs/conf.py
|
Bixoto/PyMagento
|
302ef79ab4bd00a22ad659751cebfba7e9173136
|
[
"MIT"
] | 2 |
2021-12-21T16:43:05.000Z
|
2022-01-21T09:15:54.000Z
|
docs/conf.py
|
Bixoto/PyMagento
|
302ef79ab4bd00a22ad659751cebfba7e9173136
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Bixoto/PyMagento
|
302ef79ab4bd00a22ad659751cebfba7e9173136
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import sys
import os
from os.path import dirname
here = dirname(__file__)
sys.path.insert(0, here+'/..')
import magento
# -- Project information -----------------------------------------------------
project = 'PyMagento'
copyright = '2022, Bixoto'
author = 'Bixoto'
# The full version, including alpha/beta/rc tags
release = magento.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx_rtd_theme',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 29.934426 | 78 | 0.656079 |
4a214342722be17806ea5256089044939cd45180
| 1,515 |
py
|
Python
|
main.py
|
gabrielcosi/patbot
|
4f397e8cf94b30cdc248adc872568a64d0715767
|
[
"MIT"
] | 1 |
2020-08-14T22:31:04.000Z
|
2020-08-14T22:31:04.000Z
|
main.py
|
gabrielcosi/patbot
|
4f397e8cf94b30cdc248adc872568a64d0715767
|
[
"MIT"
] | null | null | null |
main.py
|
gabrielcosi/patbot
|
4f397e8cf94b30cdc248adc872568a64d0715767
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from time import sleep
class PatBot:
def __init__(self):
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
self.driver = webdriver.Chrome(
            executable_path=r'C:\webdrivers\chromedriver.exe', options=options)
user = input("Ingresa tu usuario: ")
pw = input("Ingresa tu contraseña: ")
self.driver.get("https://patmos.upeu.edu.pe/upeu")
self.driver.find_element_by_xpath(
"//input[@name=\"usuario\"]").send_keys(user)
self.driver.find_element_by_xpath(
"//input[@name=\"password\"]").send_keys(pw)
self.driver.find_element_by_xpath("//button[@type=\"submit\"]").click()
sleep(2)
self.driver.find_element_by_xpath(
"//li/following::a[@href=\"/report/dashboardStudent\"]").click()
sleep(5)
self.driver.find_element_by_xpath(
"//label[@for=\"customSwitch5\"]").click()
sleep(2)
def checkItems(self):
table = self.driver.find_element_by_css_selector(
"table.table")
for row in table.find_elements_by_tag_name("tr"):
self.driver.execute_script(
"window.scrollTo(0,"+str(row.location['y'] - 200)+")")
row.find_element_by_tag_name("td").click()
sleep(1)
self.driver.find_element_by_xpath(
"//button[contains(text(), 'Cerrar')]").click()
my_bot = PatBot()
my_bot.checkItems()
| 36.95122 | 79 | 0.605281 |
4a21437e98fd6246c02be63fe8d397039ecbe204
| 1,834 |
py
|
Python
|
tools/EditFileVersions.py
|
gbishop/TarHeelGameplay
|
4c14d8f1855fa560321e3e4d6cbfcf8320b9062b
|
[
"MIT"
] | null | null | null |
tools/EditFileVersions.py
|
gbishop/TarHeelGameplay
|
4c14d8f1855fa560321e3e4d6cbfcf8320b9062b
|
[
"MIT"
] | 35 |
2015-10-14T11:30:59.000Z
|
2022-03-15T22:27:56.000Z
|
tools/EditFileVersions.py
|
gbishop/TarHeelGameplay
|
4c14d8f1855fa560321e3e4d6cbfcf8320b9062b
|
[
"MIT"
] | 2 |
2018-10-12T13:08:47.000Z
|
2019-02-08T22:59:32.000Z
|
"""Edit files in Theme-build so that references are versioned to enable caching in production"""
import os.path as osp
import pickle
import hashlib
import re
import argparse
parser = argparse.ArgumentParser(description="Edit urls to include version numbers")
parser.add_argument("--staticHost", default="")
parser.add_argument("--db", default="../gbVersion.pkl")
parser.add_argument("--used")
parser.add_argument("files", nargs="+")
args = parser.parse_args()
staticHost = args.staticHost
target = re.compile(r"""(?<=['"(])/theme(V[0-9]+)?/([^'"\\)]*)""")
db = pickle.load(open(args.db, "rb"))
used = {}
def insertVersion(m):
name = m.group(2)
fullname = name
useStaticHost = True
if fullname == "js/main":
fullname = fullname + ".js"
useStaticHost = False
elif fullname.endswith(".json") or fullname.endswith(".swf"):
useStaticHost = False
if not osp.exists(fullname):
# print 'missing', fname, name
return m.group(0)
newhash = hashlib.md5(open(fullname, "rb").read()).hexdigest()
if fullname not in db:
version = 1
db[fullname] = (version, newhash)
else:
version, oldhash = db[fullname]
if oldhash != newhash:
version += 1
db[fullname] = (version, newhash)
if useStaticHost:
prefix = "%s/themeV%d/" % (staticHost, version)
else:
prefix = "/themeV%d/" % version
used[prefix + fullname] = True
return prefix + name
for fname in args.files:
p, ext = osp.splitext(fname)
obytes = open(fname, "r").read()
nbytes = target.sub(insertVersion, obytes)
if obytes != nbytes:
print(fname)
open(fname, "w").write(nbytes)
pickle.dump(db, open(args.db, "wb"))
if args.used:
open(args.used, "w").write("\n".join(sorted(used.keys())))
| 26.970588 | 96 | 0.623773 |
4a21444e6dba5430dcf21da27b6a929537e87497
| 4,866 |
py
|
Python
|
cushead/generator/templates/templates.py
|
mrsantos321/customhead
|
bb70867afc34abe7a2ef76bb0f92c08d7ff38214
|
[
"MIT"
] | 3 |
2019-10-08T06:02:23.000Z
|
2020-01-22T09:14:35.000Z
|
cushead/generator/templates/templates.py
|
mrsantos321/cushead
|
bb70867afc34abe7a2ef76bb0f92c08d7ff38214
|
[
"MIT"
] | 297 |
2019-08-22T19:45:23.000Z
|
2022-03-26T02:30:25.000Z
|
cushead/generator/templates/templates.py
|
mrsantos321/cushead
|
bb70867afc34abe7a2ef76bb0f92c08d7ff38214
|
[
"MIT"
] | 5 |
2019-09-25T02:35:04.000Z
|
2021-03-31T04:23:47.000Z
|
"""
Handle templates generation.
"""
from __future__ import annotations
import hashlib
import pathlib
import re
from typing import Any
from typing import List
from typing import Union
import jinja2
from cushead.generator import config as generator_config
from cushead.generator import files
from cushead.generator.templates.jinja import filters
class TemplateLoader:
"""
Handle jinja templates.
"""
def __init__(self, **kwargs: Any) -> None:
"""
Initialize a jinja template loader.
"""
template_loader = jinja2.FileSystemLoader(searchpath=str(pathlib.Path(__file__).parent / "jinja" / "templates"))
self.template_parser = jinja2.Environment(
loader=template_loader,
lstrip_blocks=True,
autoescape=True,
**kwargs,
)
def render_template(self, *, path: str) -> bytes:
"""
Render a template.
Args:
path: the template path, relative to the templates_folder instance attribute.
Returns:
The template rendered in UTF-8 format.
"""
rendered_template = self.template_parser.get_template(path).render()
cleaned_template = re.sub("((\n +)+\n)|(\n\n$)", "\n", rendered_template)
return cleaned_template.encode()
def get_template_hash(*, template: bytes) -> str:
"""
Get a hash of a template.
Args:
template: the template in UTF-8 format.
Returns:
The hash.
"""
return hashlib.sha256(template).hexdigest()[0:6]
def generate_templates(*, config: generator_config.Config) -> List[files.File]:
"""
Get templates ready to create.
Args:
config: the config used in the templates context.
Returns:
The templates.
"""
template_loader = TemplateLoader(extensions=["cushead.generator.templates.jinja.extensions.OneLineExtension"])
template_loader.template_parser.globals["config"] = config
template_loader.template_parser.filters["generate_sri"] = filters.generate_sri
index_template = template_loader.render_template(path="index.jinja2")
index_hash = get_template_hash(template=index_template)
template_loader.template_parser.globals["index_hash"] = index_hash
templates = [
files.File(
path=config["output_folder_path"] / "index.html",
data=index_template,
),
files.File(
path=config["output_folder_path"] / "manifest.json",
data=template_loader.render_template(path="manifest.jinja2"),
),
files.File(
path=config["output_folder_path"] / "robots.txt",
data=template_loader.render_template(path="robots.jinja2"),
),
files.File(
path=config["output_folder_path"] / "sw.js",
data=template_loader.render_template(path="sw.jinja2"),
),
files.File(
path=config["output_folder_path"] / "static" / "early_script.js",
data=template_loader.render_template(path="early_script.jinja2"),
),
files.File(
path=config["output_folder_path"] / "static" / "late_script.js",
data=template_loader.render_template(path="late_script.jinja2"),
),
files.File(
path=config["output_folder_path"] / "static" / "styles.css",
data=template_loader.render_template(path="styles.jinja2"),
),
]
if config.get("domain"):
templates.append(
files.File(
path=config["output_folder_path"] / "sitemap.xml",
data=template_loader.render_template(path="sitemap.jinja2"),
),
)
if config.get("title"):
templates.append(
files.File(
path=config["output_folder_path"] / "static" / "opensearch.xml",
data=template_loader.render_template(path="opensearch.jinja2"),
),
)
if config.get("favicon_png") or config.get("main_color"):
templates.append(
files.File(
path=config["output_folder_path"] / "static" / "browserconfig.xml",
data=template_loader.render_template(path="browserconfig.jinja2"),
)
)
if config.get("author_email"):
templates.append(
files.File(
path=config["output_folder_path"] / ".well-known" / "security",
data=template_loader.render_template(path="security.jinja2"),
)
)
if config.get("author_name") or config.get("author_email"):
templates.append(
files.File(
path=config["output_folder_path"] / "humans.txt",
data=template_loader.render_template(path="humans.jinja2"),
)
)
return templates
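# Editor's sketch (not part of the original module): one way the returned
# files might be written to disk. The `config` mapping below is made up and
# only includes keys read in generate_templates(); it also assumes files.File
# exposes the path and data it was constructed with as attributes.
#
#   import pathlib
#   config = {"output_folder_path": pathlib.Path("build"), "domain": "example.com", "title": "Example"}
#   for file in generate_templates(config=config):
#       file.path.parent.mkdir(parents=True, exist_ok=True)
#       file.path.write_bytes(file.data)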
| 31.803922 | 120 | 0.609741 |
4a214511900da697506a7c9a9efda4e42b86be3e
| 402 |
py
|
Python
|
python/python_backup/PRAC_PYTHON/he.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_backup/PRAC_PYTHON/he.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_backup/PRAC_PYTHON/he.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
class palindrome:
    def __init__(self):
        self.a = ""
    def input(self, k1):
        self.a = k1
    def calculate(self):
        f = 0
        i = 0
        j = len(self.a) - 1
        while i < len(self.a) // 2:
            if self.a[i] != self.a[j]:
                f = 1
                break
            i = i + 1
            j = j - 1
        if f == 0:
            print(self.a, "is a palindrome")
        else:
            print(self.a, "is not a palindrome")
x=palindrome()
a=input("enter string:")
x.input(a)
x.calculate()
| 18.272727 | 38 | 0.534826 |
4a21454d3c018311d6dcc0caf4c806e4623d7d31
| 1,991 |
py
|
Python
|
setup.py
|
ScottBrian/scottbrian_algo1
|
57cd8fc5674507db51b1c887d5f9a68462b0ca9d
|
[
"MIT"
] | null | null | null |
setup.py
|
ScottBrian/scottbrian_algo1
|
57cd8fc5674507db51b1c887d5f9a68462b0ca9d
|
[
"MIT"
] | null | null | null |
setup.py
|
ScottBrian/scottbrian_algo1
|
57cd8fc5674507db51b1c887d5f9a68462b0ca9d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 2020
@author: Scott Tuttle
"""
from pathlib import Path
from setuptools import setup, find_packages
with open('README.rst', 'r') as readme:
long_description = readme.read()
def get_version(rel_path):
target_path = Path(__file__).resolve().parent.joinpath(rel_path)
with open(target_path, 'r') as file:
for line in file:
if line.startswith('__version__'):
delimiter = '"' if '"' in line else "'"
return line.split(delimiter)[1]
else:
raise RuntimeError("Unable to find version string.")
setup(
name='scottbrian_algo1',
version=get_version('src/scottbrian_algo1/__init__.py'),
author='Scott Tuttle',
description='Analyze and trade financial instruments',
long_description=long_description,
long_description_content_type='text/x-rst',
url='https://github.com/ScottBrian/scottbrian_algo1.git',
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'Topic :: Office/Business :: Financial :: Investment',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Operating System :: POSIX :: Linux'
],
project_urls={
'Documentation': 'https://scottbrian-algo1.readthedocs.io/en'
'/latest/',
'Source': 'https://github.com/ScottBrian/scottbrian_algo1.git'},
python_requires='>=3.8',
packages=['ibapi', 'scottbrian_algo1'],
package_dir={'ibapi': 'pythonclient/ibapi', '': 'src'},
install_requires=['pandas', 'scottbrian_utils'],
package_data={"scottbrian_algo1": ["__init__.pyi", "py.typed"]},
zip_safe=False
)
| 34.929825 | 74 | 0.61326 |
4a21456da41b03b146d782b08774145c827b4736
| 2,712 |
py
|
Python
|
setup.py
|
XENON1T/cax
|
06de9290851904695275fd34d7c74e2c9eb7fe59
|
[
"0BSD"
] | 2 |
2016-05-19T05:51:15.000Z
|
2017-10-13T13:43:00.000Z
|
setup.py
|
XENON1T/cax
|
06de9290851904695275fd34d7c74e2c9eb7fe59
|
[
"0BSD"
] | 93 |
2016-03-26T20:34:01.000Z
|
2021-03-25T21:41:57.000Z
|
setup.py
|
XENON1T/cax
|
06de9290851904695275fd34d7c74e2c9eb7fe59
|
[
"0BSD"
] | 2 |
2017-05-19T03:47:09.000Z
|
2018-12-19T18:10:45.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
PROJECT = 'cax'
VERSION = '5.2.1'
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'checksumdir', 'scp', 'pagerduty-api', 'pymongo', 'paramiko',
'numpy', 'sympy', 'pytz',
]
test_requirements = [
'pytest', 'mongomock',
]
setup(
name='cax',
version=VERSION,
description="Copying Around XENON1T data",
long_description=readme + '\n\n' + history,
author="Christopher Tunnell",
author_email='ctunnell@nikhef.nl',
url='https://github.com/tunnell/cax',
packages=find_packages(),
include_package_data=True,
install_requires=requirements,
data_files=[ ('cax', ['cax/cax.json']),
('cax/host_config', ['cax/host_config/tegner_bash_p3.config', 'cax/host_config/tegner_bash_p2.config', 'cax/host_config/midway_bash_p3.config', 'cax/host_config/midway_bash_p2.config', 'cax/host_config/xe1tdatamanager_bash_p3.config', 'cax/host_config/xe1tdatamanager_bash_p2.config'])
],
license="ISCL",
zip_safe=False,
keywords='cax',
classifiers=[
'Intended Audience :: System Administrators',
        'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=['pytest-runner'],
entry_points={
'console_scripts': [
'cax = cax.main:main',
'massive-cax = cax.main:massive',
'caxer = cax.main:main', # For uniformity with paxer
'cax-process = cax.tasks.process:main',
'cax-mv = cax.main:move',
'cax-rm = cax.main:remove',
'cax-stray = cax.main:stray',
'cax-status = cax.main:status',
'massive-tsm = cax.main:massive_tsmclient',
'cax-tsm-remove = cax.main:remove_from_tsm',
'cax-tsm-watch = cax.main:cax_tape_log_file',
'ruciax = cax.main:ruciax',
'ruciax-rm = cax.main:remove_from_rucio',
'massive-ruciax = cax.main:massiveruciax',
'ruciax-check = cax.main:ruciax_status',
'ruciax-purge = cax.main:ruciax_purge',
'ruciax-download = cax.main:ruciax_download',
'ruciax-locator = cax.main:ruciax_locator',
],
},
)
| 35.684211 | 302 | 0.613938 |
4a2145b06c10a85214d0ac16b32ea169da729813
| 138 |
py
|
Python
|
module2/__init__.py
|
axel-sirota/advanced-generator-and-coroutines
|
fbb4f869b030a05dc10b4a49e9a091068d11e194
|
[
"MIT"
] | 5 |
2020-08-04T16:44:14.000Z
|
2021-08-21T02:23:03.000Z
|
module2/__init__.py
|
axel-sirota/advanced-generator-and-coroutines
|
fbb4f869b030a05dc10b4a49e9a091068d11e194
|
[
"MIT"
] | 1 |
2021-03-21T16:33:58.000Z
|
2021-03-21T16:33:58.000Z
|
module2/__init__.py
|
axel-sirota/advanced-generator-and-coroutines
|
fbb4f869b030a05dc10b4a49e9a091068d11e194
|
[
"MIT"
] | 4 |
2020-10-22T11:40:15.000Z
|
2022-01-30T19:42:07.000Z
|
from .mybomb import MyBomb
from .mybombgenerator import mybomb
from .mybomblazy import MyNotLazyBomb, mylazygenerator
from .demo import *
| 27.6 | 54 | 0.833333 |
4a214616bd4366c468657f58380de515e761ad49
| 474 |
py
|
Python
|
symposion/utils/signup.py
|
azkarmoulana/pycon
|
931388e6f640c35b892bb4b2d12581ba7ec8cf4e
|
[
"BSD-3-Clause"
] | 154 |
2015-01-17T02:29:24.000Z
|
2022-03-20T20:37:24.000Z
|
symposion/utils/signup.py
|
azkarmoulana/pycon
|
931388e6f640c35b892bb4b2d12581ba7ec8cf4e
|
[
"BSD-3-Clause"
] | 316 |
2015-01-10T04:01:50.000Z
|
2020-09-30T20:18:08.000Z
|
symposion/utils/signup.py
|
azkarmoulana/pycon
|
931388e6f640c35b892bb4b2d12581ba7ec8cf4e
|
[
"BSD-3-Clause"
] | 89 |
2015-01-10T05:25:21.000Z
|
2022-02-27T03:28:59.000Z
|
import hashlib
import random
from django.contrib.auth.models import User
def generate_username(email):
def random_username():
h = hashlib.sha1(email).hexdigest()[:25]
# don't ask
n = random.randint(1, (10 ** (5 - 1)) - 1)
return "%s%d" % (h, n)
while True:
try:
username = random_username()
User.objects.get(username=username)
except User.DoesNotExist:
break
return username
| 23.7 | 50 | 0.582278 |
4a21464cc305215f2b5ee383ef5911935ecc8d06
| 11,760 |
py
|
Python
|
stackinabox/stack.py
|
BenjamenMeyer/stackInABox
|
005a3e3f40ae7b7f14fae24d07768731e0ac948e
|
[
"Apache-2.0"
] | 5 |
2015-02-02T22:02:55.000Z
|
2016-02-03T21:58:12.000Z
|
stackinabox/stack.py
|
TestInABox/stackInABox
|
15586e61a2013b6f4997c652e8412a1784f8fc93
|
[
"Apache-2.0"
] | 43 |
2016-05-07T04:08:52.000Z
|
2022-03-16T23:43:36.000Z
|
stackinabox/stack.py
|
TestInABox/stackInABox
|
15586e61a2013b6f4997c652e8412a1784f8fc93
|
[
"Apache-2.0"
] | 3 |
2016-05-05T18:05:36.000Z
|
2022-03-23T17:41:41.000Z
|
"""
Stack-In-A-Box: Stack Management
"""
import logging
import re
import threading
import uuid
import six
logger = logging.getLogger(__name__)
class ServiceAlreadyRegisteredError(Exception):
"""StackInABoxService with the same name already registered."""
pass
class StackInABox(object):
"""Stack-In-A-Box Testing Service.
StackInABox provides a testing framework for RESTful APIs
The framework provides a thread-local instance holding the
StackInABoxService objects that are representing the
RESTful APIs.
The StackInABox object provides a means of accessing it
from anywhere in a thread; however, it is not necessarily
    thread-safe at this time. If one is careful to set up StackInABox
    and write StackInABoxService subclasses that are thread-safe
themselves, then there is no reason it could not be used in a
multi-threaded or multi-processed test.
"""
@classmethod
def get_thread_instance(cls):
"""
Interface to the thread storage to ensure the instance properly exists
"""
create = False
# if the `instance` property doesn't exist
if not hasattr(local_store, 'instance'):
local_store.instance = None
create = True
# if the instance doesn't exist at all
elif local_store.instance is None:
create = True
# if it's something else entirely...
elif not isinstance(local_store.instance, cls):
local_store.instance = None
create = True
# if the above conditions are met, create it
if create:
logger.debug('Creating new StackInABox instance...')
local_store.instance = cls()
logger.debug(
'Created StackInABox({0})'.format(local_store.instance.__id)
)
return local_store.instance
@classmethod
def reset_services(cls):
"""Reset the thread's StackInABox instance."""
logger.debug('Resetting services')
return cls.get_thread_instance().reset()
@classmethod
def register_service(cls, service):
"""Add a service to the thread's StackInABox instance.
:param service: StackInABoxService instance to add to the test
For return value and errors see StackInABox.register()
"""
logger.debug('Registering service {0}'.format(service.name))
return cls.get_thread_instance().register(service)
@classmethod
def call_into(cls, method, request, uri, headers):
"""Make a call into the thread's StackInABox instance.
:param method: HTTP Method (e.g GET, POST)
:param request: a Request object containing the request data
:param uri: the URI of the request submitted with the method
:param headers: the return headers in a Case-Insensitive dict
For return value and errors see StackInABox.call()
"""
logger.debug('Request: {0} - {1}'.format(method, uri))
return cls.get_thread_instance().call(method,
request,
uri,
headers)
@classmethod
def hold_onto(cls, name, obj):
"""Add data into the a storage area provided by the framework.
Note: The data is stored with the thread local instance.
:param name: name of the data to be stored
:param obj: data to be stored
For return value and errors see StackInABox.into_hold()
"""
logger.debug('Holding on {0} of type {1} with id {2}'
.format(name, type(obj), id(obj)))
cls.get_thread_instance().into_hold(name, obj)
@classmethod
def hold_out(cls, name):
"""Get data from the storage area provided by the framework.
Note: The data is retrieved from the thread local instance.
:param name: name of the data to be retrieved
:returns: The data associated with the specified name.
For errors see StackInABox.from_hold()
"""
        logger.debug('Retrieving {0} from hold'
.format(name))
obj = cls.get_thread_instance().from_hold(name)
logger.debug('Retrieved {0} of type {1} with id {2} from hold'
.format(name, type(obj), id(obj)))
return obj
@classmethod
def update_uri(cls, uri):
"""Set the URI of the StackInABox framework.
:param uri: the base URI used to match the service.
"""
logger.debug('Request: Update URI to {0}'.format(uri))
cls.get_thread_instance().base_url = uri
def __init__(self):
"""Initialize the StackInABox instance.
Default Base URI is '/'.
There are no services registered, and the storage hold
is a basic dictionary object used as a key-value store.
"""
self.__id = uuid.uuid4()
self.__base_url = '/'
self.services = {
}
self.holds = {
}
@staticmethod
def __get_service_url(base_url, service_name):
"""Get the URI for a given StackInABoxService.
Note: this is an internal function
:param base_url: base URL to use
:param service_name: name of the service the URI is for
"""
return '{0}/{1}'.format(base_url, service_name)
@staticmethod
def get_services_url(url, base_url):
"""Get the URI from a given URL.
:returns: URI within the URL
"""
length = len(base_url)
checks = ['http://', 'https://']
for check in checks:
if url.startswith(check):
length = length + len(check)
break
result = url[length:]
logger.debug('{0} from {1} equals {2}'
.format(base_url, url, result))
return result
@property
def base_url(self):
"""Base URL property."""
return self.__base_url
@base_url.setter
def base_url(self, value):
"""Set the Base URL property, updating all associated services."""
logger.debug('StackInABox({0}): Updating URL from {1} to {2}'
.format(self.__id, self.__base_url, value))
self.__base_url = value
for k, v in six.iteritems(self.services):
matcher, service = v
service.base_url = StackInABox.__get_service_url(value,
service.name)
logger.debug('StackInABox({0}): Service {1} has url {2}'
.format(self.__id, service.name, service.base_url))
def reset(self):
"""Reset StackInABox to a like-new state."""
logger.debug('StackInABox({0}): Resetting...'
.format(self.__id))
for k, v in six.iteritems(self.services):
matcher, service = v
logger.debug('StackInABox({0}): Resetting Service {1}'
.format(self.__id, service.name))
service.reset()
self.services = {}
self.holds = {}
logger.debug('StackInABox({0}): Reset Complete'
.format(self.__id))
def register(self, service):
"""Add a service to the thread's StackInABox instance.
:param service: StackInABoxService instance to add to the test
:returns: None
:raises: ServiceAlreadyRegisteredError if the service already exists
"""
if service.name not in self.services.keys():
logger.debug('StackInABox({0}): Registering Service {1}'
.format(self.__id, service.name))
regex = '^/{0}/'.format(service.name)
self.services[service.name] = [
re.compile(regex),
service
]
service.base_url = StackInABox.__get_service_url(self.base_url,
service.name)
logger.debug('StackInABox({0}): Service {1} has url {2}'
.format(self.__id, service.name, service.base_url))
else:
raise ServiceAlreadyRegisteredError(
'Service {0} is already registered'.format(service.name))
def call(self, method, request, uri, headers):
"""Make a call into the thread's StackInABox instance.
:param method: HTTP Method (e.g GET, POST)
:param request: a Request object containing the request data
:param uri: the URI of the request submitted with the method
:param headers: the return headers in a Case-Insensitive dict
:returns: A tuple containing - (i) the Status Code, (ii) the response
headers, and (iii) the response body data
This function should not emit any Exceptions
"""
logger.debug('StackInABox({0}): Received call to {1} - {2}'
.format(self.__id, method, uri))
service_uri = StackInABox.get_services_url(uri, self.base_url)
for k, v in six.iteritems(self.services):
matcher, service = v
logger.debug('StackInABox({0}): Checking if Service {1} handles...'
.format(self.__id, service.name))
logger.debug('StackInABox({0}): ...using regex pattern {1} '
'against {2}'
.format(self.__id, matcher.pattern, service_uri))
if matcher.match(service_uri):
logger.debug('StackInABox({0}): Trying Service {1} handler...'
.format(self.__id, service.name))
try:
service_caller_uri = service_uri[(len(service.name) + 1):]
return service.request(method,
request,
service_caller_uri,
headers)
except Exception as ex:
logger.exception('StackInABox({0}): Service {1} - '
'Internal Failure'
.format(self.__id, service.name))
return (596,
headers,
'Service Handler had an error: {0}'.format(ex))
return (597, headers, 'Unknown service - {0}'.format(service_uri))
def into_hold(self, name, obj):
"""Add data into the a storage area provided by the framework.
Note: The data is stored with the thread local instance.
:param name: name of the data to be stored
:param obj: data to be stored
:returns: N/A
:raises: N/A
"""
logger.debug('StackInABox({0}): Holding onto {1} of type {2} '
'with id {3}'
.format(self.__id, name, type(obj), id(obj)))
self.holds[name] = obj
def from_hold(self, name):
"""Get data from the storage area provided by the framework.
Note: The data is retrieved from the thread local instance.
:param name: name of the data to be retrieved
:returns: The data associated with the specified name.
:raises: Lookup/KeyError error if the name does not match
a value in the storage
"""
        logger.debug('StackInABox({0}): Retrieving {1} from the hold'
.format(self.__id, name))
obj = self.holds[name]
logger.debug('StackInABox({0}): Retrieved {1} of type {2} with id {3}'
.format(self.__id, name, type(obj), id(obj)))
return obj
# Thread local instance of StackInABox
local_store = threading.local()
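# Editor's sketch (not part of the original module): the intended call pattern
# of the thread-local facade. `MyService` stands for any StackInABoxService
# subclass and `request` for whatever request object the transport layer
# provides; both are placeholders.
#
#   StackInABox.reset_services()
#   StackInABox.update_uri('localhost')
#   StackInABox.register_service(MyService())
#   status, headers, body = StackInABox.call_into(
#       'GET', request, 'http://localhost/myservice/endpoint', {})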
| 34.486804 | 79 | 0.571854 |
4a2146ace877b7d3ce4b26f14beab8d093ea025c
| 4,193 |
py
|
Python
|
MDRSREID/Loss_Meter/Multi_Seg_loss.py
|
nickhuang1996/HJL-re-id
|
107b25f31c961f360f69560cfddd78dfc0da3291
|
[
"MIT"
] | 43 |
2020-09-20T09:40:04.000Z
|
2022-03-29T11:25:22.000Z
|
MDRSREID/Loss_Meter/Multi_Seg_loss.py
|
nickhuang1996/HJL-re-id
|
107b25f31c961f360f69560cfddd78dfc0da3291
|
[
"MIT"
] | 19 |
2020-10-05T05:35:38.000Z
|
2021-12-10T03:17:31.000Z
|
MDRSREID/Loss_Meter/Multi_Seg_loss.py
|
nickhuang1996/HJL-re-id
|
107b25f31c961f360f69560cfddd78dfc0da3291
|
[
"MIT"
] | 18 |
2020-10-01T14:41:53.000Z
|
2021-09-02T06:57:57.000Z
|
from MDRSREID.Loss_Meter import Loss
import torch.nn as nn
import torch
from MDRSREID.utils.meter import RecentAverageMeter as Meter
class MultiSegLoss(Loss):
def __init__(self, cfg, tb_writer=None):
super(MultiSegLoss, self).__init__(cfg, tb_writer=tb_writer)
self.criterion = torch.nn.CrossEntropyLoss(reduction='none' if cfg.normalize_size else 'mean')
self.part_fmt = '#{}'
def __call__(self, item, pred, step=0, **kwargs):
multi_seg_pred_list = pred['multi_seg_pred_list']
ps_label = item['ps_label']
N, C, H, W = multi_seg_pred_list[0].size()
assert ps_label.size() == (N, H, W)
# shape [N, H, W] -> [NHW]
ps_label = ps_label.view(N * H * W).detach()
# shape [N, C, H, W] -> [N, H, W, C] -> [NHW, C]
loss_list = [self.criterion(multi_seg_pred_list[i].permute(0, 2, 3, 1).contiguous().view(-1, C), ps_label) for i in range(len(multi_seg_pred_list))]
# New version of pytorch allow stacking 0-dim tensors, but not concatenating.
loss = torch.stack(loss_list).mean() # sum()
# Calculate each class avg loss and then average across classes, to compensate for classes that have few pixels
if self.cfg.normalize_size:
num_loss = 0
for j in range(len(loss_list)):
loss_ = 0
cur_batch_n_classes = 0
for i in range(self.cfg.num_classes):
# select ingredients that satisfy the condition 'ps_label == i'
# the number may be less than that of ps_label
loss_i = loss_list[j][ps_label == i]
# if the number of selected ingredients is more than 0, calculate the loss and class numbers add 1
if loss_i.numel() > 0:
loss_ += loss_i.mean()
cur_batch_n_classes += 1
loss_ /= (cur_batch_n_classes + 1e-8)
loss_list[j] = loss_
num_loss += 1
sum_loss = 0.0
for i in range(len(loss_list)):
sum_loss += loss_list[i]
loss = 1. * sum_loss / num_loss
# Meter: stores and computes the average of recent values
self.store_calculate_loss(loss)
# May calculate each branch loss separately
self.may_calculate_each_branch_loss(loss_list)
# May record losses.
self.may_record_loss(step)
# Scale by loss weight
loss *= self.cfg.weight
return {'loss': loss}
def store_calculate_loss(self, loss):
"""
:param loss: torch.stack(loss_list).sum()
:return:
Meter: stores and computes the average of recent values.
"""
if self.cfg.name not in self.meter_dict:
# Here use RecentAverageMeter as Meter
self.meter_dict[self.cfg.name] = Meter(name=self.cfg.name)
# Update the meter, store the current whole loss.
self.meter_dict[self.cfg.name].update(loss.item())
def may_calculate_each_branch_loss(self, loss_list):
"""
:param loss_list: each part loss
:return:
Meter: stores and computes the average of recent values.
For each part loss, calculate the loss separately.
"""
if len(loss_list) > 1:
# stores and computes each part average of recent values
for i in range(len(loss_list)):
# if there is not the meter of the part, create a new one.
if self.part_fmt.format(i + 1) not in self.meter_dict:
self.meter_dict[self.part_fmt.format(i + 1)] = Meter(name=self.part_fmt.format(i + 1))
# Update the meter, store the current part loss
self.meter_dict[self.part_fmt.format(i + 1)].update(loss_list[i].item())
def may_record_loss(self, step):
"""
:param loss_list:
:param step:
:return:
Use TensorBoard to record the losses.
"""
if self.tb_writer is not None:
self.tb_writer.add_scalars(self.cfg.name, {self.cfg.name: self.meter_dict[self.cfg.name].avg}, step)
| 39.186916 | 156 | 0.5917 |
4a2149b1775b602d06d564b36324658cae787149
| 33,507 |
py
|
Python
|
pyqtgraph/parametertree/Parameter.py
|
ltirrell/pyqtgraph
|
4a24598bcb631ecaf1140c44afba1e62be8ce1ab
|
[
"MIT"
] | 1 |
2021-05-31T06:43:22.000Z
|
2021-05-31T06:43:22.000Z
|
pyqtgraph/parametertree/Parameter.py
|
ltirrell/pyqtgraph
|
4a24598bcb631ecaf1140c44afba1e62be8ce1ab
|
[
"MIT"
] | null | null | null |
pyqtgraph/parametertree/Parameter.py
|
ltirrell/pyqtgraph
|
4a24598bcb631ecaf1140c44afba1e62be8ce1ab
|
[
"MIT"
] | 1 |
2022-02-21T03:09:29.000Z
|
2022-02-21T03:09:29.000Z
|
# -*- coding: utf-8 -*-
from ..Qt import QtGui, QtCore
import os, weakref, re
from ..pgcollections import OrderedDict
from ..python2_3 import asUnicode, basestring
from .ParameterItem import ParameterItem
PARAM_TYPES = {}
PARAM_NAMES = {}
def registerParameterType(name, cls, override=False):
global PARAM_TYPES
if name in PARAM_TYPES and not override:
raise Exception("Parameter type '%s' already exists (use override=True to replace)" % name)
PARAM_TYPES[name] = cls
PARAM_NAMES[cls] = name
def __reload__(old):
PARAM_TYPES.update(old.get('PARAM_TYPES', {}))
PARAM_NAMES.update(old.get('PARAM_NAMES', {}))
class Parameter(QtCore.QObject):
"""
A Parameter is the basic unit of data in a parameter tree. Each parameter has
a name, a type, a value, and several other properties that modify the behavior of the
Parameter. Parameters may have parent / child / sibling relationships to construct
organized hierarchies. Parameters generally do not have any inherent GUI or visual
interpretation; instead they manage ParameterItem instances which take care of
display and user interaction.
Note: It is fairly uncommon to use the Parameter class directly; mostly you
will use subclasses which provide specialized type and data handling. The static
    method Parameter.create(...) is an easy way to generate instances of these subclasses.
For more Parameter types, see ParameterTree.parameterTypes module.
=================================== =========================================================
**Signals:**
sigStateChanged(self, change, info) Emitted when anything changes about this parameter at
all.
The second argument is a string indicating what changed
('value', 'childAdded', etc..)
The third argument can be any extra information about
the change
sigTreeStateChanged(self, changes) Emitted when any child in the tree changes state
(but only if monitorChildren() is called)
the format of *changes* is [(param, change, info), ...]
sigValueChanged(self, value) Emitted when value is finished changing
sigValueChanging(self, value) Emitted immediately for all value changes,
including during editing.
sigChildAdded(self, child, index) Emitted when a child is added
sigChildRemoved(self, child) Emitted when a child is removed
sigRemoved(self) Emitted when this parameter is removed
sigParentChanged(self, parent) Emitted when this parameter's parent has changed
sigLimitsChanged(self, limits) Emitted when this parameter's limits have changed
sigDefaultChanged(self, default) Emitted when this parameter's default value has changed
sigNameChanged(self, name) Emitted when this parameter's name has changed
sigOptionsChanged(self, opts) Emitted when any of this parameter's options have changed
sigContextMenu(self, name) Emitted when a context menu was clicked
=================================== =========================================================
"""
## name, type, limits, etc.
## can also carry UI hints (slider vs spinbox, etc.)
sigValueChanged = QtCore.Signal(object, object) ## self, value emitted when value is finished being edited
sigValueChanging = QtCore.Signal(object, object) ## self, value emitted as value is being edited
sigChildAdded = QtCore.Signal(object, object, object) ## self, child, index
sigChildRemoved = QtCore.Signal(object, object) ## self, child
sigRemoved = QtCore.Signal(object) ## self
sigParentChanged = QtCore.Signal(object, object) ## self, parent
sigLimitsChanged = QtCore.Signal(object, object) ## self, limits
sigDefaultChanged = QtCore.Signal(object, object) ## self, default
sigNameChanged = QtCore.Signal(object, object) ## self, name
sigOptionsChanged = QtCore.Signal(object, object) ## self, {opt:val, ...}
## Emitted when anything changes about this parameter at all.
## The second argument is a string indicating what changed ('value', 'childAdded', etc..)
## The third argument can be any extra information about the change
sigStateChanged = QtCore.Signal(object, object, object) ## self, change, info
## emitted when any child in the tree changes state
## (but only if monitorChildren() is called)
sigTreeStateChanged = QtCore.Signal(object, object) # self, changes
# changes = [(param, change, info), ...]
sigContextMenu = QtCore.Signal(object, object) # self, name
# bad planning.
#def __new__(cls, *args, **opts):
#try:
#cls = PARAM_TYPES[opts['type']]
#except KeyError:
#pass
#return QtCore.QObject.__new__(cls, *args, **opts)
@staticmethod
def create(**opts):
"""
Static method that creates a new Parameter (or subclass) instance using
opts['type'] to select the appropriate class.
All options are passed directly to the new Parameter's __init__ method.
Use registerParameterType() to add new class types.
"""
typ = opts.get('type', None)
if typ is None:
cls = Parameter
else:
cls = PARAM_TYPES[opts['type']]
return cls(**opts)
def __init__(self, **opts):
"""
Initialize a Parameter object. Although it is rare to directly create a
Parameter instance, the options available to this method are also allowed
by most Parameter subclasses.
======================= =========================================================
**Keyword Arguments:**
name The name to give this Parameter. This is the name that
will appear in the left-most column of a ParameterTree
for this Parameter.
value The value to initially assign to this Parameter.
default The default value for this Parameter (most Parameters
provide an option to 'reset to default').
children A list of children for this Parameter. Children
may be given either as a Parameter instance or as a
dictionary to pass to Parameter.create(). In this way,
it is possible to specify complex hierarchies of
Parameters from a single nested data structure.
readonly If True, the user will not be allowed to edit this
Parameter. (default=False)
enabled If False, any widget(s) for this parameter will appear
disabled. (default=True)
visible If False, the Parameter will not appear when displayed
in a ParameterTree. (default=True)
renamable If True, the user may rename this Parameter.
(default=False)
removable If True, the user may remove this Parameter.
(default=False)
expanded If True, the Parameter will initially be expanded in
ParameterTrees: Its children will be visible.
(default=True)
syncExpanded If True, the `expanded` state of this Parameter is
synchronized with all ParameterTrees it is displayed in.
(default=False)
title (str or None) If specified, then the parameter will be
displayed to the user using this string as its name.
However, the parameter will still be referred to
internally using the *name* specified above. Note that
this option is not compatible with renamable=True.
(default=None; added in version 0.9.9)
======================= =========================================================
"""
QtCore.QObject.__init__(self)
self.opts = {
'type': None,
'readonly': False,
'visible': True,
'enabled': True,
'renamable': False,
'removable': False,
'strictNaming': False, # forces name to be usable as a python variable
'expanded': True,
'syncExpanded': False,
'title': None,
#'limits': None, ## This is a bad plan--each parameter type may have a different data type for limits.
}
value = opts.get('value', None)
name = opts.get('name', None)
self.opts.update(opts)
self.opts['value'] = None # will be set later.
self.opts['name'] = None
self.childs = []
self.names = {} ## map name:child
self.items = weakref.WeakKeyDictionary() ## keeps track of tree items representing this parameter
self._parent = None
self.treeStateChanges = [] ## cache of tree state changes to be delivered on next emit
self.blockTreeChangeEmit = 0
#self.monitoringChildren = False ## prevent calling monitorChildren more than once
if not isinstance(name, basestring):
raise Exception("Parameter must have a string name specified in opts.")
self.setName(name)
self.addChildren(self.opts.pop('children', []))
if value is not None:
self.setValue(value)
if 'default' not in self.opts:
self.opts['default'] = None
## Connect all state changed signals to the general sigStateChanged
self.sigValueChanged.connect(lambda param, data: self.emitStateChanged('value', data))
self.sigChildAdded.connect(lambda param, *data: self.emitStateChanged('childAdded', data))
self.sigChildRemoved.connect(lambda param, data: self.emitStateChanged('childRemoved', data))
self.sigParentChanged.connect(lambda param, data: self.emitStateChanged('parent', data))
self.sigLimitsChanged.connect(lambda param, data: self.emitStateChanged('limits', data))
self.sigDefaultChanged.connect(lambda param, data: self.emitStateChanged('default', data))
self.sigNameChanged.connect(lambda param, data: self.emitStateChanged('name', data))
self.sigOptionsChanged.connect(lambda param, data: self.emitStateChanged('options', data))
self.sigContextMenu.connect(lambda param, data: self.emitStateChanged('contextMenu', data))
#self.watchParam(self) ## emit treechange signals if our own state changes
def name(self):
"""Return the name of this Parameter."""
return self.opts['name']
def title(self):
"""Return the title of this Parameter.
By default, the title is the same as the name unless it has been explicitly specified
otherwise."""
title = self.opts.get('title', None)
if title is None:
title = self.name()
return title
def contextMenu(self, name):
""""A context menu entry was clicked"""
self.sigContextMenu.emit(self, name)
def setName(self, name):
"""Attempt to change the name of this parameter; return the actual name.
(The parameter may reject the name change or automatically pick a different name)"""
if self.opts['strictNaming']:
if len(name) < 1 or re.search(r'\W', name) or re.match(r'\d', name[0]):
raise Exception("Parameter name '%s' is invalid. (Must contain only alphanumeric and underscore characters and may not start with a number)" % name)
parent = self.parent()
if parent is not None:
name = parent._renameChild(self, name) ## first ask parent if it's ok to rename
if self.opts['name'] != name:
self.opts['name'] = name
self.sigNameChanged.emit(self, name)
return name
def type(self):
"""Return the type string for this Parameter."""
return self.opts['type']
def isType(self, typ):
"""
Return True if this parameter type matches the name *typ*.
This can occur either of two ways:
- If self.type() == *typ*
- If this parameter's class is registered with the name *typ*
"""
if self.type() == typ:
return True
global PARAM_TYPES
cls = PARAM_TYPES.get(typ, None)
if cls is None:
raise Exception("Type name '%s' is not registered." % str(typ))
return self.__class__ is cls
def childPath(self, child):
"""
Return the path of parameter names from self to child.
If child is not a (grand)child of self, return None.
"""
path = []
while child is not self:
path.insert(0, child.name())
child = child.parent()
if child is None:
return None
return path
def setValue(self, value, blockSignal=None):
"""
Set the value of this Parameter; return the actual value that was set.
(this may be different from the value that was requested)
"""
try:
if blockSignal is not None:
self.sigValueChanged.disconnect(blockSignal)
value = self._interpretValue(value)
if self.opts['value'] == value:
return value
self.opts['value'] = value
self.sigValueChanged.emit(self, value)
finally:
if blockSignal is not None:
self.sigValueChanged.connect(blockSignal)
return value
def _interpretValue(self, v):
return v
def value(self):
"""
Return the value of this Parameter.
"""
return self.opts['value']
def getValues(self):
"""Return a tree of all values that are children of this parameter"""
vals = OrderedDict()
for ch in self:
vals[ch.name()] = (ch.value(), ch.getValues())
return vals
def saveState(self, filter=None):
"""
Return a structure representing the entire state of the parameter tree.
The tree state may be restored from this structure using restoreState().
If *filter* is set to 'user', then only user-settable data will be included in the
returned state.
"""
if filter is None:
state = self.opts.copy()
if state['type'] is None:
global PARAM_NAMES
state['type'] = PARAM_NAMES.get(type(self), None)
elif filter == 'user':
state = {'value': self.value()}
else:
raise ValueError("Unrecognized filter argument: '%s'" % filter)
ch = OrderedDict([(ch.name(), ch.saveState(filter=filter)) for ch in self])
if len(ch) > 0:
state['children'] = ch
return state
def restoreState(self, state, recursive=True, addChildren=True, removeChildren=True, blockSignals=True):
"""
Restore the state of this parameter and its children from a structure generated using saveState()
If recursive is True, then attempt to restore the state of child parameters as well.
If addChildren is True, then any children which are referenced in the state object will be
created if they do not already exist.
If removeChildren is True, then any children which are not referenced in the state object will
be removed.
If blockSignals is True, no signals will be emitted until the tree has been completely restored.
This prevents signal handlers from responding to a partially-rebuilt network.
"""
state = state.copy()
childState = state.pop('children', [])
## list of children may be stored either as list or dict.
if isinstance(childState, dict):
cs = []
for k,v in childState.items():
cs.append(v.copy())
cs[-1].setdefault('name', k)
childState = cs
if blockSignals:
self.blockTreeChangeSignal()
try:
self.setOpts(**state)
if not recursive:
return
ptr = 0 ## pointer to first child that has not been restored yet
foundChilds = set()
#print "==============", self.name()
for ch in childState:
name = ch['name']
#typ = ch.get('type', None)
#print('child: %s, %s' % (self.name()+'.'+name, typ))
## First, see if there is already a child with this name
gotChild = False
for i, ch2 in enumerate(self.childs[ptr:]):
#print " ", ch2.name(), ch2.type()
if ch2.name() != name: # or not ch2.isType(typ):
continue
gotChild = True
#print " found it"
if i != 0: ## move parameter to next position
#self.removeChild(ch2)
self.insertChild(ptr, ch2)
#print " moved to position", ptr
ch2.restoreState(ch, recursive=recursive, addChildren=addChildren, removeChildren=removeChildren)
foundChilds.add(ch2)
break
if not gotChild:
if not addChildren:
#print " ignored child"
continue
#print " created new"
ch2 = Parameter.create(**ch)
self.insertChild(ptr, ch2)
foundChilds.add(ch2)
ptr += 1
if removeChildren:
for ch in self.childs[:]:
if ch not in foundChilds:
#print " remove:", ch
self.removeChild(ch)
finally:
if blockSignals:
self.unblockTreeChangeSignal()
def defaultValue(self):
"""Return the default value for this parameter."""
return self.opts['default']
def setDefault(self, val):
"""Set the default value for this parameter."""
if self.opts['default'] == val:
return
self.opts['default'] = val
self.sigDefaultChanged.emit(self, val)
def setToDefault(self):
"""Set this parameter's value to the default."""
if self.hasDefault():
self.setValue(self.defaultValue())
def hasDefault(self):
"""Returns True if this parameter has a default value."""
return self.opts['default'] is not None
def valueIsDefault(self):
"""Returns True if this parameter's value is equal to the default value."""
return self.value() == self.defaultValue()
def setLimits(self, limits):
"""Set limits on the acceptable values for this parameter.
The format of limits depends on the type of the parameter and
some parameters do not make use of limits at all."""
if 'limits' in self.opts and self.opts['limits'] == limits:
return
self.opts['limits'] = limits
self.sigLimitsChanged.emit(self, limits)
return limits
def writable(self):
"""
Returns True if this parameter's value can be changed by the user.
Note that the value of the parameter can *always* be changed by
calling setValue().
"""
return not self.readonly()
def setWritable(self, writable=True):
"""Set whether this Parameter should be editable by the user. (This is
exactly the opposite of setReadonly)."""
self.setOpts(readonly=not writable)
def readonly(self):
"""
Return True if this parameter is read-only. (this is the opposite of writable())
"""
return self.opts.get('readonly', False)
def setReadonly(self, readonly=True):
"""Set whether this Parameter's value may be edited by the user
(this is the opposite of setWritable())."""
self.setOpts(readonly=readonly)
def setOpts(self, **opts):
"""
Set any arbitrary options on this parameter.
The exact behavior of this function will depend on the parameter type, but
most parameters will accept a common set of options: value, name, limits,
default, readonly, removable, renamable, visible, enabled, expanded and syncExpanded.
See :func:`Parameter.__init__ <pyqtgraph.parametertree.Parameter.__init__>`
for more information on default options.
"""
changed = OrderedDict()
for k in opts:
if k == 'value':
self.setValue(opts[k])
elif k == 'name':
self.setName(opts[k])
elif k == 'limits':
self.setLimits(opts[k])
elif k == 'default':
self.setDefault(opts[k])
elif k not in self.opts or self.opts[k] != opts[k]:
self.opts[k] = opts[k]
changed[k] = opts[k]
if len(changed) > 0:
self.sigOptionsChanged.emit(self, changed)
def emitStateChanged(self, changeDesc, data):
## Emits stateChanged signal and
## requests emission of new treeStateChanged signal
self.sigStateChanged.emit(self, changeDesc, data)
#self.treeStateChanged(self, changeDesc, data)
self.treeStateChanges.append((self, changeDesc, data))
self.emitTreeChanges()
def makeTreeItem(self, depth):
"""
Return a TreeWidgetItem suitable for displaying/controlling the content of
this parameter. This is called automatically when a ParameterTree attempts
to display this Parameter.
Most subclasses will want to override this function.
"""
if hasattr(self, 'itemClass'):
#print "Param:", self, "Make item from itemClass:", self.itemClass
return self.itemClass(self, depth)
else:
return ParameterItem(self, depth=depth)
def addChild(self, child, autoIncrementName=None):
"""
Add another parameter to the end of this parameter's child list.
See insertChild() for a description of the *autoIncrementName*
argument.
"""
return self.insertChild(len(self.childs), child, autoIncrementName=autoIncrementName)
def addChildren(self, children):
"""
Add a list or dict of children to this parameter. This method calls
addChild once for each value in *children*.
"""
## If children was specified as dict, then assume keys are the names.
if isinstance(children, dict):
ch2 = []
for name, opts in children.items():
if isinstance(opts, dict) and 'name' not in opts:
opts = opts.copy()
opts['name'] = name
ch2.append(opts)
children = ch2
for chOpts in children:
#print self, "Add child:", type(chOpts), id(chOpts)
self.addChild(chOpts)
def insertChild(self, pos, child, autoIncrementName=None):
"""
Insert a new child at pos.
If pos is a Parameter, then insert at the position of that Parameter.
If child is a dict, then a parameter is constructed using
:func:`Parameter.create <pyqtgraph.parametertree.Parameter.create>`.
By default, the child's 'autoIncrementName' option determines whether
the name will be adjusted to avoid prior name collisions. This
behavior may be overridden by specifying the *autoIncrementName*
argument. This argument was added in version 0.9.9.
"""
if isinstance(child, dict):
child = Parameter.create(**child)
name = child.name()
if name in self.names and child is not self.names[name]:
if autoIncrementName is True or (autoIncrementName is None and child.opts.get('autoIncrementName', False)):
name = self.incrementName(name)
child.setName(name)
else:
raise Exception("Already have child named %s" % str(name))
if isinstance(pos, Parameter):
pos = self.childs.index(pos)
with self.treeChangeBlocker():
if child.parent() is not None:
child.remove()
self.names[name] = child
self.childs.insert(pos, child)
child.parentChanged(self)
child.sigTreeStateChanged.connect(self.treeStateChanged)
self.sigChildAdded.emit(self, child, pos)
return child
def removeChild(self, child):
"""Remove a child parameter."""
name = child.name()
if name not in self.names or self.names[name] is not child:
raise Exception("Parameter %s is not my child; can't remove." % str(child))
del self.names[name]
self.childs.pop(self.childs.index(child))
child.parentChanged(None)
try:
child.sigTreeStateChanged.disconnect(self.treeStateChanged)
except (TypeError, RuntimeError): ## already disconnected
pass
self.sigChildRemoved.emit(self, child)
def clearChildren(self):
"""Remove all child parameters."""
for ch in self.childs[:]:
self.removeChild(ch)
def children(self):
"""Return a list of this parameter's children.
Warning: this overrides QObject.children
"""
return self.childs[:]
def hasChildren(self):
"""Return True if this Parameter has children."""
return len(self.childs) > 0
def parentChanged(self, parent):
"""This method is called when the parameter's parent has changed.
It may be useful to extend this method in subclasses."""
self._parent = parent
self.sigParentChanged.emit(self, parent)
def parent(self):
"""Return the parent of this parameter."""
return self._parent
def remove(self):
"""Remove this parameter from its parent's child list"""
parent = self.parent()
if parent is None:
raise Exception("Cannot remove; no parent.")
parent.removeChild(self)
self.sigRemoved.emit(self)
def incrementName(self, name):
## return an unused name by adding a number to the name given
base, num = re.match(r'(.*)(\d*)', name).groups()
numLen = len(num)
if numLen == 0:
num = 2
numLen = 1
else:
num = int(num)
while True:
newName = base + ("%%0%dd"%numLen) % num
if newName not in self.names:
return newName
num += 1
def __iter__(self):
for ch in self.childs:
yield ch
def __getitem__(self, names):
"""Get the value of a child parameter. The name may also be a tuple giving
the path to a sub-parameter::
value = param[('child', 'grandchild')]
"""
if not isinstance(names, tuple):
names = (names,)
return self.param(*names).value()
def __setitem__(self, names, value):
"""Set the value of a child parameter. The name may also be a tuple giving
the path to a sub-parameter::
param[('child', 'grandchild')] = value
"""
if isinstance(names, basestring):
names = (names,)
return self.param(*names).setValue(value)
def child(self, *names):
"""Return a child parameter.
Accepts the name of the child or a tuple (path, to, child)
Added in version 0.9.9. Earlier versions used the 'param' method, which is still
implemented for backward compatibility.
"""
try:
param = self.names[names[0]]
except KeyError:
raise KeyError("Parameter %s has no child named %s" % (self.name(), names[0]))
if len(names) > 1:
return param.child(*names[1:])
else:
return param
def param(self, *names):
# for backward compatibility.
return self.child(*names)
def __repr__(self):
return asUnicode("<%s '%s' at 0x%x>") % (self.__class__.__name__, self.name(), id(self))
def __getattr__(self, attr):
## Leaving this undocumented because I might like to remove it in the future..
#print type(self), attr
if 'names' not in self.__dict__:
raise AttributeError(attr)
if attr in self.names:
import traceback
traceback.print_stack()
print("Warning: Use of Parameter.subParam is deprecated. Use Parameter.param(name) instead.")
return self.param(attr)
else:
raise AttributeError(attr)
def _renameChild(self, child, name):
## Only to be called from Parameter.rename
if name in self.names:
return child.name()
self.names[name] = child
del self.names[child.name()]
return name
def registerItem(self, item):
self.items[item] = None
def hide(self):
"""Hide this parameter. It and its children will no longer be visible in any ParameterTree
widgets it is connected to."""
self.show(False)
def show(self, s=True):
"""Show this parameter. """
self.opts['visible'] = s
self.sigOptionsChanged.emit(self, {'visible': s})
def treeChangeBlocker(self):
"""
Return an object that can be used to temporarily block and accumulate
sigTreeStateChanged signals. This is meant to be used when numerous changes are
about to be made to the tree and only one change signal should be
emitted at the end.
Example::
with param.treeChangeBlocker():
param.addChild(...)
param.removeChild(...)
param.setValue(...)
"""
return SignalBlocker(self.blockTreeChangeSignal, self.unblockTreeChangeSignal)
def blockTreeChangeSignal(self):
"""
Used to temporarily block and accumulate tree change signals.
*You must remember to unblock*, so it is advisable to use treeChangeBlocker() instead.
"""
self.blockTreeChangeEmit += 1
def unblockTreeChangeSignal(self):
"""Unblocks enission of sigTreeStateChanged and flushes the changes out through a single signal."""
self.blockTreeChangeEmit -= 1
self.emitTreeChanges()
def treeStateChanged(self, param, changes):
"""
Called when the state of any sub-parameter has changed.
============== ================================================================
**Arguments:**
param The immediate child whose tree state has changed.
note that the change may have originated from a grandchild.
changes List of tuples describing all changes that have been made
in this event: (param, changeDescr, data)
============== ================================================================
This function can be extended to react to tree state changes.
"""
self.treeStateChanges.extend(changes)
self.emitTreeChanges()
def emitTreeChanges(self):
if self.blockTreeChangeEmit == 0:
changes = self.treeStateChanges
self.treeStateChanges = []
if len(changes) > 0:
self.sigTreeStateChanged.emit(self, changes)
class SignalBlocker(object):
def __init__(self, enterFn, exitFn):
self.enterFn = enterFn
self.exitFn = exitFn
def __enter__(self):
self.enterFn()
def __exit__(self, exc_type, exc_value, tb):
self.exitFn()
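# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal example of building a small parameter tree with the plain
# Parameter class (no registered parameter types needed), reading values by
# path, and batching changes with treeChangeBlocker(). All names here are
# arbitrary.
def _example_parameter_tree():
    root = Parameter.create(name='settings', children=[
        dict(name='size', value=10),
        dict(name='options', children=[dict(name='verbose', value=False)]),
    ])
    size = root['size']                     # -> 10
    verbose = root[('options', 'verbose')]  # nested access by path
    with root.treeChangeBlocker():          # accumulate and emit one tree-change signal
        root['size'] = 20
        root['options', 'verbose'] = True
    return root, size, verbose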
| 41.88375 | 164 | 0.564449 |
4a214bcb08b890a74ae6bb21efe3f78829125023
| 663 |
py
|
Python
|
python_speech_features_cuda/_acc/__init__.py
|
vkola-lab/python_speech_features_cuda
|
51948be5b3acbe26a57e11aee9a1fbacba1b6b39
|
[
"MIT"
] | 6 |
2020-08-05T04:03:12.000Z
|
2021-04-06T07:45:57.000Z
|
python_speech_features_cuda/_acc/__init__.py
|
vkola-lab/python_speech_features_cuda
|
51948be5b3acbe26a57e11aee9a1fbacba1b6b39
|
[
"MIT"
] | null | null | null |
python_speech_features_cuda/_acc/__init__.py
|
vkola-lab/python_speech_features_cuda
|
51948be5b3acbe26a57e11aee9a1fbacba1b6b39
|
[
"MIT"
] | null | null | null |
"""
Created on Mon Aug 10 09:25:14 2020
@author: cxue2
"""
from .. import env
if env.is_numba_available:
from ._jit import _jit_preemp_frmsig
from ._jit import _jit_powdiv
from ._jit import _jit_sum
from ._jit import _jit_mul
from ._jit import _jit_rplzro_log
from ._jit import _jit_rplzro
else:
_jit_preemp_frmsig = None
_jit_powdiv = None
_jit_sum = None
_jit_mul = None
_jit_rplzro_log = None
_jit_rplzro = None
from ._fft import fft
from ._opr import sum
from ._opr import mul
from ._opr import rplzro
from ._opr import rplzro_log
from ._wrp import preemp_frmsig_powspc
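# Descriptive note (added; an assumption about the intended usage): when Numba
# is not available the `_jit_*` names above are deliberately bound to None, so
# callers elsewhere in the package presumably check `env.is_numba_available`
# (or test these names for None) and fall back to the non-JIT code paths.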
| 22.1 | 40 | 0.689291 |
4a214ca13dc21d98f0d35535dcd7ebe6890a226a
| 24,668 |
py
|
Python
|
Exp 4 4 with USRCAT/evolution_functions.py
|
Mattarian/GAD-USRCAT
|
306a21e8afbeff3d8b56ae4641ac1195a12f2036
|
[
"Apache-2.0"
] | null | null | null |
Exp 4 4 with USRCAT/evolution_functions.py
|
Mattarian/GAD-USRCAT
|
306a21e8afbeff3d8b56ae4641ac1195a12f2036
|
[
"Apache-2.0"
] | null | null | null |
Exp 4 4 with USRCAT/evolution_functions.py
|
Mattarian/GAD-USRCAT
|
306a21e8afbeff3d8b56ae4641ac1195a12f2036
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import os
import rdkit
import shutil
import multiprocessing
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import MolFromSmiles as smi2mol
from rdkit.Chem import MolToSmiles as mol2smi
from rdkit.Chem import Descriptors
from rdkit.Chem import rdMolDescriptors
from rdkit import DataStructs
from selfies import decoder
import numpy as np
import inspect
from collections import OrderedDict
manager = multiprocessing.Manager()
lock = multiprocessing.Lock()
def get_logP(mol):
'''Calculate logP of a molecule
Parameters:
    mol (rdkit.Chem.rdchem.Mol) : RdKit mol object, for which logP is to be calculated
Returns:
float : logP of molecule (mol)
'''
return Descriptors.MolLogP(mol)
def molecule_similarity(mol, target, radius=2, nBits=2048,
useChirality=True):
"""
Reward for a target molecule similarity, based on tanimoto similarity
between the ECFP fingerprints of the x molecule and target molecule
:param mol: rdkit mol object
:param target: rdkit mol object
:return: float, [0.0, 1.0]
"""
x = rdMolDescriptors.GetMorganFingerprintAsBitVect(mol, radius=radius,
nBits=nBits,
useChirality=useChirality)
target = rdMolDescriptors.GetMorganFingerprintAsBitVect(target,
radius=radius,
nBits=nBits,
useChirality=useChirality)
return DataStructs.TanimotoSimilarity(x, target)
def make_clean_results_dir():
# Create the results folder
root_folder = './results'
if not os.path.exists(root_folder):
os.makedirs(root_folder)
else:
shutil.rmtree(root_folder)
os.makedirs(root_folder)
return root_folder
def make_clean_directories(beta, root_folder, iteration):
'''Create or clean directories: 'images' & 'saved_models'
Create directories from scratch, if they do not exist
Clean (remove all content) if directories already exist
Parameters:
None
Returns:
None : Folders in current directory modified
'''
image_dir= root_folder + '/images_generation_' + str(beta) + '_' + str(iteration)
if not os.path.exists(image_dir):
os.makedirs(image_dir)
else:
if len(os.listdir(image_dir)) > 0:
os.system("rm -r %s/*"%(image_dir))
models_dir = root_folder + '/saved_models_' + str(beta) + '_' + str(iteration)
if not os.path.exists(models_dir):
os.makedirs(models_dir)
else:
if len(os.listdir(models_dir)) > 0:
os.system("rm -r %s/*"%(models_dir))
data_dir = root_folder + '/results_' + str(beta) + '_' + str(iteration)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
else:
if len(os.listdir(data_dir)) > 0:
os.system("rm -r %s/*"%(data_dir))
return (image_dir, models_dir, data_dir)
def sanitize_smiles(smi):
'''Return a canonical smile representation of smi
Parameters:
smi (string) : smile string to be canonicalized
Returns:
mol (rdkit.Chem.rdchem.Mol) : RdKit mol object (None if invalid smile string smi)
smi_canon (string) : Canonicalized smile representation of smi (None if invalid smile string smi)
conversion_successful (bool): True/False to indicate if conversion was successful
'''
try:
mol = smi2mol(smi, sanitize=True)
smi_canon = mol2smi(mol, isomericSmiles=False, canonical=True)
return (mol, smi_canon, True)
except:
return (None, None, False)
def sanitize_multiple_smiles(smi_ls):
'''Calls function sanitize_smiles for each item in list smi_ls
'''
sanitized_smiles = []
for smi in smi_ls:
smi_converted = sanitize_smiles(smi)
sanitized_smiles.append(smi_converted[1])
if smi_converted[2] == False or smi_converted[1] == '':
raise Exception("Invalid SMILE ecncountered. Value =", smi)
return sanitized_smiles
def read_dataset(filename):
'''Return a list of smiles contained in file filename
Parameters:
    filename (string) : Name of file containing smiles separated by '\n'
Returns
content (list) : list of smile string in file filename
'''
with open(filename) as f:
content = f.readlines()
content = [x.strip() for x in content]
return content
def read_dataset_encoding(disc_enc_type):
'''Return zinc-data set based on disc_enc_type choice of 'smiles' or 'selfies'
Parameters:
disc_enc_type (string): 'smiles' or 'selfies'
'''
if disc_enc_type == 'smiles' or disc_enc_type == 'properties_rdkit':
smiles_reference = read_dataset(filename='./datasets/zinc_dearom.txt')
return smiles_reference
elif disc_enc_type == 'selfies':
selfies_reference = read_dataset(filename='./datasets/SELFIES_zinc.txt')
return selfies_reference
def create_100_mol_image(mol_list, file_name, fitness, logP, SAS, RingCount, USRSim): #!#
'''Create a single picture of multiple molecules in a single Grid.
'''
assert len(mol_list) == 100
if logP == None and SAS == None and RingCount == None:
Draw.MolsToGridImage(mol_list, molsPerRow=10, subImgSize=(200,200)).save(file_name)
return
for i,m in enumerate(mol_list):
m.SetProp('_Name','%s %s %s %s %s' % (round(fitness[i], 3), round(logP[i], 3), round(SAS[i], 3), round(RingCount[i], 3), round(USRSim[i], 3)))
try:
Draw.MolsToGridImage(mol_list, molsPerRow=10, subImgSize=(200,200), legends=[x.GetProp("_Name") for x in mol_list]).save(file_name)
except:
print('Failed to produce image!')
return
def get_selfie_chars(selfie):
'''Obtain a list of all selfie characters in string selfie
Parameters:
selfie (string) : A selfie string - representing a molecule
Example:
>>> get_selfie_chars('[C][=C][C][=C][C][=C][Ring1][Branch1_1]')
['[C]', '[=C]', '[C]', '[=C]', '[C]', '[=C]', '[Ring1]', '[Branch1_1]']
Returns:
chars_selfie: list of selfie characters present in molecule selfie
'''
    chars_selfie = [] # A list of all SELFIE symbols from string selfie
while selfie != '':
chars_selfie.append(selfie[selfie.find('['): selfie.find(']')+1])
selfie = selfie[selfie.find(']')+1:]
return chars_selfie
def smiles_alphabet(disc_enc_type):
'''Return a list of characters present in the zinc dataset
Parameters:
disc_enc_type (string): Indicates whether to return SMILES/SELFiES characters
Returns:
alphabet: list of SELFIE/SMILE alphabets in Zinc
'''
if disc_enc_type == 'smiles':
alphabet = ['C', 'c', 'H','O','o', 'N','n', 'S','s', 'F', 'P', 'I',
'Cl','Br', '=','#','(',')','[',']','1','2','3','4','5',
'6','7','8','9','+','-','X'] # SMILES Alphabets in zinc
elif disc_enc_type == 'selfies':
alphabet = ['[Ring1]', '[Branch1_1]', '[Branch1_2]','[Branch1_3]', '[Cl]',
'[Ring2]', '[Branch2_1]', '[Branch2_2]','[Branch2_3]', '[NH3+]',
'[N]', '[=N]', '[#N]', '[C]', '[=C]',
'[#C]', '[S]', '[=S]', '[=O]', '[Br]',
'[epsilon]', '[N+]', '[NH+]', '[NH2+]', '[=NH+]',
'[=NH2+]', '[I]', '[O-]', '[P]', '[=P]',
'[S-]', '[=N-]', '[NH-]', '[=O+]', '[CH-]',
'[PH+]', '[=S+]', '[S+]', '[CH2-]', '[P+]',
'[O+]', '[=N+]', '[N-]' , '[=SH+]', '[=OH+]',
'[#N+]', '[=PH2]', 'X', '[F]', '[O]',
] # SELFIES Alphabets in zinc
else:
exit('Invalid choice. Only possible choices are: smiles/selfies.')
return alphabet
def _to_onehot(molecule_str, disc_enc_type, max_molecules_len):
'''Convert given molecule string into a one-hot encoding, with characters
obtained from function 'smiles_alphabet'.
One-hot encoding of arbitrary molecules is converted to len
'max_molecules_len' by padding with character 'X'
Parameters:
molecule_str (string): SMILE/SELFIE string of molecule
        disc_enc_type (string): Indicates whether the molecule string is either
SMILE or SELFIE
max_molecules_len (string): Length of the one-hot encoding
Returns:
one_hots (list of lists): One-Hot encoding of molecule string, padding
till length max_molecules_len (dim: len(alphabet) * max_molecules_len)
'''
one_hots=[]
alphabet = smiles_alphabet(disc_enc_type)
alphabet_length = len(alphabet)
if disc_enc_type == 'smiles':
alphabet.remove('Cl') # Replace 'Cl' & 'Br' with 'Y' & 'Z' for convenience
alphabet.remove('Br') # (Searching for single characters is easier)
alphabet.append('Y')
alphabet.append('Z')
for smi in molecule_str:
        # Replace 'Cl' and 'Br' with 'Y', 'Z' in smi (for convenience)
if disc_enc_type == 'smiles':
smi = smi.replace('Cl', 'Y')
smi = smi.replace('Br', 'Z')
one_hot=[]
if disc_enc_type == 'selfies':
smi = get_selfie_chars(smi)
if len(smi) > max_molecules_len:
exit("Molecule is too large!")
for char in smi:
if char not in alphabet:
print("smiles character %s not in alphabet MOLECULE: %s"%(char, smi))
zeros = np.zeros((alphabet_length)).astype(np.int32).tolist()
zeros[alphabet.index(char)] = 1
one_hot+=zeros
# Padding with 'X's
for char in range(max_molecules_len-len(smi)):
zeros = np.zeros((alphabet_length)).astype(np.int32).tolist()
zeros[alphabet.index("X")] = 1
one_hot += zeros
one_hots.append(one_hot)
one_hots = np.array(one_hots)
return (one_hots)
def mutations_random_grin(selfie, max_molecules_len, write_fail_cases=False):
'''Return a mutated selfie string
Mutations are done until a valid molecule is obtained
    Rules of mutation: with 50% probability, either:
1. Add a random SELFIE character in the string
2. Replace a random SELFIE character with another
Parameters:
selfie (string) : SELFIE string to be mutated
max_molecules_len (int) : Mutations of SELFIE string are allowed up to this length
write_fail_cases (bool) : If true, failed mutations are recorded in "selfie_failure_cases.txt"
Returns:
selfie_mutated (string) : Mutated SELFIE string
smiles_canon (string) : canonical smile of mutated SELFIE string
'''
valid=False
fail_counter = 0
chars_selfie = get_selfie_chars(selfie)
while not valid:
fail_counter += 1
alphabet = ['[Branch1_1]', '[Branch1_2]','[Branch1_3]', '[epsilon]', '[Ring1]', '[Ring2]', '[Branch2_1]', '[Branch2_2]', '[Branch2_3]', '[F]', '[O]', '[=O]', '[N]', '[=N]', '[#N]', '[C]', '[=C]', '[#C]', '[S]', '[=S]', '[C][=C][C][=C][C][=C][Ring1][Branch1_1]']
# Insert a character in a Random Location
if np.random.random() < 0.5:
random_index = np.random.randint(len(chars_selfie)+1)
random_character = np.random.choice(alphabet, size=1)[0]
selfie_mutated_chars = chars_selfie[:random_index] + [random_character] + chars_selfie[random_index:]
# Replace a random character
else:
random_index = np.random.randint(len(chars_selfie))
random_character = np.random.choice(alphabet, size=1)[0]
if random_index==0:
selfie_mutated_chars = [random_character] + chars_selfie[random_index+1:]
else:
selfie_mutated_chars = chars_selfie[:random_index] + [random_character] + chars_selfie[random_index+1:]
selfie_mutated = "".join(x for x in selfie_mutated_chars)
sf = "".join(x for x in chars_selfie)
try:
smiles = decoder(selfie_mutated)
mol, smiles_canon, done = sanitize_smiles(smiles)
if len(smiles_canon) > max_molecules_len or smiles_canon=="":
done=False
if done:
valid=True
else:
valid=False
except:
valid=False
if fail_counter > 1 and write_fail_cases == True:
f = open("selfie_failure_cases.txt", "a+")
f.write('Tried to mutate SELFIE: '+str(sf)+' To Obtain: '+str(selfie_mutated) + '\n')
f.close()
return (selfie_mutated, smiles_canon)
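# --- Illustrative usage sketch (added; not part of the original module) ---
# Apply a few random SELFIES mutations starting from benzene and collect the
# canonical SMILES of each valid mutant. Parameter values are arbitrary.
def _example_mutate_benzene(n_mutations=5, max_molecules_len=81):
    selfie = '[C][=C][C][=C][C][=C][Ring1][Branch1_1]'  # benzene
    mutants = []
    for _ in range(n_mutations):
        selfie, smi_canon = mutations_random_grin(selfie, max_molecules_len)
        mutants.append(smi_canon)
    return mutants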
def count_atoms(mol, atomic_num):
'''Count the number of atoms in mol with atomic number atomic_num
Parameters:
mol (rdkit.Chem.rdchem.Mol) : Molecule in which search is conducted
atomic_num (int) : Counting is done in mol for atoms with this atomic number
Returns:
        (int) : final count of atoms
'''
pat = Chem.MolFromSmarts("[#{}]".format(atomic_num))
return len(mol.GetSubstructMatches(pat))
def get_num_bond_types(mol):
'''Calculate the ratio of total number of (single, double, triple, aromatic) bonds to the
total number of bonds.
Parameters:
        mol (rdkit.Chem.rdchem.Mol) : Molecule for which ratios are returned
Returns:
(list): [num_single/num_bonds, num_double/num_bonds, num_triple/num_bonds, num_aromatic/num_bonds]
'''
bonds = mol.GetBonds()
num_bonds = 0
num_double = 0
num_triple = 0
num_single = 0
num_aromatic = 0
for b in bonds:
num_bonds += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.SINGLE:
num_single += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.DOUBLE:
num_double += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.TRIPLE:
num_triple += 1
if b.GetBondType() == rdkit.Chem.rdchem.BondType.AROMATIC:
num_aromatic += 1
if num_bonds == 0:
return [0, 0, 0, 0]
else:
return [num_single/num_bonds, num_double/num_bonds, num_triple/num_bonds, num_aromatic/num_bonds]
def count_conseq_double(mol):
    '''Return the number of consecutive double bonds in an entire molecule
including rings
Examples
>>> count_conseq_double(Chem.MolFromSmiles('C1=CC=C=C=C1'))
2
>>> count_conseq_double(Chem.MolFromSmiles('C1=CC=CC=C1'))
0
>>> count_conseq_double(Chem.MolFromSmiles('C1=CC2=C(C=C1)C=C=C=C2'))
2
Parameters:
        mol (rdkit.Chem.rdchem.Mol) : Molecule for which conseq. double bonds are to be counted
    Returns:
        (int): The integer number of conseq. double bonds
'''
bonds = mol.GetBonds()
previous_BType = None
count_conseq_doub = 0
for b in bonds:
curr_BType = b.GetBondType()
if previous_BType == curr_BType and curr_BType == rdkit.Chem.rdchem.BondType.DOUBLE:
count_conseq_doub += 1
previous_BType = curr_BType
return count_conseq_doub
def get_rot_bonds_posn(mol):
'''Return atom indices with Rotatable bonds
Examples:
>>> get_rot_bonds_posn('CC1=CC=CC=C1') # Toluene (Rotatable Bonds At: CH3 & Benzene)
((0, 1),)
>>> get_rot_bonds_posn('CCC1=CC=CC=C1') # (Rotatable Bonds At: CH3, CH3 & Benzene)
((0, 1), (1, 2))
'''
RotatableBond = Chem.MolFromSmarts('*-&!@*')
rot = mol.GetSubstructMatches(RotatableBond)
return rot
def get_bond_indeces(mol, rot):
    '''Get the bond indices for the rotatable-bond atom pairs (generated by 'get_rot_bonds_posn')
'''
bonds_idx = []
for i in range(len(rot)):
bond = mol.GetBondBetweenAtoms(rot[i][0],rot[i][1])
bonds_idx.append(bond.GetIdx())
return bonds_idx
def obtain_rings(smi):
'''Obtain a list of all rings present in SMILE string smi
Examples:
>>> obtain_rings('CCC1=CC=CC=C1')
['c1ccccc1']
>>> obtain_rings('C1=CC=C(C=C1)C1=CC=CC=C1')
['c1ccccc1', 'c1ccccc1']
>>> obtain_rings('C1=CC2=C(C=C1)C=CC=C2')
(None, None)
Parameters:
smi (string) : SMILE string of a molecule
Returns
        (list) : List of all rings in a SMILE string
'''
mol = Chem.MolFromSmiles(smi)
rot = get_rot_bonds_posn(mol) # Get rotatble bond positions
if len(rot) == 0:
return None, None
bond_idx = get_bond_indeces(mol, rot)
new_mol = Chem.FragmentOnBonds(mol, bond_idx, addDummies=False)
new_smile = Chem.MolToSmiles(new_mol)
smile_split_list = new_smile.split(".")
rings = []
for item in smile_split_list:
if '1' in item:
rings.append(item)
return rings
def size_ring_counter(ring_ls):
    '''Get the number of rings of sizes 3 to 20 and the number of consecutive double bonds in a ring
Parameters:
ring_ls (list) : list of rings of a molecule
Returns
(list) : Of size 19 (1 for number of conseq. double bonds)
(18 for number of rings between size 3 to 20)
'''
ring_counter = []
    if ring_ls == (None, None): # no rings present; return 0s for all 19 features
return [0 for i in range(19)]
mol_ring_ls = [Chem.MolFromSmiles(smi) for smi in ring_ls]
    # Count the number of consecutive double bonds across all rings
conseq_dbl_bnd_in_ring = 0
for item in mol_ring_ls:
conseq_dbl_bnd_in_ring += count_conseq_double(item)
ring_counter.append(conseq_dbl_bnd_in_ring) # concatenate onto list ring_counter
    # Count the number of rings of each size from 3 to 20
for i in range(3, 21):
count = 0
for mol_ring in mol_ring_ls:
if mol_ring.GetNumAtoms() == i:
count += 1
ring_counter.append(count)
return ring_counter
def get_mol_info(smi):
''' Calculate a set of 51 RdKit properties, collected from above helper functions.
Parameters:
smi (string) : SMILE string of molecule
Returns:
(list of float) : list of 51 calculated properties
'''
mol = Chem.MolFromSmiles(smi)
num_atoms = mol.GetNumAtoms()
num_hydro = Chem.AddHs(mol).GetNumAtoms() - num_atoms
num_carbon = count_atoms(mol, 6)
num_nitro = count_atoms(mol, 7)
num_sulphur = count_atoms(mol, 16)
num_oxy = count_atoms(mol, 8)
num_clorine = count_atoms(mol, 17)
num_bromine = count_atoms(mol, 35)
num_florine = count_atoms(mol, 9)
if num_carbon == 0: # Avoid division by zero error, set num_carbon to a very small value
num_carbon = 0.0001
basic_props = [num_atoms/num_carbon, num_hydro/num_carbon, num_nitro/num_carbon,
num_sulphur/num_carbon, num_oxy/num_carbon, num_clorine/num_carbon,
num_bromine/num_carbon, num_florine/num_carbon]
to_caculate = ["RingCount", "HallKierAlpha", "BalabanJ", "NumAliphaticCarbocycles","NumAliphaticHeterocycles",
"NumAliphaticRings","NumAromaticCarbocycles","NumAromaticHeterocycles",
"NumAromaticRings","NumHAcceptors","NumHDonors","NumHeteroatoms",
"NumRadicalElectrons","NumSaturatedCarbocycles","NumSaturatedHeterocycles",
"NumSaturatedRings","NumValenceElectrons"]
    # Calculate all properties listed in 'to_caculate'
calc_props = OrderedDict(inspect.getmembers(Descriptors, inspect.isfunction))
for key in list(calc_props.keys()):
if key.startswith('_'):
del calc_props[key]
continue
if len(to_caculate)!=0 and key not in to_caculate:
del calc_props[key]
features = [val(mol) for key,val in calc_props.items()] # List of properties
# Ratio of total number of (single, double, triple, aromatic) bonds to the total number of bonds.
simple_bond_info = get_num_bond_types(mol)
# Obtain all rings in a molecule and calc. #of triple bonds in rings & #of rings in molecule
ring_ls = obtain_rings(smi)
num_triple = 0 # num triple bonds in ring
if len(ring_ls) > 0 and ring_ls != (None, None):
for item in ring_ls:
num_triple += item.count('#')
simple_bond_info.append(len(ring_ls)) # append number of Rings in molecule
else: simple_bond_info.append(0) # no rotatable bonds
simple_bond_info.append(num_triple) # number of triple bonds in rings
# appended onto 'simple_bond_info'
# Calculate the number of rings of size 3 to 20 & number of conseq. double bonds in rings
simple_bond_info = simple_bond_info + size_ring_counter(ring_ls)
    # Calculate the number of consecutive double bonds in the entire molecule
simple_bond_info.append(count_conseq_double(mol))
return np.array(features + basic_props + simple_bond_info)
def get_chunks(arr, num_processors, ratio):
"""
Get chunks based on a list
"""
    chunks = [] # Collect sub-lists that will be sent to different processors
counter = int(ratio)
for i in range(num_processors):
if i == 0:
chunks.append(arr[0:counter])
if i != 0 and i<num_processors-1:
chunks.append(arr[counter-int(ratio): counter])
if i == num_processors-1:
chunks.append(arr[counter-int(ratio): ])
counter += int(ratio)
return chunks
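# Worked example (added for clarity): splitting 10 items across 3 processors
# with ratio = 10 / 3 gives int(ratio) = 3, so
#   get_chunks(list(range(10)), 3, 10 / 3)
#   -> [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
# i.e. the last chunk absorbs the remainder.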
def get_mult_mol_info(smiles_list):
''' Collect results of 'get_mol_info' for multiple smiles (smiles_list)
Parameters:
smiles_list (list) : List of SMILE strings
Returns:
np.array : Concatenated array of results with shape (len(smiles_list), 51)
51 is the number of RdKit properties calculated in 'get_mol_info'.
'''
concat_arr = []
for smi in smiles_list:
concat_arr.append(get_mol_info(smi))
return np.array(concat_arr)
def get_mult_mol_info_parr(smiles_list, dataset_x):
    ''' Record the calculated RDKit property results for each smile in smiles_list
        in the (locked) dictionary dataset_x.
'''
for smi in smiles_list:
dataset_x['properties_rdkit'][smi] = get_mol_info(smi)
def create_parr_process(chunks):
'''This function initiates parallel execution (based on the number of cpu cores)
to calculate all the properties mentioned in 'get_mol_info()'
Parameters:
        chunks (list) : List of lists containing smile strings. Each sub-list is
                        sent to a different process. Locked dictionaries are created
                        internally so the processes can communicate their results back.
    Returns:
        (dict) : Combined dictionary mapping each smile to its calculated property array
'''
# Assign data to each process
process_collector = []
collect_dictionaries = []
for chunk in chunks: # process initialization
dataset_x = manager.dict(lock=True)
smiles_map_props = manager.dict(lock=True)
dataset_x['properties_rdkit'] = smiles_map_props
collect_dictionaries.append(dataset_x)
process_collector.append(multiprocessing.Process(target=get_mult_mol_info_parr, args=(chunk, dataset_x, )))
    for item in process_collector: # start all processes
item.start()
for item in process_collector: # wait for all processes to finish
item.join()
combined_dict = {}
for i,item in enumerate(collect_dictionaries):
combined_dict.update(item['properties_rdkit'])
return combined_dict
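# --- Illustrative usage sketch (added; not part of the original module) ---
# One way the two helpers above can be combined: split the SMILES list into
# one chunk per CPU core, then compute the RDKit property vectors in parallel.
# The function and variable names here are arbitrary.
def _example_parallel_properties(smiles_list):
    num_processors = multiprocessing.cpu_count()
    chunks = get_chunks(smiles_list, num_processors, len(smiles_list) / num_processors)
    return create_parr_process(chunks)  # dict: smile -> property array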
| 36.383481 | 269 | 0.602927 |
4a214d874a85e32246d9e82fe1348dd3bbe0d8ad
| 7,002 |
py
|
Python
|
docs/zh_CN/conf.py
|
youqingxiaozhua/mmclassification
|
65bbabbb7e78544864b845e7fd09a90d4f897d5c
|
[
"Apache-2.0"
] | 1 |
2022-03-17T02:39:08.000Z
|
2022-03-17T02:39:08.000Z
|
docs/zh_CN/conf.py
|
youqingxiaozhua/mmclassification
|
65bbabbb7e78544864b845e7fd09a90d4f897d5c
|
[
"Apache-2.0"
] | null | null | null |
docs/zh_CN/conf.py
|
youqingxiaozhua/mmclassification
|
65bbabbb7e78544864b845e7fd09a90d4f897d5c
|
[
"Apache-2.0"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
from sphinx.builders.html import StandaloneHTMLBuilder
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'MMClassification'
copyright = '2020, OpenMMLab'
author = 'MMClassification Authors'
# The full version, including alpha/beta/rc tags
version_file = '../../mmcls/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
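# Descriptive note (added): get_version() exec()s the version file above and
# relies on it defining a module-level `__version__` string (e.g.
# __version__ = '0.x.y'); that value becomes `release` below.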
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
]
autodoc_mock_imports = ['mmcv._ext', 'matplotlib']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
language = 'zh_CN'
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo_url':
'https://mmclassification.readthedocs.io/zh_CN/latest/',
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmclassification'
},
{
'name':
'Colab 教程',
'children': [
{
'name':
'用命令行工具训练和推理',
'url':
'https://colab.research.google.com/github/'
'open-mmlab/mmclassification/blob/master/docs/zh_CN/'
'tutorials/MMClassification_tools_cn.ipynb',
},
{
'name':
'用 Python API 训练和推理',
'url':
'https://colab.research.google.com/github/'
'open-mmlab/mmclassification/blob/master/docs/zh_CN/'
'tutorials/MMClassification_python_cn.ipynb',
},
]
},
],
# Specify the language of shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
html_js_files = ['js/custom.js']
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'mmclsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mmcls.tex', 'MMClassification Documentation', author,
'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'mmcls', 'MMClassification Documentation', [author],
1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mmcls', 'MMClassification Documentation', author, 'mmcls',
'OpenMMLab image classification toolbox and benchmark.', 'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# set priority when building html
StandaloneHTMLBuilder.supported_image_types = [
'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg'
]
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
# Auto-generated header anchors
myst_heading_anchors = 3
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/en/master/', None),
}
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.add_config_value('no_underscore_emphasis', False, 'env')
app.connect('builder-inited', builder_inited_handler)
| 30.710526 | 79 | 0.618109 |
4a214dc6b51db5505d119767abc081d62621ab2f
| 4,953 |
py
|
Python
|
nipyapi/registry/apis/config_api.py
|
Zyrix/nipyapi
|
d00221ba50bd83e21133d6e4d4b56741ead6822a
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/registry/apis/config_api.py
|
Zyrix/nipyapi
|
d00221ba50bd83e21133d6e4d4b56741ead6822a
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/registry/apis/config_api.py
|
Zyrix/nipyapi
|
d00221ba50bd83e21133d6e4d4b56741ead6822a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Apache NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
OpenAPI spec version: 0.7.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ConfigApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_configuration(self, **kwargs):
"""
        Get configuration
Gets the NiFi Registry configurations.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: RegistryConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_configuration_with_http_info(**kwargs)
else:
(data) = self.get_configuration_with_http_info(**kwargs)
return data
def get_configuration_with_http_info(self, **kwargs):
"""
        Get configuration
Gets the NiFi Registry configurations.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: RegistryConfiguration
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_configuration" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'basicAuth', 'Authorization']
return self.api_client.call_api('/config', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RegistryConfiguration',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
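# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated client). It shows one plausible way
# to call get_configuration() per the docstrings above; it assumes a reachable
# NiFi Registry and that the module is used in its package context
# (e.g. via `import nipyapi.registry`).
#
#     api = ConfigApi()
#     registry_config = api.get_configuration()         # synchronous call
#     print(registry_config)                            # a RegistryConfiguration
#
#     def on_done(response):                            # asynchronous variant
#         print(response)
#     thread = api.get_configuration(callback=on_done)  # returns the request thread
# ---------------------------------------------------------------------------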
| 35.378571 | 127 | 0.578841 |
4a214e794d2163c899280555b7597074b0a3e235
| 3,468 |
py
|
Python
|
examples/unsupervised_quality_estimation/meteor.py
|
fairseq-FT/fairseq
|
18725499144c1bba7c151b796ba774e59d36eaa9
|
[
"MIT"
] | 35 |
2021-05-08T09:23:31.000Z
|
2022-03-25T06:19:48.000Z
|
examples/unsupervised_quality_estimation/meteor.py
|
fairseq-FT/fairseq
|
18725499144c1bba7c151b796ba774e59d36eaa9
|
[
"MIT"
] | 10 |
2021-11-14T12:28:48.000Z
|
2022-02-28T14:13:40.000Z
|
examples/unsupervised_quality_estimation/meteor.py
|
fairseq-FT/fairseq
|
18725499144c1bba7c151b796ba774e59d36eaa9
|
[
"MIT"
] | 14 |
2021-05-17T06:55:01.000Z
|
2022-03-28T12:07:42.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
import os
import subprocess
import sys
import tempfile
from collections import defaultdict
from itertools import combinations
def read_translations(path, n_repeats):
segment_counter = 0
segment_translations = []
translations = defaultdict(list)
for line in open(path):
segment_translations.append(" ".join(line.split()))
if len(segment_translations) == n_repeats:
translations[segment_counter] = segment_translations
segment_translations = []
segment_counter += 1
return translations
def generate_input(translations, n_repeats):
_, ref_path = tempfile.mkstemp()
_, mt_path = tempfile.mkstemp()
ref_fh = open(ref_path, "w")
mt_fh = open(mt_path, "w")
for segid in sorted(translations.keys()):
assert len(translations[segid]) == n_repeats
indexes = combinations(range(n_repeats), 2)
for idx1, idx2 in indexes:
mt_fh.write(translations[segid][idx1].strip() + "\n")
ref_fh.write(translations[segid][idx2].strip() + "\n")
sys.stderr.write("\nSaved translations to %s and %s" % (ref_path, mt_path))
return ref_path, mt_path
def run_meteor(ref_path, mt_path, metric_path, lang="en"):
_, out_path = tempfile.mkstemp()
subprocess.call(
[
"java",
"-Xmx2G",
"-jar",
metric_path,
mt_path,
ref_path,
"-p",
"0.5 0.2 0.6 0.75", # default parameters, only changed alpha to give equal weight to P and R
"-norm",
"-l",
lang,
],
stdout=open(out_path, "w"),
)
os.remove(ref_path)
os.remove(mt_path)
sys.stderr.write("\nSaved Meteor output to %s" % out_path)
return out_path
def read_output(meteor_output_path, n_repeats):
n_combinations = math.factorial(n_repeats) / (
math.factorial(2) * math.factorial(n_repeats - 2)
)
raw_scores = []
average_scores = []
for line in open(meteor_output_path):
if not line.startswith("Segment "):
continue
score = float(line.strip().split("\t")[1])
raw_scores.append(score)
if len(raw_scores) == n_combinations:
average_scores.append(sum(raw_scores) / n_combinations)
raw_scores = []
os.remove(meteor_output_path)
return average_scores
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input")
parser.add_argument("-n", "--repeat_times", type=int)
parser.add_argument("-m", "--meteor")
parser.add_argument("-o", "--output")
args = parser.parse_args()
    translations = read_translations(args.input, args.repeat_times)
sys.stderr.write("\nGenerating input for Meteor...")
    ref_path, mt_path = generate_input(translations, args.repeat_times)
sys.stderr.write("\nRunning Meteor...")
out_path = run_meteor(ref_path, mt_path, args.meteor)
sys.stderr.write("\nReading output...")
    scores = read_output(out_path, args.repeat_times)
sys.stderr.write("\nWriting results...")
with open(args.output, "w") as o:
for scr in scores:
o.write("{}\n".format(scr))
o.close()
if __name__ == "__main__":
main()
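# ---------------------------------------------------------------------------
# Usage sketch (not part of the original script; file names below are
# placeholders). With N hypotheses per source segment the script scores every
# unordered pair, i.e. C(N, 2) Meteor comparisons per segment, then writes one
# averaged score per segment:
#     n_repeats = 5
#     n_pairs = math.factorial(n_repeats) // (2 * math.factorial(n_repeats - 2))  # 10 pairs
#     # python meteor.py -i hypotheses.txt -n 5 -m meteor.jar -o segment_scores.txt
# ---------------------------------------------------------------------------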
| 31.527273 | 105 | 0.633506 |
4a214f10698062861f6fe0935f75dcf9ce43683c
| 36,632 |
py
|
Python
|
docker-privnet/neo-python/unsafeprompt.py
|
rodoufu/neocompiler.io
|
fba364d66571bd7d5254087ec644d00bbcf31ec2
|
[
"MIT"
] | 2 |
2019-06-30T00:09:24.000Z
|
2021-03-25T14:02:14.000Z
|
docker-privnet/neo-python/unsafeprompt.py
|
rodoufu/neocompiler.io
|
fba364d66571bd7d5254087ec644d00bbcf31ec2
|
[
"MIT"
] | null | null | null |
docker-privnet/neo-python/unsafeprompt.py
|
rodoufu/neocompiler.io
|
fba364d66571bd7d5254087ec644d00bbcf31ec2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import datetime
import json
import os
import resource
import traceback
import logging
import time
import logzero
from prompt_toolkit import prompt
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit.history import FileHistory
from prompt_toolkit.shortcuts import print_tokens
from prompt_toolkit.styles import style_from_dict
from prompt_toolkit.token import Token
from twisted.internet import reactor, task
from neo import __version__
from neo.Core.Blockchain import Blockchain
from neocore.Fixed8 import Fixed8
from neo.IO.MemoryStream import StreamManager
from neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain import LevelDBBlockchain
from neo.Implementations.Blockchains.LevelDB.DebugStorage import DebugStorage
from neo.Implementations.Wallets.peewee.UserWallet import UserWallet
from neo.Network.NodeLeader import NodeLeader
from neo.Prompt.Commands.BuildNRun import BuildAndRun, LoadAndRun
from neo.Prompt.Commands.Invoke import InvokeContract, TestInvokeContract, test_invoke
from neo.Prompt.Commands.LoadSmartContract import LoadContract, GatherContractDetails, ImportContractAddr, \
ImportMultiSigContractAddr
from neo.Prompt.Commands.Send import construct_and_send, parse_and_sign
from neo.Prompt.Commands.Tokens import token_approve_allowance, token_get_allowance, token_send, token_send_from, token_mint, token_crowdsale_register
from neo.Prompt.Commands.Wallet import DeleteAddress, ImportWatchAddr, ImportToken, ClaimGas, DeleteToken, AddAlias
from neo.Prompt.Utils import get_arg
from neo.Settings import settings, DIR_PROJECT_ROOT
from neo.UserPreferences import preferences
from neocore.KeyPair import KeyPair
from neocore.UInt256 import UInt256
import codecs
# Logfile settings & setup
LOGFILE_FN = os.path.join(DIR_PROJECT_ROOT, 'prompt.log')
LOGFILE_MAX_BYTES = 5e7 # 50 MB
LOGFILE_BACKUP_COUNT = 3 # 3 logfiles history
settings.set_logfile(LOGFILE_FN, LOGFILE_MAX_BYTES, LOGFILE_BACKUP_COUNT)
# Prompt history filename
FILENAME_PROMPT_HISTORY = os.path.join(DIR_PROJECT_ROOT, '.prompt.py.history')
class PromptInterface(object):
mycommands = []
go_on = True
_walletdb_loop = None
Wallet = None
_known_things = []
commands = ['quit',
'help',
'block {index/hash} (tx)',
'header {index/hash}',
'tx {hash}',
'asset {assetId}',
'asset search {query}',
'contract {contract hash}',
'contract search {query}',
'mem',
'nodes',
'state',
'config debug {on/off}',
'config sc-events {on/off}',
'build {path/to/file.py} (test {params} {returntype} {needs_storage} {needs_dynamic_invoke} {test_params})',
'load_run {path/to/file.avm} (test {params} {returntype} {needs_storage} {needs_dynamic_invoke} {test_params})',
'import wif {wif}',
'import nep2 {nep2_encrypted_key}',
'import contract {path/to/file.avm} {params} {returntype} {needs_storage} {needs_dynamic_invoke}',
'import contract_addr {contract_hash} {pubkey}',
'import watch_addr {address}',
'import token {token_contract_hash}',
'export wif {address}',
'export nep2 {address}',
'open wallet {path}',
'create wallet {path}',
'wallet {verbose}',
'wallet claim',
'wallet migrate',
'wallet rebuild {start block}',
'wallet delete_addr {addr}',
'wallet alias {addr} {title}',
'wallet tkn_send {token symbol} {address_from} {address to} {amount} ',
'wallet tkn_send_from {token symbol} {address_from} {address to} {amount}',
'wallet tkn_approve {token symbol} {address_from} {address to} {amount}',
'wallet tkn_allowance {token symbol} {address_from} {address to}',
'wallet tkn_mint {token symbol} {mint_to_addr} {amount_attach_neo} {amount_attach_gas}',
'wallet close',
'send {assetId or name} {address} {amount} (--from-addr={addr})',
'sign {transaction in JSON format}',
'testinvoke {contract hash} {params} (--attach-neo={amount}, --attach-gas={amount)',
'debugstorage {on/off/reset}'
]
history = FileHistory(FILENAME_PROMPT_HISTORY)
token_style = None
start_height = None
start_dt = None
def __init__(self):
self.start_height = Blockchain.Default().Height
self.start_dt = datetime.datetime.utcnow()
self.token_style = style_from_dict({
Token.Command: preferences.token_style['Command'],
Token.Neo: preferences.token_style['Neo'],
Token.Default: preferences.token_style['Default'],
Token.Number: preferences.token_style['Number'],
})
def get_bottom_toolbar(self, cli=None):
out = []
try:
out = [(Token.Command, '[%s] Progress: ' % settings.net_name),
(Token.Number, str(Blockchain.Default().Height)),
(Token.Neo, '/'),
(Token.Number, str(Blockchain.Default().HeaderHeight))]
except Exception as e:
pass
return out
def get_completer(self):
standard_completions = ['block', 'tx', 'header', 'mem', 'neo', 'gas',
'help', 'state', 'nodes', 'exit', 'quit',
'config', 'import', 'export', 'open',
'wallet', 'contract', 'asset', 'wif',
'watch_addr', 'contract_addr', 'testinvoke', 'tkn_send',
'tkn_mint', 'tkn_send_from', 'tkn_approve', 'tkn_allowance',
'build', ]
if self.Wallet:
for addr in self.Wallet.Addresses:
if addr not in self._known_things:
self._known_things.append(addr)
for alias in self.Wallet.NamedAddr:
if alias.Title not in self._known_things:
self._known_things.append(alias.Title)
for tkn in self.Wallet.GetTokens().values():
if tkn.symbol not in self._known_things:
self._known_things.append(tkn.symbol)
all_completions = standard_completions + self._known_things
completer = WordCompleter(all_completions)
return completer
def quit(self):
print('Shutting down. This may take a bit...')
self.go_on = False
Blockchain.Default().Dispose()
reactor.stop()
NodeLeader.Instance().Shutdown()
def help(self):
tokens = []
for c in self.commands:
tokens.append((Token.Command, "%s\n" % c))
print_tokens(tokens, self.token_style)
def do_open(self, arguments):
if self.Wallet:
self.do_close_wallet()
item = get_arg(arguments)
if item and item == 'wallet':
path = get_arg(arguments, 1)
if path:
if not os.path.exists(path):
print("wallet file not found")
return
passwd = 'coz' #always use this password
#passwd = prompt("[Password]> ", is_password=True)
try:
self.Wallet = UserWallet.Open(path, passwd)
self._walletdb_loop = task.LoopingCall(self.Wallet.ProcessBlocks)
self._walletdb_loop.start(1)
print("Opened wallet at %s" % path)
except Exception as e:
print("could not open wallet: %s " % e)
else:
print("Please specify a path")
else:
print("please specify something to open")
def do_create(self, arguments):
item = get_arg(arguments)
if item and item == 'wallet':
path = get_arg(arguments, 1)
if path:
if os.path.exists(path):
print("File already exists")
return
passwd1 = prompt("[Password 1]> ", is_password=True)
passwd2 = prompt("[Password 2]> ", is_password=True)
### remove check to allow 'coz' password
#if passwd1 != passwd2 or len(passwd1) < 10:
# print("please provide matching passwords that are at least 10 characters long")
# return
try:
self.Wallet = UserWallet.Create(path=path, password=passwd1)
contract = self.Wallet.GetDefaultContract()
key = self.Wallet.GetKey(contract.PublicKeyHash)
print("Wallet %s " % json.dumps(self.Wallet.ToJson(), indent=4))
print("pubkey %s " % key.PublicKey.encode_point(True))
except Exception as e:
print("Exception creating wallet: %s " % e)
self.Wallet = None
if os.path.isfile(path):
try:
os.remove(path)
except Exception as e:
print("Could not remove {}: {}".format(path, e))
return
self._walletdb_loop = task.LoopingCall(self.Wallet.ProcessBlocks)
self._walletdb_loop.start(1)
else:
print("Please specify a path")
def do_close_wallet(self):
if self.Wallet:
path = self.Wallet._path
self._walletdb_loop.stop()
self._walletdb_loop = None
self.Wallet = None
print("closed wallet %s " % path)
def do_import(self, arguments):
item = get_arg(arguments)
if not item:
print("please specify something to import")
return
if item == 'wif':
if not self.Wallet:
print("Please open a wallet before importing WIF")
return
wif = get_arg(arguments, 1)
if not wif:
print("Please supply a valid WIF key")
return
try:
prikey = KeyPair.PrivateKeyFromWIF(wif)
key = self.Wallet.CreateKey(prikey)
print("Imported key %s " % wif)
print("Pubkey: %s \n" % key.PublicKey.encode_point(True).hex())
print("Wallet: %s " % json.dumps(self.Wallet.ToJson(), indent=4))
except ValueError as e:
print(str(e))
except Exception as e:
print(str(e))
return
elif item == 'nep2':
if not self.Wallet:
print("Please open a wallet before importing a NEP2 key")
return
nep2_key = get_arg(arguments, 1)
if not nep2_key:
print("Please supply a valid nep2 encrypted private key")
return
nep2_passwd = prompt("[Key Password]> ", is_password=True)
try:
prikey = KeyPair.PrivateKeyFromNEP2(nep2_key, nep2_passwd)
key = self.Wallet.CreateKey(prikey)
print("Imported nep2 key: %s " % nep2_key)
print("Pubkey: %s \n" % key.PublicKey.encode_point(True).hex())
print("Wallet: %s " % json.dumps(self.Wallet.ToJson(), indent=4))
except ValueError as e:
print(str(e))
except Exception as e:
print(str(e))
return
elif item == 'contract':
return self.load_smart_contract(arguments)
elif item == 'contract_addr':
return ImportContractAddr(self.Wallet, arguments[1:])
elif item == 'watch_addr':
return ImportWatchAddr(self.Wallet, get_arg(arguments, 1))
elif item == 'multisig_addr':
return ImportMultiSigContractAddr(self.Wallet, arguments[1:])
elif item == 'token':
return ImportToken(self.Wallet, get_arg(arguments, 1))
else:
print("Import of '%s' not implemented" % item)
def do_build(self, arguments):
BuildAndRun(arguments, self.Wallet)
def do_load_n_run(self, arguments):
LoadAndRun(arguments, self.Wallet)
def do_export(self, arguments):
item = get_arg(arguments)
if item == 'wif':
if not self.Wallet:
return print("please open a wallet")
address = get_arg(arguments, 1)
if not address:
return print("Please specify an address")
passwd = prompt("[Wallet Password]> ", is_password=True)
if not self.Wallet.ValidatePassword(passwd):
return print("Incorrect password")
keys = self.Wallet.GetKeys()
for key in keys:
if key.GetAddress() == address:
export = key.Export()
print("WIF key export: %s" % export)
return
elif item == 'nep2':
if not self.Wallet:
return print("please open a wallet")
address = get_arg(arguments, 1)
if not address:
return print("Please specify an address")
passwd = prompt("[Wallet Password]> ", is_password=True)
if not self.Wallet.ValidatePassword(passwd):
return print("Incorrect password")
nep2_passwd1 = prompt("[Key Password 1]> ", is_password=True)
if len(nep2_passwd1) < 10:
return print("Please provide a password with at least 10 characters")
nep2_passwd2 = prompt("[Key Password 2]> ", is_password=True)
if nep2_passwd1 != nep2_passwd2:
return print("Passwords don't match")
keys = self.Wallet.GetKeys()
for key in keys:
export = key.ExportNEP2(nep2_passwd1)
print("NEP2 key export: %s" % export)
return
print("Command export %s not found" % item)
def show_wallet(self, arguments):
if not self.Wallet:
print("please open a wallet")
return
item = get_arg(arguments)
if not item:
print("Wallet %s " % json.dumps(self.Wallet.ToJson(), indent=4))
return
if item in ['v', '--v', 'verbose']:
print("Wallet %s " % json.dumps(self.Wallet.ToJson(verbose=True), indent=4))
return
elif item == 'migrate' and self.Wallet is not None:
self.Wallet.Migrate()
print("migrated wallet")
elif item == 'delete_addr':
addr_to_delete = get_arg(arguments, 1)
DeleteAddress(self, self.Wallet, addr_to_delete)
elif item == 'delete_token':
token_to_delete = get_arg(arguments, 1)
DeleteToken(self.Wallet, token_to_delete)
elif item == 'close':
self.do_close_wallet()
elif item == 'claim':
ClaimGas(self.Wallet)
elif item == 'rebuild':
self.Wallet.Rebuild()
# self._walletdb_loop = task.LoopingCall(self.Wallet.ProcessBlocks)
# self._walletdb_loop.start(1)
try:
item2 = int(get_arg(arguments, 1))
if item2 and item2 > 0:
print('restarting at %s ' % item2)
self.Wallet._current_height = item2
except Exception as e:
pass
elif item == 'tkn_send':
token_send(self.Wallet, arguments[1:])
elif item == 'tkn_send_from':
token_send_from(self.Wallet, arguments[1:])
elif item == 'tkn_approve':
token_approve_allowance(self.Wallet, arguments[1:])
elif item == 'tkn_allowance':
token_get_allowance(self.Wallet, arguments[1:], verbose=True)
elif item == 'tkn_mint':
token_mint(self.Wallet, arguments[1:])
elif item == 'tkn_register':
token_crowdsale_register(self.Wallet, arguments[1:])
elif item == 'alias':
if len(arguments) == 3:
AddAlias(self.Wallet, arguments[1], arguments[2])
else:
print("Please supply an address and title")
def do_send(self, arguments):
        return construct_and_send(self, self.Wallet, arguments, False)
def do_sign(self, arguments):
jsn = get_arg(arguments)
parse_and_sign(self, self.Wallet, jsn)
def show_state(self):
height = Blockchain.Default().Height
headers = Blockchain.Default().HeaderHeight
diff = height - self.start_height
now = datetime.datetime.utcnow()
difftime = now - self.start_dt
mins = difftime / datetime.timedelta(minutes=1)
bpm = 0
if diff > 0 and mins > 0:
bpm = diff / mins
out = 'Progress: %s / %s\n' % (height, headers)
out += 'Block Cache length %s\n' % Blockchain.Default().BlockCacheCount
out += 'Blocks since program start %s\n' % diff
out += 'Time elapsed %s mins\n' % mins
out += 'blocks per min %s \n' % bpm
tokens = [(Token.Number, out)]
print_tokens(tokens, self.token_style)
def show_nodes(self):
if len(NodeLeader.Instance().Peers) > 0:
out = ''
for peer in NodeLeader.Instance().Peers:
out += 'Peer %s - IO: %s\n' % (peer.Name(), peer.IOStats())
print_tokens([(Token.Number, out)], self.token_style)
else:
print('Not connected yet\n')
def show_block(self, args):
item = get_arg(args)
txarg = get_arg(args, 1)
if item is not None:
block = Blockchain.Default().GetBlock(item)
if block is not None:
bjson = json.dumps(block.ToJson(), indent=4)
tokens = [(Token.Number, bjson)]
print_tokens(tokens, self.token_style)
print('\n')
if txarg and 'tx' in txarg:
for tx in block.FullTransactions:
print(json.dumps(tx.ToJson(), indent=4))
else:
print("could not locate block %s" % item)
else:
print("please specify a block")
def show_header(self, args):
item = get_arg(args)
if item is not None:
header = Blockchain.Default().GetHeaderBy(item)
if header is not None:
print(json.dumps(header.ToJson(), indent=4))
else:
print("could not locate Header %s \n" % item)
else:
print("please specify a header")
def show_tx(self, args):
item = get_arg(args)
if item is not None:
try:
tx, height = Blockchain.Default().GetTransaction(item)
if height > -1:
bjson = json.dumps(tx.ToJson(), indent=4)
tokens = [(Token.Command, bjson)]
print_tokens(tokens, self.token_style)
print('\n')
except Exception as e:
print("Could not find transaction with id %s " % item)
print("Please specify a tx hash like 'db55b4d97cf99db6826967ef4318c2993852dff3e79ec446103f141c716227f6'")
else:
print("please specify a tx hash")
def show_account_state(self, args):
item = get_arg(args)
if item is not None:
account = Blockchain.Default().GetAccountState(item, print_all_accounts=True)
if account is not None:
bjson = json.dumps(account.ToJson(), indent=4)
tokens = [(Token.Number, bjson)]
print_tokens(tokens, self.token_style)
print('\n')
else:
print("account %s not found" % item)
else:
print("please specify an account address")
def show_asset_state(self, args):
item = get_arg(args)
if item is not None:
if item == 'search':
query = get_arg(args, 1)
results = Blockchain.Default().SearchAssetState(query)
print("Found %s results for %s " % (len(results), query))
for asset in results:
bjson = json.dumps(asset.ToJson(), indent=4)
tokens = [(Token.Number, bjson)]
print_tokens(tokens, self.token_style)
print('\n')
return
asset = Blockchain.Default().GetAssetState(item)
if asset is not None:
bjson = json.dumps(asset.ToJson(), indent=4)
tokens = [(Token.Number, bjson)]
print_tokens(tokens, self.token_style)
print('\n')
else:
print("asset %s not found" % item)
else:
print("please specify an asset hash")
def show_contract_state(self, args):
item = get_arg(args)
if item is not None:
if item.lower() == 'all':
contracts = Blockchain.Default().ShowAllContracts()
print("contracts: %s " % contracts)
elif item.lower() == 'search':
query = get_arg(args, 1)
if query:
contracts = Blockchain.Default().SearchContracts(query=query)
print("Found %s results for %s " % (len(contracts), query))
for contract in contracts:
bjson = json.dumps(contract.ToJson(), indent=4)
tokens = [(Token.Number, bjson)]
print_tokens(tokens, self.token_style)
print('\n')
else:
print("Please specify a search query")
else:
contract = Blockchain.Default().GetContract(item)
if contract is not None:
bjson = json.dumps(contract.ToJson(), indent=4)
tokens = [(Token.Number, bjson)]
print_tokens(tokens, self.token_style)
print('\n')
else:
print("please specify a contract")
def test_invoke_contract(self, args):
if not self.Wallet:
print("please open a wallet")
return
if args and len(args) > 0:
tx, fee, results, num_ops = TestInvokeContract(self.Wallet, args)
f = open(args[0]+".invoke", "w")
if tx is not None and results is not None:
print("\n-------------------------------------------------------------------------------------------------------------------------------------", file=f)
print("Test invoke successful", file=f)
print("Total operations: %s " % num_ops, file=f)
print("Results %s " % [str(item) for item in results], file=f)
print("Invoke TX gas cost: %s " % (tx.Gas.value / Fixed8.D), file=f)
print("Invoke TX Fee: %s " % (fee.value / Fixed8.D), file=f)
print("-------------------------------------------------------------------------------------------------------------------------------------\n", file=f)
print("Enter your password to continue and invoke on the network\n")
                f.close()
passwd = 'coz'
#passwd = prompt("[password]> ", is_password=True)
if not self.Wallet.ValidatePassword(passwd):
return print("Incorrect password")
result = InvokeContract(self.Wallet, tx, fee)
if result is False:
return None
return tx
else:
print("Error testing contract invoke")
return None
print("please specify a contract to invoke")
def load_smart_contract(self, args):
if not self.Wallet:
print("please open wallet")
return
function_code = LoadContract(args[1:])
if function_code:
#cname=None
#if len(args) > 6:
# cname = args[6:][0]
#print("name="+args[6:][0])
#contract_script = GatherContractDetails(function_code, self, cname)
contract_script = GatherContractDetails(function_code, self)
if contract_script is not None:
tx, fee, results, num_ops = test_invoke(contract_script, self.Wallet, [])
if tx is not None and results is not None:
print("\n-------------------------------------------------------------------------------------------------------------------------------------")
print("Test deploy invoke successful")
print("Total operations executed: %s " % num_ops)
print("Results %s " % [str(item) for item in results])
print("Deploy Invoke TX gas cost: %s " % (tx.Gas.value / Fixed8.D))
print("Deploy Invoke TX Fee: %s " % (fee.value / Fixed8.D))
print("-------------------------------------------------------------------------------------------------------------------------------------\n")
print("Enter your password to continue and deploy this contract")
passwd = 'coz'
#passwd = prompt("[password]> ", is_password=True)
if not self.Wallet.ValidatePassword(passwd):
return print("Incorrect password")
result = InvokeContract(self.Wallet, tx, Fixed8.Zero())
if result is False:
return None
return tx
else:
print("test ivoke failed")
print("tx is, results are %s %s " % (tx, results))
return None
def show_mem(self):
total = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
totalmb = total / 1000000
out = "Total: %s MB\n" % totalmb
out += "total buffers %s\n" % StreamManager.TotalBuffers()
print_tokens([(Token.Number, out)], self.token_style)
def handle_debug_storage(self, args):
what = get_arg(args)
if what == 'on':
settings.USE_DEBUG_STORAGE = True
print("Debug Storage On")
elif what == 'off':
settings.USE_DEBUG_STORAGE = False
print("Debug Storage Off")
elif what == 'reset':
DebugStorage.instance().reset()
print("Reset Debug Storage")
else:
print("Please specify on/off/reset")
def configure(self, args):
what = get_arg(args)
if what == 'debug':
c1 = get_arg(args, 1).lower()
if c1 is not None:
if c1 == 'on' or c1 == '1':
print("debug logging is now enabled")
settings.set_loglevel(logging.DEBUG)
if c1 == 'off' or c1 == '0':
print("debug logging is now disabled")
settings.set_loglevel(logging.INFO)
else:
print("cannot configure log. Please specify on or off")
elif what == 'sc-events':
c1 = get_arg(args, 1).lower()
if c1 is not None:
if c1 == 'on' or c1 == '1':
print("smart contract event logging is now enabled")
settings.set_log_smart_contract_events(True)
if c1 == 'off' or c1 == '0':
print("smart contract event logging is now disabled")
settings.set_log_smart_contract_events(False)
else:
print("cannot configure log. Please specify on or off")
else:
print("cannot configure %s " % what)
print("Try 'config log on/off'")
def parse_result(self, result):
if len(result):
commandParts = [s for s in result.split()]
return commandParts[0], commandParts[1:]
return None, None
def wait_for_tx(self, tx, max_seconds=300):
""" Wait for tx to show up on blockchain """
foundtx = False
sec_passed = 0
while not foundtx and sec_passed < max_seconds:
_tx, height = Blockchain.Default().GetTransaction(tx.Hash.ToString())
if height > -1:
foundtx = True
print("Transaction found with success")
continue
if sec_passed < 4:
print("Waiting for tx {} to show up on blockchain...".format(tx.Hash.ToString()))
time.sleep(4)
sec_passed += 4
if foundtx:
return True
else:
print("Transaction was relayed but never accepted by consensus node")
return False
def run(self):
dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
dbloop.start(.1)
Blockchain.Default().PersistBlocks()
while Blockchain.Default().Height < 2:
print("Waiting for prompt to sync...")
time.sleep(1)
tokens = [(Token.Neo, 'NEO'), (Token.Default, ' cli. Type '),
(Token.Command, "'help' "), (Token.Default, 'to get started')]
print_tokens(tokens, self.token_style)
print("\n")
timebase = time.time()
while self.go_on:
try:
if len(self.mycommands) > 0:
timenow = time.time()
if timenow - timebase > 3: #wait 3 seconds
timebase = timenow
result = self.mycommands.pop()
else:
time.sleep(0.5) # slow refresh
continue
#time.sleep(3)
else:
result = prompt("neo> ",
completer=self.get_completer(),
history=self.history,
get_bottom_toolbar_tokens=self.get_bottom_toolbar,
style=self.token_style,
refresh_interval=3
)
# Control-D pressed: quit
#print("result=",result)
#print("mycommands ->", self.mycommands)
except EOFError:
return self.quit()
except KeyboardInterrupt:
# Control-C pressed: do nothing
continue
try:
command, arguments = self.parse_result(result)
if command is not None and len(command) > 0:
command = command.lower()
if command == 'quit' or command == 'exit':
time.sleep(2) # consolidate chain
self.quit()
elif command == 'help':
self.help()
elif command == 'create':
self.do_create(arguments)
elif command == 'open':
self.do_open(arguments)
elif command == 'build':
self.do_build(arguments)
elif command == 'load_run':
self.do_load_n_run(arguments)
elif command == 'import':
tx = self.do_import(arguments)
# Wait until transaction is on blockchain
if tx is not None:
self.wait_for_tx(tx)
elif command == 'export':
self.do_export(arguments)
elif command == 'wallet':
self.show_wallet(arguments)
elif command == 'send':
tx = self.do_send(arguments)
if tx is not None:
                            if tx != 0:
self.wait_for_tx(tx)
elif command == 'sign':
self.do_sign(arguments)
elif command == 'block':
self.show_block(arguments)
elif command == 'tx':
self.show_tx(arguments)
elif command == 'header':
self.show_header(arguments)
elif command == 'account':
self.show_account_state(arguments)
elif command == 'asset':
self.show_asset_state(arguments)
elif command == 'contract':
tx = self.show_contract_state(arguments)
elif command == 'testinvoke':
tx = self.test_invoke_contract(arguments)
# Wait until transaction is on blockchain
if tx is not None:
self.wait_for_tx(tx)
elif command == 'mem':
self.show_mem()
elif command == 'nodes' or command == 'node':
self.show_nodes()
elif command == 'state':
self.show_state()
elif command == 'debugstorage':
self.handle_debug_storage(arguments)
elif command == 'config':
self.configure(arguments)
elif command is None:
print('please specify a command')
else:
print("command %s not found" % command)
except Exception as e:
print("could not execute command: %s " % e)
traceback.print_stack()
traceback.print_exc()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mainnet", action="store_true", default=False,
help="Use MainNet instead of the default TestNet")
parser.add_argument("-p", "--privnet", action="store_true", default=False,
help="Use PrivNet instead of the default TestNet")
parser.add_argument("-c", "--config", action="store", help="Use a specific config file")
parser.add_argument("-t", "--set-default-theme", dest="theme",
choices=["dark", "light"], help="Set the default theme to be loaded from the config file. Default: 'dark'")
parser.add_argument('--version', action='version',
version='neo-python v{version}'.format(version=__version__))
parser.add_argument("-e", "--exec_command", action="store", help="Use a specific commands")
args = parser.parse_args()
if args.config and (args.mainnet or args.privnet):
print("Cannot use both --config and --mainnet/--privnet arguments, please use only one.")
exit(1)
if args.mainnet and args.privnet:
print("Cannot use both --mainnet and --privnet arguments")
exit(1)
# Setup depending on command line arguments. By default, the testnet settings are already loaded.
if args.config:
settings.setup(args.config)
elif args.mainnet:
settings.setup_mainnet()
elif args.privnet:
settings.setup_privnet()
mycoms = []
mycoms2 = []
if args.exec_command:
mycoms = args.exec_command.split(',') #print("exec:"+args.exec_command)
for k in mycoms:
mycoms2.append( codecs.decode(k, "hex").decode() )
if args.theme:
preferences.set_theme(args.theme)
# Instantiate the blockchain and subscribe to notifications
blockchain = LevelDBBlockchain(settings.LEVELDB_PATH)
Blockchain.RegisterBlockchain(blockchain)
# Start the prompt interface
cli = PromptInterface()
cli.mycommands = mycoms2 #['oi', 'oi2']
# Run
reactor.suggestThreadPoolSize(15)
reactor.callInThread(cli.run)
NodeLeader.Instance().Start()
reactor.run()
if __name__ == "__main__":
main()
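# ---------------------------------------------------------------------------
# Usage sketch (not part of the original script; the command strings below are
# placeholders). -e/--exec_command expects a comma-separated list of
# hex-encoded prompt commands, mirroring the codecs.decode(k, "hex") call in
# main(). One plausible way to build that argument:
#     cmds = ['open wallet /path/to/wallet', 'wallet']
#     exec_arg = ','.join(codecs.encode(c.encode(), 'hex').decode() for c in cmds)
#     # python unsafeprompt.py -p -e <exec_arg>
# ---------------------------------------------------------------------------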
| 38.03946 | 168 | 0.525224 |
4a214fc2e6d3262078dc514c158bbe08968eefe1
| 665 |
py
|
Python
|
mod/httpserver.py
|
rbxnk/fips
|
b1bd5f33f04d48f080e621d27214c254149924ca
|
[
"MIT"
] | null | null | null |
mod/httpserver.py
|
rbxnk/fips
|
b1bd5f33f04d48f080e621d27214c254149924ca
|
[
"MIT"
] | null | null | null |
mod/httpserver.py
|
rbxnk/fips
|
b1bd5f33f04d48f080e621d27214c254149924ca
|
[
"MIT"
] | null | null | null |
"""wrap SimpleHTTPServer and prevent Ctrl-C stack trace output"""
import sys
if sys.version_info > (3, 0):
import http.server as SimpleHTTPServer
import socketserver as SocketServer
else:
import SimpleHTTPServer
import SocketServer
import log
try:
log.colored(log.GREEN, 'serving on http://localhost:8000 (Ctrl-C to quit)')
SocketServer.TCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer(('localhost', 8000), SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
httpd.server_close()
log.colored(log.GREEN, '\nhttp server stopped')
exit(0)
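# ---------------------------------------------------------------------------
# Usage note (not part of the original module; the file name in the curl line
# is a placeholder). Running this module starts serving the current working
# directory immediately, so from a fips project root one might do:
#     python mod/httpserver.py       # assumes fips' `log` module is importable
#     curl http://localhost:8000/some_page.html
# Only the host and port (localhost:8000) come from the code above.
# ---------------------------------------------------------------------------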
| 26.6 | 98 | 0.738346 |
4a214fcf44deca28ad3f9bd7188804edb3393aec
| 11,730 |
py
|
Python
|
easier68k/core/opcodes/subq.py
|
bpas247/Easier68k
|
30a39883f1e73cd2bd848cf7bd356c96b8664ff4
|
[
"MIT"
] | null | null | null |
easier68k/core/opcodes/subq.py
|
bpas247/Easier68k
|
30a39883f1e73cd2bd848cf7bd356c96b8664ff4
|
[
"MIT"
] | null | null | null |
easier68k/core/opcodes/subq.py
|
bpas247/Easier68k
|
30a39883f1e73cd2bd848cf7bd356c96b8664ff4
|
[
"MIT"
] | null | null | null |
from ...core.enum.ea_mode import EAMode
from ...core.enum.op_size import OpSize
from ...core.enum import ea_mode_bin
from ...core.enum.ea_mode_bin import parse_ea_from_binary
from ...simulator.m68k import M68K
from ...core.opcodes.opcode import Opcode
from ...core.util.split_bits import split_bits
from ...core.util import opcode_util
from ..util.parsing import parse_assembly_parameter
from ..models.assembly_parameter import AssemblyParameter
from ..enum.condition_status_code import ConditionStatusCode
from ..models.memory_value import MemoryValue
class Subq(Opcode): # Forward declaration
pass
class Subq(Opcode):
# Allowed sizes for this opcode
valid_sizes = [OpSize.BYTE, OpSize.WORD, OpSize.LONG]
def __init__(self, params: list, size: OpSize = OpSize.WORD):
assert len(params) == 2
assert isinstance(params[0], AssemblyParameter)
assert isinstance(params[1], AssemblyParameter)
# check src param is valid
assert params[0].mode == EAMode.IMM
self.src = params[0]
# check the dest param is valid. Can't be immediate data
assert params[1].mode != EAMode.IMM
self.dest = params[1]
assert size in Subq.valid_sizes
self.size = size
def assemble(self) -> bytearray:
"""
Assembles this opcode into hex to be inserted into memory
:return: The hex version of this opcode
"""
# 1101 Dn xxx D x S xx M xxx Xn xxx
# ret_opcode is the binary value which represents the assembled instruction
ret_opcode = 0b0101 << 12
ret_opcode |= self.src.data << 9
ret_opcode |= 0b1 << 8
if self.size == OpSize.BYTE:
ret_opcode |= 0b00 << 6
elif self.size == OpSize.WORD:
ret_opcode |= 0b01 << 6
elif self.size == OpSize.LONG:
ret_opcode |= 0b10 << 6
ret_opcode |= ea_mode_bin.parse_from_ea_mode_modefirst(self.dest) << 0
ret_bytes = bytearray(ret_opcode.to_bytes(2, byteorder='big', signed=False))
if self.dest.mode == EAMode.AWA or self.dest.mode == EAMode.ALA:
ret_bytes.extend(opcode_util.ea_to_binary_post_op(self.dest, self.size).get_value_bytearray())
return ret_bytes
def execute(self, simulator: M68K):
"""
Executes this command in a simulator
:param simulator: The simulator to execute the command on
:return: Nothing
"""
# get the length
val_length = self.size.get_number_of_bytes()
# get the value of src from the simulator
src_val = self.src.get_value(simulator, val_length)
# get the value of dest from the simulator
dest_val = self.dest.get_value(simulator, val_length)
# increment the program counter by the length of the instruction (1 word)
to_increment = OpSize.WORD.value
# repeat for the dest
if self.dest.mode in [EAMode.AbsoluteLongAddress]:
to_increment += OpSize.LONG.value
if self.dest.mode in [EAMode.AbsoluteWordAddress]:
to_increment += OpSize.WORD.value
# mask to apply to the source
mask = 0xFF
if self.size is OpSize.BYTE:
mask = 0xFF
if self.size is OpSize.WORD:
mask = 0xFFFF
if self.size is OpSize.LONG:
mask = 0xFFFFFFFF
# which bits of the total should not be modified
inverted_mask = 0xFFFFFFFF ^ mask
# preserve the upper bits of the operation if they aren't used
preserve = dest_val.get_value_signed() & inverted_mask
raw_total = dest_val.get_value_unsigned() - src_val.get_value_unsigned()
total = (raw_total & mask) | preserve
# If the subtraction of the masked destination and source value is
# negative, then a borrow has been generated.
borrow_bit = (mask & dest_val.get_value_unsigned()) - src_val.get_value_unsigned() < 0
negative_bit = 0
if self.size is OpSize.BYTE:
negative_bit = 0x80
elif self.size is OpSize.WORD:
negative_bit = 0x8000
elif self.size is OpSize.LONG:
negative_bit = 0x80000000
negative = total & negative_bit > 0
# Overflow occurs when a sign change occurs where it shouldn't occur.
# For example: positive - negative != negative.
# This doesn't make sense, so an overflow occurs
overflow = False
if dest_val.get_value_unsigned() & 0x80000000 > 0:
if total & negative_bit == 0:
overflow = True
# set the same as the 'C' bit
simulator.set_condition_status_code(ConditionStatusCode.X, borrow_bit)
# set if result is negative
simulator.set_condition_status_code(ConditionStatusCode.N, negative)
# set if result is zero
simulator.set_condition_status_code(ConditionStatusCode.Z, total == 0)
# set if an overflow is generated, cleared otherwise
simulator.set_condition_status_code(ConditionStatusCode.V, overflow)
# set if a borrow is generated, cleared otherwise
simulator.set_condition_status_code(ConditionStatusCode.C, borrow_bit)
# and set the value
self.dest.set_value(simulator, MemoryValue(OpSize.LONG, unsigned_int=total))
# set the program counter value
simulator.increment_program_counter(to_increment)
def __str__(self):
# Makes this a bit easier to read in doctest output
return 'Subq command: Size {}, src {}, dest {}'.format(self.size, self.src, self.dest)
@classmethod
def command_matches(cls, command: str) -> bool:
"""
Checks whether a command string is an instance of this command type
:param command: The command string to check (e.g. 'MOVE.B', 'LEA', etc.)
:return: Whether the string is an instance of this command type
"""
return opcode_util.command_matches(command, 'SUB')
@classmethod
def get_word_length(cls, command: str, parameters: str) -> int:
"""
>>> Subq.get_word_length('SUBQ.B', '#5, D3')
1
>>> Subq.get_word_length('SUBQ.W', '#4, ($BBBB).W')
2
>>> Subq.get_word_length('SUBQ.L', '#7, ($BBBB).W')
2
>>> Subq.get_word_length('SUBQ.W', '#1, ($BBBB).L')
3
>>> Subq.get_word_length('SUBQ.W', '#8, ($BBBB).L')
3
>>> Subq.get_word_length('SUB.L', '#5, ($BBBB).L')
3
Gets what the end length of this command will be in memory
:param command: The text of the command itself (e.g. "LEA", "MOVE.B", etc.)
:param parameters: The parameters after the command
:return: The length of the bytes in memory in words, as well as a list of warnings or errors encountered
"""
# Split the parameters into EA modes
params = parameters.split(',')
# src = parse_assembly_parameter(params[0].strip()) # Parse the source and make sure it parsed right
dest = parse_assembly_parameter(params[1].strip())
length = 1 # Always 1 word not counting additions to end
if dest.mode == EAMode.AWA: # Appends a word
length += 1
if dest.mode == EAMode.ALA: # Appends a long, so 2 words
length += 2
return length
@classmethod
def is_valid(cls, command: str, parameters: str) -> (bool, list):
"""
Tests whether the given command is valid
>>> Subq.is_valid('SUBQ.B', '#2, D1')[0]
True
>>> Subq.is_valid('SUBQ.W', 'D0')[0]
False
>>> Subq.is_valid('SUBQ.G', '#5, D1')[0]
False
>>> Subq.is_valid('SUBQ.L', 'D0, A2')[0]
False
>>> Subq.is_valid('SU.L', '#2, D1')[0]
False
>>> Subq.is_valid('SUBQ.', '#5, D1')[0]
False
>>> Subq.is_valid('SUBQ.W', '#2, #6500')[0]
False
:param command: The command itself (e.g. 'MOVE.B', 'LEA', etc.)
:param parameters: The parameters after the command (such as the source and destination of a move)
:return: Whether the given command is valid and a list of issues/warnings encountered
"""
return opcode_util.n_param_is_valid(command, parameters, "SUBQ", 2, param_invalid_modes=[[EAMode.ARD],
[EAMode.ARD,
EAMode.IMM]])[:2]
@classmethod
def disassemble_instruction(cls, data: bytearray) -> Opcode:
"""
This has a non-subq opcode
>>> Subq.disassemble_instruction(bytearray.fromhex('0280'))
SUBQ.B #2,D7
>>> op = Subq.disassemble_instruction(bytearray.fromhex('5507'))
>>> str(op.src)
'EA Mode: EAMode.IMM, Data: 2'
>>> str(op.dest)
'EA Mode: EAMode.DRD, Data: 7'
SUBQ.W #5,D1
>>> op = Subq.disassemble_instruction(bytearray.fromhex('5B41'))
>>> str(op.src)
'EA Mode: EAMode.IMM, Data: 5'
>>> str(op.dest)
'EA Mode: EAMode.DRD, Data: 1'
SUBQ.L #7,(A0)
>>> op = Subq.disassemble_instruction(bytearray.fromhex('5F90'))
>>> str(op.src)
'EA Mode: EAMode.IMM, Data: 7'
>>> str(op.dest)
'EA Mode: EAMode.ARI, Data: 0'
SUBQ.L #3,$4000
>>> op = Subq.disassemble_instruction(bytearray.fromhex('57B84000'))
>>> str(op.src)
'EA Mode: EAMode.IMM, Data: 3'
>>> str(op.dest)
'EA Mode: EAMode.AWA, Data: 16384'
Parses some raw data into an instance of the opcode class
:param data: The data used to convert into an opcode instance
:return: The constructed instance or none if there was an error and
the amount of data in words that was used (e.g. extra for immediate
data) or 0 for not a match
"""
assert len(data) >= 2, 'Opcode size is at least one word'
first_word = int.from_bytes(data[0:2], 'big')
[opcode_bin,
data_bin,
one_bin,
size_bin,
ea_mode_binary,
ea_reg_bin] = split_bits(first_word, [4, 3, 1, 2, 3, 3])
if opcode_bin != 0b0101 or one_bin != 0b1:
return None
src = None
dest = None
size = None
# populate source data
src = AssemblyParameter(EAMode.IMM, data_bin)
# Determine size
if size_bin == 0b00:
size = OpSize.BYTE
elif size_bin == 0b01:
size = OpSize.WORD
elif size_bin == 0b10:
size = OpSize.LONG
else:
return None
# populate destination data
        dest = parse_ea_from_binary(ea_mode_binary, ea_reg_bin, size, False, data[2:])[0]
return cls([src, dest], size)
@classmethod
def from_str(cls, command: str, parameters: str):
"""
Parses a SUBQ command from text.
>>> str(Subq.from_str('SUBQ.B', '#4, D1'))
'Subq command: Size OpSize.BYTE, src EA Mode: EAMode.IMM, Data: 4, dest EA Mode: EAMode.DRD, Data: 1'
>>> str(Subq.from_str('SUBQ.L', '#1, (A0)'))
'Subq command: Size OpSize.LONG, src EA Mode: EAMode.IMM, Data: 1, dest EA Mode: EAMode.ARI, Data: 0'
:param command: The command itself (e.g. 'MOVE.B', 'LEA', etc.)
:param parameters: The parameters after the command (such as the source and destination of a move)
:return: The parsed command
"""
return opcode_util.n_param_from_str(command, parameters, Subq, 2, OpSize.WORD)
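if __name__ == "__main__":
    # Round-trip sketch (not part of the original module). The mnemonic and
    # operands are illustrative values, and running this directly assumes the
    # package context (e.g. `python -m easier68k.core.opcodes.subq`) because of
    # the relative imports above.
    op = Subq.from_str('SUBQ.B', '#4, D1')
    encoded = op.assemble()
    print(encoded.hex())                          # raw instruction word(s)
    print(Subq.disassemble_instruction(encoded))  # parsed back into a Subq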
| 34.198251 | 114 | 0.599744 |
4a21508fc1af87a19d37c02da2dc7d22ce43f4e2
| 3,181 |
py
|
Python
|
examples/references/segmentation/pascal_voc2012/configs/train/baseline_resnet101.py
|
ryanwongsa/ignite
|
948532834c2c60a6fae35d9754d5a8e74faceeb4
|
[
"BSD-3-Clause"
] | 1 |
2020-09-18T18:28:30.000Z
|
2020-09-18T18:28:30.000Z
|
examples/references/segmentation/pascal_voc2012/configs/train/baseline_resnet101.py
|
alxlampe/ignite
|
b53c6aeef87754b3cd3638c91172b386dc73af12
|
[
"BSD-3-Clause"
] | null | null | null |
examples/references/segmentation/pascal_voc2012/configs/train/baseline_resnet101.py
|
alxlampe/ignite
|
b53c6aeef87754b3cd3638c91172b386dc73af12
|
[
"BSD-3-Clause"
] | null | null | null |
# Basic training configuration
import os
from functools import partial
import cv2
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from torchvision.models.segmentation import deeplabv3_resnet101
import albumentations as A
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow.dataloaders import get_train_val_loaders
from dataflow.transforms import ignore_mask_boundaries, prepare_batch_fp32, denormalize
# ##############################
# Global configs
# ##############################
seed = 19
device = "cuda"
debug = False
fp16_opt_level = "O2"
num_classes = 21
batch_size = 18 # total batch size
val_batch_size = 24
num_workers = 12
val_interval = 1
accumulation_steps = 4
val_img_size = 513
train_img_size = 480
# ##############################
# Setup Dataflow
# ##############################
assert "DATASET_PATH" in os.environ
data_path = os.environ["DATASET_PATH"]
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
train_transforms = A.Compose(
[
A.RandomScale(scale_limit=(0.0, 1.5), interpolation=cv2.INTER_LINEAR, p=1.0),
A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT),
A.RandomCrop(train_img_size, train_img_size),
A.HorizontalFlip(),
A.Blur(blur_limit=3),
A.Normalize(mean=mean, std=std),
ignore_mask_boundaries,
ToTensor(),
]
)
val_transforms = A.Compose(
[
A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT),
A.Normalize(mean=mean, std=std),
ignore_mask_boundaries,
ToTensor(),
]
)
train_loader, val_loader, train_eval_loader = get_train_val_loaders(
root_path=data_path,
train_transforms=train_transforms,
val_transforms=val_transforms,
batch_size=batch_size,
num_workers=num_workers,
val_batch_size=val_batch_size,
limit_train_num_samples=100 if debug else None,
limit_val_num_samples=100 if debug else None,
)
prepare_batch = prepare_batch_fp32
# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
# ##############################
# Setup models
# ##############################
num_classes = 21
model = deeplabv3_resnet101(num_classes=num_classes)
def model_output_transform(output):
return output["out"]
# ##############################
# Setup solver
# ##############################
num_epochs = 100
criterion = nn.CrossEntropyLoss()
lr = 0.007
weight_decay = 5e-4
momentum = 0.9
nesterov = False
optimizer = optim.SGD(
[{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()}],
lr=1.0,
momentum=momentum,
weight_decay=weight_decay,
nesterov=nesterov,
)
le = len(train_loader)
def lambda_lr_scheduler(iteration, lr0, n, a):
return lr0 * pow((1.0 - 1.0 * iteration / n), a)
lr_scheduler = lrs.LambdaLR(
optimizer,
lr_lambda=[
partial(lambda_lr_scheduler, lr0=lr, n=num_epochs * le, a=0.9),
partial(lambda_lr_scheduler, lr0=lr * 10.0, n=num_epochs * le, a=0.9),
],
)
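# ---------------------------------------------------------------------------
# Illustration (not part of the original config). lambda_lr_scheduler is the
# usual polynomial ("poly") decay lr0 * (1 - t / T) ** a; because the SGD base
# lr above is 1.0, the lambdas return the absolute learning rates directly.
# For the backbone group with lr0 = 0.007, T = num_epochs * le and a = 0.9:
#     lambda_lr_scheduler(0, lr0=lr, n=num_epochs * le, a=0.9)                       # 0.007 at t = 0
#     lambda_lr_scheduler(num_epochs * le // 2, lr0=lr, n=num_epochs * le, a=0.9)    # ~0.00375 halfway through
# ---------------------------------------------------------------------------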
| 22.721429 | 93 | 0.662999 |
4a2151ec23787c17b45fb3a9fb18393327df4eaa
| 1,873 |
py
|
Python
|
alipay/aop/api/domain/AntfortuneContentCommunityDataSendModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213 |
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AntfortuneContentCommunityDataSendModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29 |
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AntfortuneContentCommunityDataSendModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59 |
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AntfortuneContentCommunityDataSendModel(object):
def __init__(self):
self._content = None
self._source_id = None
self._soure_type = None
@property
def content(self):
return self._content
@content.setter
def content(self, value):
self._content = value
@property
def source_id(self):
return self._source_id
@source_id.setter
def source_id(self, value):
self._source_id = value
@property
def soure_type(self):
return self._soure_type
@soure_type.setter
def soure_type(self, value):
self._soure_type = value
def to_alipay_dict(self):
params = dict()
if self.content:
if hasattr(self.content, 'to_alipay_dict'):
params['content'] = self.content.to_alipay_dict()
else:
params['content'] = self.content
if self.source_id:
if hasattr(self.source_id, 'to_alipay_dict'):
params['source_id'] = self.source_id.to_alipay_dict()
else:
params['source_id'] = self.source_id
if self.soure_type:
if hasattr(self.soure_type, 'to_alipay_dict'):
params['soure_type'] = self.soure_type.to_alipay_dict()
else:
params['soure_type'] = self.soure_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AntfortuneContentCommunityDataSendModel()
if 'content' in d:
o.content = d['content']
if 'source_id' in d:
o.source_id = d['source_id']
if 'soure_type' in d:
o.soure_type = d['soure_type']
return o
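if __name__ == "__main__":
    # Round-trip sketch (not part of the generated model); the field values are
    # placeholders, and 'soure_type' keeps the spelling used by the API fields
    # above.
    model = AntfortuneContentCommunityDataSendModel()
    model.content = 'example community content'
    model.source_id = 'placeholder-source-id'
    model.soure_type = 'PLACEHOLDER_TYPE'
    payload = model.to_alipay_dict()
    print(json.dumps(payload))
    restored = AntfortuneContentCommunityDataSendModel.from_alipay_dict(payload)
    print(restored.source_id)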
| 26.380282 | 71 | 0.584624 |
4a215240e0b2f4b25ed7e6f96f476d92784d7906
| 2,584 |
py
|
Python
|
tests/strategies/test_custom_plugin.py
|
AJSVB/pytorch-lightning
|
00211c1de3c5901789417263f14a36c846cc42d1
|
[
"Apache-2.0"
] | 2 |
2019-11-24T10:28:10.000Z
|
2020-06-20T12:57:35.000Z
|
tests/strategies/test_custom_plugin.py
|
AJSVB/pytorch-lightning
|
00211c1de3c5901789417263f14a36c846cc42d1
|
[
"Apache-2.0"
] | 1 |
2021-03-30T12:12:49.000Z
|
2021-03-30T12:12:49.000Z
|
tests/strategies/test_custom_plugin.py
|
AJSVB/pytorch-lightning
|
00211c1de3c5901789417263f14a36c846cc42d1
|
[
"Apache-2.0"
] | 2 |
2022-02-11T08:26:13.000Z
|
2022-03-21T03:48:34.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Mapping
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.strategies import DDPStrategy, SingleDeviceStrategy
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
class CustomParallelStrategy(DDPStrategy):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Set to None so it will be overwritten by the accelerator connector.
self.sync_batchnorm = None
@RunIf(skip_windows=True)
def test_sync_batchnorm_set(tmpdir):
"""Tests if sync_batchnorm is automatically set for custom plugin."""
model = BoringModel()
strategy = CustomParallelStrategy()
assert strategy.sync_batchnorm is None
trainer = Trainer(max_epochs=1, strategy=strategy, default_root_dir=tmpdir, sync_batchnorm=True)
trainer.fit(model)
assert strategy.sync_batchnorm is True
@pytest.mark.parametrize("restore_optimizer_and_schedulers", [True, False])
def test_strategy_lightning_restore_optimizer_and_schedulers(tmpdir, restore_optimizer_and_schedulers):
class TestStrategy(SingleDeviceStrategy):
load_optimizer_state_dict_called = False
@property
def lightning_restore_optimizer(self) -> bool:
return restore_optimizer_and_schedulers
def load_optimizer_state_dict(self, checkpoint: Mapping[str, Any]) -> None:
self.load_optimizer_state_dict_called = True
# create ckpt to resume from
checkpoint_path = os.path.join(tmpdir, "model.ckpt")
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model)
trainer.save_checkpoint(checkpoint_path)
model = BoringModel()
strategy = TestStrategy(torch.device("cpu"))
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, strategy=strategy)
trainer.fit(model, ckpt_path=checkpoint_path)
assert strategy.load_optimizer_state_dict_called == restore_optimizer_and_schedulers
| 38 | 103 | 0.765093 |
4a21528dc054baccbae222d427431546956ee748
| 159 |
py
|
Python
|
figuregen/__init__.py
|
Mira-13/figure-gen
|
fd1b8814423dd34973a3fafe68ff5c0f95c08590
|
[
"MIT"
] | 75 |
2020-09-17T17:17:17.000Z
|
2022-01-21T14:28:14.000Z
|
figuregen/__init__.py
|
neshume/figure-gen
|
e4cb1d9ea1841980b4dc90953325e1a8c6dfb510
|
[
"MIT"
] | 3 |
2020-09-29T11:51:35.000Z
|
2020-10-22T15:27:40.000Z
|
figuregen/__init__.py
|
neshume/figure-gen
|
e4cb1d9ea1841980b4dc90953325e1a8c6dfb510
|
[
"MIT"
] | 2 |
2020-10-24T05:57:49.000Z
|
2022-03-14T17:06:02.000Z
|
# Import API functions
from .figuregen import *
from .element_data import *
from .matplot_lineplot import MatplotLinePlot
from .pgf_lineplot import PgfLinePlot
| 31.8 | 45 | 0.836478 |
4a2153097afb36335b448ff6adb4136be7775334
| 402 |
py
|
Python
|
map/migrations/0003_eventmodel_numberofattendees.py
|
zacharyyahn/CS3240_Project
|
8f8ce4f787d7fc33721543ce05f0f650220fb11d
|
[
"Apache-2.0",
"Unlicense"
] | null | null | null |
map/migrations/0003_eventmodel_numberofattendees.py
|
zacharyyahn/CS3240_Project
|
8f8ce4f787d7fc33721543ce05f0f650220fb11d
|
[
"Apache-2.0",
"Unlicense"
] | null | null | null |
map/migrations/0003_eventmodel_numberofattendees.py
|
zacharyyahn/CS3240_Project
|
8f8ce4f787d7fc33721543ce05f0f650220fb11d
|
[
"Apache-2.0",
"Unlicense"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-04-14 03:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('map', '0002_auto_20210413_1835'),
]
operations = [
migrations.AddField(
model_name='eventmodel',
name='numberOfAttendees',
field=models.PositiveIntegerField(default=0),
),
]
| 21.157895 | 57 | 0.614428 |
4a21532bfa3c393f32956ec86830a27349a79c60
| 280 |
py
|
Python
|
setup.py
|
ThordurPall/fish_classifier
|
f28121fba92076e56fdfd047018176a92557ecff
|
[
"MIT"
] | 1 |
2021-06-17T11:34:25.000Z
|
2021-06-17T11:34:25.000Z
|
setup.py
|
FrederikKromann/fish_classifier
|
f28121fba92076e56fdfd047018176a92557ecff
|
[
"MIT"
] | 3 |
2021-06-10T10:14:14.000Z
|
2021-06-16T10:58:44.000Z
|
setup.py
|
FrederikKromann/fish_classifier
|
f28121fba92076e56fdfd047018176a92557ecff
|
[
"MIT"
] | 5 |
2021-06-10T09:24:55.000Z
|
2021-06-24T09:35:19.000Z
|
from setuptools import find_packages, setup
setup(
name="src",
packages=find_packages(),
version="0.1.40",
description="A neural network fish classifier using a large-scale dataset for fish segmentation and classification",
author="DTU",
license="MIT",
)
| 25.454545 | 120 | 0.710714 |
4a21539dd182df4a418f2deffdcf94419f128146
| 1,531 |
py
|
Python
|
src/updater/type.py
|
JamzumSum/AssetsUpdater
|
05fc783fe116cab8d754c237356ee3b4fdd098f0
|
[
"MIT"
] | 1 |
2021-09-28T11:51:02.000Z
|
2021-09-28T11:51:02.000Z
|
src/updater/type.py
|
JamzumSum/AssetsUpdater
|
05fc783fe116cab8d754c237356ee3b4fdd098f0
|
[
"MIT"
] | null | null | null |
src/updater/type.py
|
JamzumSum/AssetsUpdater
|
05fc783fe116cab8d754c237356ee3b4fdd098f0
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod, abstractproperty
from dataclasses import dataclass
from typing import AsyncGenerator, Callable, List, Optional
Pred = Callable[["Release"], bool]
@dataclass(frozen=True)
class Asset:
from_tag: str
name: str
download_url: str
def __repr__(self) -> str:
return self.name
class Release(ABC):
@abstractproperty
def tag(self) -> str:
return ""
@abstractproperty
def title(self) -> str:
return ""
@abstractmethod
def assets(self) -> List[Asset]:
return []
@abstractproperty
def pre(self) -> bool:
return False
def has_asset(self, name: str):
for i in self.assets():
if i.name == name:
return True
return False
def __repr__(self) -> str:
tag = f"<{self.tag}> " if self.tag else ""
return tag + self.title
class Updater(ABC):
@abstractmethod
async def latest(self, pre: bool = False) -> Optional[Release]:
return
@abstractmethod
def all_iter(
self, num: Optional[int], pre: bool = False, start: int = 0, **kwds
) -> AsyncGenerator[Release, None]:
pass
async def all(self, num: Optional[int], pre: bool = False) -> List[Release]:
return [i async for i in self.all_iter(num, pre)]
async def filter(self, pred: Pred, pre: bool = False, start: int = 0, **kwds):
async for i in self.all_iter(None, pre, start, **kwds):
if pred(i):
yield i
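if __name__ == "__main__":
    # Minimal in-memory sketch (not part of the original module) showing how a
    # concrete Release/Updater pair fits the ABCs above; every name and value
    # here is illustrative only.
    import asyncio
    class StaticRelease(Release):
        def __init__(self, tag: str, title: str, assets: List[Asset], pre: bool = False):
            self._tag, self._title, self._assets, self._pre = tag, title, assets, pre
        @property
        def tag(self) -> str:
            return self._tag
        @property
        def title(self) -> str:
            return self._title
        def assets(self) -> List[Asset]:
            return self._assets
        @property
        def pre(self) -> bool:
            return self._pre
    class StaticUpdater(Updater):
        def __init__(self, releases: List[Release]):
            self._releases = releases
        async def latest(self, pre: bool = False) -> Optional[Release]:
            return next((r for r in self._releases if pre or not r.pre), None)
        async def all_iter(self, num, pre: bool = False, start: int = 0, **kwds):
            stop = None if num is None else start + num
            for release in self._releases[start:stop]:
                if pre or not release.pre:
                    yield release
    demo = StaticUpdater([
        StaticRelease('v1.0', 'First release',
                      [Asset('v1.0', 'app.zip', 'https://example.com/app.zip')]),
    ])
    print(asyncio.run(demo.latest()))   # <v1.0> First release
    print(asyncio.run(demo.all(None)))  # [<v1.0> First release]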
| 23.921875 | 82 | 0.595689 |
4a215441ee4c5debd75710a5a8decb50155a9f32
| 9,936 |
py
|
Python
|
docs/source/conf.py
|
wblong/PVGeo-Copy
|
a7f143a037c3abddcc5b8f70f84a0dee38874bfc
|
[
"BSD-3-Clause"
] | null | null | null |
docs/source/conf.py
|
wblong/PVGeo-Copy
|
a7f143a037c3abddcc5b8f70f84a0dee38874bfc
|
[
"BSD-3-Clause"
] | null | null | null |
docs/source/conf.py
|
wblong/PVGeo-Copy
|
a7f143a037c3abddcc5b8f70f84a0dee38874bfc
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import shutil
import faulthandler
faulthandler.enable()
# Add PVGeo to the path
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
print(path)
sys.path.insert(0, path)
sys.path.insert(0, '/Users/bane/Documents/OpenGeoVis/Software/gendocs/')
# Mock the paraview module to build pvmacros docs
import mock
MOCK_MODULES = ['paraview', 'paraview.simple', 'discretize', 'pyproj']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
autodoc_mock_imports = ['paraview']
# # Automattically generat source pages:
# os.system('python ./make_files.py')
import PVGeo, pvmacros # for documenting
from gendocs import Generator
append_material = """
.. toctree::
:maxdepth: 2
:caption: Examples
:hidden:
about-examples.rst
examples/index
"""
extra = """
.. toctree::
:maxdepth: 2
:caption: Development Guide
:hidden:
dev-guide/contributing
dev-guide/repo-structure
dev-guide/templates
dev-guide/snippets/index
dev-guide/resources
"""
# Automatically generate documentation pages
Generator().DocumentPackages([PVGeo, pvmacros],
index_base='../index_base.rst',
showprivate=True,
notify=False,
intro_pages=['overview/why-pvgeo',
'overview/getting-started',
'overview/featured',
'overview/agu-2018',
],
append_material=append_material,
extra=extra,
)
import pyvista
import numpy as np
# Manage errors
pyvista.set_error_output_file('errors.txt')
# Ensure that offscreen rendering is used for docs generation
pyvista.OFF_SCREEN = True # Not necessary - simply an insurance policy
# Preferred plotting style for documentation
pyvista.set_plot_theme('document')
# -- Project information -----------------------------------------------------
project = 'PVGeo'
copyright = u'2018-2019, Bane Sullivan, http://banesullivan.com'
author = 'Bane Sullivan'
html_show_copyright = False
html_show_sphinx = False
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.extlinks',
'sphinxcontrib.napoleon',
'sphinx_copybutton',
'sphinx_gallery.gen_gallery',
'notfound.extension',
]
linkcheck_retries = 3
linkcheck_timeout = 500
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'canonical_url': 'http://docs.pvgeo.org/',
'analytics_id': 'UA-115959679-6',
'display_version': False,
}
html_context = {
# Enable the "Edit in GitHub link within the header of each page.
'display_github': True,
# Set the following variables to generate the resulting github URL for each page.
# Format Template: https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/blob/{{ github_version }}{{ conf_py_path }}{{ pagename }}{{ suffix }}
'github_user': 'OpenGeoVis',
'github_repo': 'PVGeo',
'github_version': 'master/docs/',
'menu_links_name': 'Getting Connected',
'menu_links': [
('<i class="fa fa-slack fa-fw"></i> Slack Community', 'http://slack.pyvista.org'),
('<i class="fa fa-comment fa-fw"></i> Support', 'https://github.com/pyvista/pyvista-support'),
('<i class="fa fa-github fa-fw"></i> Source Code', 'https://github.com/OpenGeoVis/PVGeo'),
('<i class="fa fa-gavel fa-fw"></i> Contributing', 'https://pvgeo.org/dev-guide/contributing.html'),
('<i class="fa fa-file-text fa-fw"></i> The Paper', 'https://doi.org/10.21105/joss.01451'),
],
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PVGeoDoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PVGeo.tex', 'PVGeo Documentation',
'Bane Sullivan', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pvgeo', 'PVGeo Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PVGeo', 'PVGeo Documentation',
author, 'PVGeo', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Custom 404 page
notfound_context = {
'body': '<h1>Page not found.</h1>\n\nPerhaps try the <a href="https://pvgeo.org/about-examples.html">About Examples page</a>.',
}
notfound_no_urls_prefix = True
# -- Sphinx Gallery Options
from sphinx_gallery.sorting import FileNameSortKey
# thumb_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'PVGeo_icon_horiz.png')
sphinx_gallery_conf = {
# path to your examples scripts
"examples_dirs": [
"../../examples/",
],
# path where to save gallery generated examples
"gallery_dirs": ["examples"],
    # Pattern to search for example files
"filename_pattern": r"\.py",
# Remove the "Download all examples" button from the top level gallery
"download_all_examples": False,
# Sort gallery example by file name instead of number of lines (default)
"within_subsection_order": FileNameSortKey,
# directory where function granular galleries are stored
"backreferences_dir": None,
    # Modules for which function-level galleries are created.
"doc_module": "PVGeo",
"image_scrapers": (pyvista.Scraper(), 'matplotlib'),
"thumbnail_size": (350, 350),
# 'default_thumb_file': thumb_path,
}
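# Illustrative note on the settings above: Sphinx-Gallery executes every script
# under "examples_dirs" whose file name matches "filename_pattern", captures any
# pyvista/matplotlib figures via "image_scrapers", and writes the rendered pages
# into the corresponding "gallery_dirs" entry (here docs/source/examples/),
# ordered by file name because of FileNameSortKey.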
def setup(app):
app.add_stylesheet("style.css")
app.add_stylesheet("copybutton.css")
| 30.572308 | 176 | 0.657005 |
4a2155afc5c583d1d51e20dd47366a2b690d6d6c
| 159,748 |
py
|
Python
|
bqtools/tests/test_bqtools.py
|
kpr6/bqtools
|
cdafb108768ca60c77bcb9989108466b4d697a2d
|
[
"MIT"
] | null | null | null |
bqtools/tests/test_bqtools.py
|
kpr6/bqtools
|
cdafb108768ca60c77bcb9989108466b4d697a2d
|
[
"MIT"
] | null | null | null |
bqtools/tests/test_bqtools.py
|
kpr6/bqtools
|
cdafb108768ca60c77bcb9989108466b4d697a2d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This module's purpose is to test bqtools-json
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import difflib
import json
import logging
import pprint
import unittest
from deepdiff import DeepDiff
from google.cloud import bigquery, storage, exceptions
import bqtools
class TestScannerMethods(unittest.TestCase):
def load_data(self, file_name):
with open(file_name) as json_file:
return json.load(json_file)
def setUp(self):
logging.basicConfig()
self.pp = pprint.PrettyPrinter(indent=4)
# test 1 validate can create a schema from a dictionary
self.schemaTest1 = self.load_data("bqtools/tests/schemaTest1.json")
self.schemaTest2 = self.load_data("bqtools/tests/schemaTest2.json")
# next schemas are for testing bare array handling
# this is a starting schema
self.schema2startnobare = self.load_data("bqtools/tests/schema2startnobare.json")
# this adds 2 bare arrays
self.schemaTest2bare = self.load_data("bqtools/tests/schemaTest2bare.json")
        # resultant schema and objects should look like this
self.schemaTest2nonbare = self.load_data("bqtools/tests/schemaTest2nonbare.json")
self.schemaTest4 = self.load_data("bqtools/tests/schemaTest4.json")
self.schemaTest3 = self.load_data("bqtools/tests/schemaTest3.json")
self.monsterSchema = self.load_data("bqtools/tests/monsterSchema.json")
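    # Illustrative sketch of the round-trip exercised throughout this class
    # (the literal record below is hypothetical; the real fixtures are the
    # schemaTest*.json files loaded in setUp above):
    #     schema = bqtools.create_schema({"string": "hello", "integer": 52})
    #     as_dicts = [bqtools.to_dict(field) for field in schema]
    #     DeepDiff(expected_dicts, as_dicts, ignore_order=True)  # {} when equal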
def test_toDict(self):
schema2Dict = (
bigquery.SchemaField('string', 'STRING'),
bigquery.SchemaField('integer', 'INTEGER'),
bigquery.SchemaField('float', 'FLOAT'),
bigquery.SchemaField('boolean', 'BOOLEAN'),
bigquery.SchemaField('record', 'RECORD', fields=(
bigquery.SchemaField('string2', 'STRING'),
bigquery.SchemaField('float', 'FLOAT'),
bigquery.SchemaField('integer2', 'INTEGER'),
bigquery.SchemaField('boolean2', 'BOOLEAN')
)),
bigquery.SchemaField('array', 'RECORD', mode='REPEATED', fields=(
bigquery.SchemaField('string3', 'STRING'),
bigquery.SchemaField('integer3', 'INTEGER')
))
)
expectedResult = [
{
"name": 'string',
"type": 'STRING',
"description": None,
"mode": 'NULLABLE',
"fields": []},
{
"name": 'integer',
"type": 'INTEGER',
"description": None,
"mode": 'NULLABLE',
"fields": []},
{
"name": 'float',
"type": 'FLOAT',
"description": None,
"mode": 'NULLABLE',
"fields": []},
{
"name": 'boolean',
"type": 'BOOLEAN',
"description": None,
"mode": 'NULLABLE',
"fields": []},
{
"name": 'record',
"type": 'RECORD',
"description": None,
"mode": 'NULLABLE',
"fields": [
{"name": 'string2',
"type": 'STRING',
"description": None,
"mode": 'NULLABLE',
"fields": []},
{
"name": 'float',
"type": 'FLOAT',
"description": None,
"mode": 'NULLABLE',
"fields": []},
{
"name": 'integer2',
"type": 'INTEGER',
"description": None,
"mode": 'NULLABLE',
"fields": []},
{
"name": 'boolean2',
"type": 'BOOLEAN',
"description": None,
"mode": 'NULLABLE',
"fields": []}
]},
{
"name": 'array',
"type": 'RECORD',
"description": None,
"mode": 'REPEATED',
"fields": [
{"name": 'string3',
"type": 'STRING',
"description": None,
"mode": 'NULLABLE',
"fields": []},
{
"name": 'integer3',
"type": 'INTEGER',
"description": None,
"mode": 'NULLABLE',
"fields": []}
]}
]
sa = []
# print("showing each field")
for bqi in schema2Dict:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa.append(i)
diff = DeepDiff(expectedResult, sa, ignore_order=True)
self.assertEqual(diff, {},
"Unexpected result in toDict expected nothing insteadest got {}".format(
self.pp.pprint(diff)))
def test_createschema(self):
bqSchema = bqtools.create_schema(self.schemaTest1)
expectedSchema = (
bigquery.SchemaField('string', 'STRING'),
bigquery.SchemaField('integer', 'INTEGER'),
bigquery.SchemaField('float', 'FLOAT'),
bigquery.SchemaField('boolean', 'BOOLEAN'),
bigquery.SchemaField('record', 'RECORD', fields=(
bigquery.SchemaField('string2', 'STRING'),
bigquery.SchemaField('float', 'FLOAT'),
bigquery.SchemaField('integer2', 'INTEGER'),
bigquery.SchemaField('boolean2', 'BOOLEAN')
)),
bigquery.SchemaField('array', 'RECORD', mode='REPEATED', fields=(
bigquery.SchemaField('string3', 'STRING'),
bigquery.SchemaField('integer3', 'INTEGER')
))
)
# print("testing result")
# self.pp.pprint(bqSchema)
sa = []
# print("showing each field")
for bqi in bqSchema:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa.append(i)
# print("Schema as dict")
# self.pp.pprint(sa)
isa = sa
# print("Expected result")
# self.pp.pprint(expectedSchema)
sa = []
# print("showing each expected field")
for bqi in expectedSchema:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa.append(i)
# print("expected Schema as dict")
diff = DeepDiff(isa, sa, ignore_order=True)
# self.pp.pprint(diff)
a = "Schema test1 schema does not match target {}".format(len(diff))
self.assertEqual(diff, {}, a)
def test_createschema2(self):
# print("Creating a new schema")
bqSchema2 = bqtools.create_schema(self.schemaTest2)
sa2 = []
# print("showing each field schema2")
for bqi in bqSchema2:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa2.append(i)
# print("Schema2 as dict")
# self.pp.pprint(sa2)
expectedSchema2 = (
bigquery.SchemaField('string', 'STRING'),
bigquery.SchemaField('integer', 'INTEGER'),
bigquery.SchemaField('record', 'RECORD', fields=(
bigquery.SchemaField('string2', 'STRING'),
bigquery.SchemaField('float', 'FLOAT'),
bigquery.SchemaField('boolean2', 'BOOLEAN'),
bigquery.SchemaField('appended1', 'STRING')
)),
bigquery.SchemaField('array', 'RECORD', mode='REPEATED', fields=(
bigquery.SchemaField('string3', 'STRING'),
bigquery.SchemaField('integer3', 'INTEGER'),
bigquery.SchemaField('foo', 'FLOAT')
)),
bigquery.SchemaField('anotherarray', 'RECORD', mode='REPEATED', fields=(
bigquery.SchemaField('test1', 'INTEGER'),
bigquery.SchemaField('test2', 'BOOLEAN')
))
)
sa = []
for bqi in expectedSchema2:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa.append(i)
diff = DeepDiff(sa, sa2, ignore_order=True)
# self.pp.pprint(diff)
a = "Schema test1 schema does not match target {}".format(diff)
self.assertEqual(diff, {}, a)
logger = logging.getLogger("testBQTools")
evolved = bqtools.match_and_addtoschema({"string": "hello"}, expectedSchema2, logger=logger)
self.assertEqual(evolved, False, "Expected no evolve but got evolve true evolve test 1")
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52}, expectedSchema2,
logger=logger)
self.assertEqual(evolved, False, "Expected no evolve but got evolve true evolve test 2")
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52, "record": {}},
expectedSchema2, logger=logger)
self.assertEqual(evolved, False, "Expected no evolve but got evolve true evolve test 3")
evolved = bqtools.match_and_addtoschema(
{"string": "hello", "integer": 52, "record": {"string2": "hello2"}}, expectedSchema2,
logger=logger)
self.assertEqual(evolved, False, "Expected no evolve but got evolve true evolve test 4")
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"}},
expectedSchema2, logger=logger)
self.assertEqual(evolved, False, "Expected no evolve but got evolve true evolve test 6")
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": []},
expectedSchema2, logger=logger)
self.assertEqual(evolved, False, "Expected no evolve but got evolve true evolve test 7")
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": [{"string3": "hello"}]},
expectedSchema2, logger=logger)
self.assertEqual(evolved, False, "Expected no evolve but got evolve true evolve test 8")
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": [{"string3": "hello", "integer3": 42}]},
expectedSchema2, logger=logger)
self.assertEqual(evolved, False, "Expected no evolve but got evolve true evolve test 9")
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": [{"string3": "hello", "integer3": 42,
"foo": 3.141}]},
expectedSchema2, logger=logger)
self.assertEqual(evolved, False, "Expected no evolve but got evolve true evolve test 10")
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": [{"string3": "hello", "integer3": 42,
"foo": 3.141},
{"integer3": 42, "foo": 3.141}]},
expectedSchema2, logger=logger)
self.assertEqual(evolved, False, "Expected no evolve but got evolve true evolve test 11")
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": [{"string3": "hello", "integer3": 42,
"foo": 3.141},
{"integer3": 42, "foo": 3.141}],
"anotherarray": [{"test1": 52, "test2": False},
{"test1": 52, "test2": True}]},
expectedSchema2, logger=logger)
self.assertEqual(evolved, False, "Expected no evolve but got evolve true evolve test 12")
        # evolve tests below prepare baseline
copyoforigschema = list(expectedSchema2)
savedSchema = copy.deepcopy(copyoforigschema)
sa = []
for bqi in copyoforigschema:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa.append(i)
        # Evolution test 1
# add some stuff 2 layers down in an array
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": [{"string3": "hello", "integer3": 42,
"foo": 3.141},
{"integer3": 42, "foo": 3.141}],
"anotherarray": [{"test1": 52, "test2": False},
{"test1": 52, "test2": True,
"fred": "I am an evolved string",
"iamanotherevolve": 32}]},
copyoforigschema, logger=logger)
self.assertEqual(evolved, True,
"Expected evolve but got no evolve False for evolve test 13")
sa2 = []
for bqi in copyoforigschema:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa2.append(i)
diff = DeepDiff(sa, sa2, ignore_order=True)
diff = dict(diff)
print(
"============================================ evolve test 1 diff start "
"====================================")
print("Patched schema diff {} change{}".format(self.pp.pformat(diff), evolved))
print(
"============================================ evolve test 1 diff end "
"====================================")
self.assertEqual({'iterable_item_added': {'root[4]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test1',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test2',
'type': 'BOOLEAN'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name':
'iamanotherevolve',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'fred',
'type': 'STRING'}],
'mode': 'REPEATED',
'name': 'anotherarray',
'type': 'RECORD'}},
'iterable_item_removed': {'root[4]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test1',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test2',
'type': 'BOOLEAN'}],
'mode': 'REPEATED',
'name': 'anotherarray',
'type': 'RECORD'}}}, diff,
"Schema evolution not as expected {}".format(self.pp.pformat(diff)))
# Evolution test 2
        # this just adds a field at top level
copyoforigschema = copy.deepcopy(savedSchema)
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"hellomike": 3.1415926,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": [{"string3": "hello", "integer3": 42,
"foo": 3.141},
{"integer3": 42, "foo": 3.141}],
"anotherarray": [{"test1": 52, "test2": False},
{"test1": 52, "test2": True}]},
copyoforigschema, logger=logger)
self.assertEqual(evolved, True,
"Expected evolve but got no evolve False for evolve test 14")
sa2 = []
for bqi in copyoforigschema:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa2.append(i)
diff = DeepDiff(sa, sa2, ignore_order=True)
print(
"============================================ evolve test 2 diff start "
"====================================")
print("Patched schema diff {} change{}".format(self.pp.pformat(diff), evolved))
print(
"============================================ evolve test 2 diff end "
"====================================")
self.assertEqual({'iterable_item_added': {'root[5]': {'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'hellomike',
'type': 'FLOAT'}}}, diff,
"Schema evolution not as expected {}".format(self.pp.pformat(diff)))
# Evolution test 3
# this is an object with root schema evolution
# Plus child objects with 2 different changes in them
# plus another with both
copyoforigschema = copy.deepcopy(savedSchema)
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"hellomike": 3.1415926,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": [{"string3": "hello", "integer3": 42,
"foo": 3.141},
{"integer3": 42, "foo": 3.141}],
"anotherarray": [{"test1": 52, "test2": False,
"fred": "I am an evolution"},
{"test1": 52, "test2": True,
"iamanotherevolution": 1.3},
{"test1": 52, "test2": True,
"iamanotherevolution": 1.3,
"fred": "I am same previous "
"evolution"}]},
copyoforigschema, logger=logger)
self.assertEqual(evolved, True,
"Expected evolve but got no evolve False for evolve test 14")
sa2 = []
for bqi in copyoforigschema:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa2.append(i)
diff = DeepDiff(sa, sa2, ignore_order=True)
print(
"============================================ evolve test 3 diff start "
"====================================")
print("Patched schema diff {} change{}".format(self.pp.pformat(diff), evolved))
print(
"============================================ evolve test 3 diff end "
"====================================")
self.assertEqual({'iterable_item_added': {'root[4]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test1',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test2',
'type': 'BOOLEAN'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'fred',
'type': 'STRING'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name':
'iamanotherevolution',
'type': 'FLOAT'}],
'mode': 'REPEATED',
'name': 'anotherarray',
'type': 'RECORD'},
'root[5]': {'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'hellomike',
'type': 'FLOAT'}},
'iterable_item_removed': {'root[4]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test1',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test2',
'type': 'BOOLEAN'}],
'mode': 'REPEATED',
'name': 'anotherarray',
'type': 'RECORD'}}}, diff,
"Schema evolution not as expected {}".format(self.pp.pformat(diff)))
# Evolution test 4
# this is an object with root schema evolution
# Plus child objects with 2 different changes in them
copyoforigschema = copy.deepcopy(savedSchema)
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"hellomike": 3.1415926,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": [{"string3": "hello", "integer3": 42,
"foo": 3.141},
{"integer3": 42, "foo": 3.141}],
"anotherarray": [
{"test1": 52, "test2": False,
"fred": "I am an evolution"},
{"test1": 52, "test2": True,
"iamanotherevolution": 1.3}]},
copyoforigschema, logger=logger)
self.assertEqual(evolved, True,
"Expected evolve but got no evolve False for evolve test 14")
sa2 = []
for bqi in copyoforigschema:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa2.append(i)
diff = DeepDiff(sa, sa2, ignore_order=True)
print(
"============================================ evolve test 4 diff start "
"====================================")
print("Patched schema diff {} change{}".format(self.pp.pformat(diff), evolved))
print(
"============================================ evolve test 4 diff end "
"====================================")
self.assertEqual({'iterable_item_added': {'root[4]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test1',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test2',
'type': 'BOOLEAN'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'fred',
'type': 'STRING'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name':
'iamanotherevolution',
'type': 'FLOAT'}],
'mode': 'REPEATED',
'name': 'anotherarray',
'type': 'RECORD'},
'root[5]': {'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'hellomike',
'type': 'FLOAT'}},
'iterable_item_removed': {'root[4]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test1',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test2',
'type': 'BOOLEAN'}],
'mode': 'REPEATED',
'name': 'anotherarray',
'type': 'RECORD'}}}, diff,
"Schema evolution not as expected")
# Evolution test 5
        # add an array with strings and no key; this should fail
copyoforigschema = copy.deepcopy(savedSchema)
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"hellomike": 3.1415926,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": [{"string3": "hello", "integer3": 42,
"foo": 3.141},
{"integer3": 42, "foo": 3.141}],
"anotherarray": [
{"test1": 52, "test2": False,
"fred": "I am an evolution"},
{"test1": 52, "test2": True,
"iamanotherevolution": 1.3},
{"test1": 52, "test2": True,
"iamanotherevolution": 1.3,
"bill": ["hello", "fred", "break this"]}]},
copyoforigschema, logger=logger)
self.assertEqual(evolved, True,
"Expected evolve but got no evolve False for evolve test 14")
sa2 = []
for bqi in copyoforigschema:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa2.append(i)
diff = DeepDiff(sa, sa2, ignore_order=True)
print(
"============================================ evolve test 5 diff start "
"====================================")
print("Patched schema diff {} change{}".format(self.pp.pformat(diff), evolved))
print(
"============================================ evolve test 5 diff end "
"====================================")
self.assertEqual({'iterable_item_added': {'root[4]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test1',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test2',
'type': 'BOOLEAN'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'fred',
'type': 'STRING'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name':
'iamanotherevolution',
'type': 'FLOAT'},
{'description': None,
'fields': [
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'value',
'type': 'STRING'}],
'mode': 'REPEATED',
'name': 'bill',
'type': 'RECORD'}],
'mode': 'REPEATED',
'name': 'anotherarray',
'type': 'RECORD'},
'root[5]': {'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'hellomike',
'type': 'FLOAT'}},
'iterable_item_removed': {'root[4]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test1',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test2',
'type': 'BOOLEAN'}],
'mode': 'REPEATED',
'name': 'anotherarray',
'type': 'RECORD'}}}, diff,
"Schema evolution not as expected")
# Evolution test 6
        # add an array with strings and no key; this should fail
copyoforigschema = copy.deepcopy(savedSchema)
evolved = bqtools.match_and_addtoschema({"string": "hello", "integer": 52,
"hellomike": 3.1415926,
"record": {"string2": "hello2", "float": 1.3,
"boolean2": False,
"appended1": "another string"},
"array": [{"string3": "hello", "integer3": 42,
"foo": 3.141},
{"integer3": 42, "foo": 3.141}],
"anotherarray": [
{"test1": 52, "test2": False,
"fred": "I am an evolution"},
{"test1": 52, "test2": True,
"iamanotherevolution": 1.3},
{"test1": 52, "test2": True,
"iamanotherevolution": 1.3,
"bill": {}}]},
copyoforigschema, logger=logger)
self.assertEqual(evolved, True,
"Expected evolve but got no evolve False for evolve test 14")
sa2 = []
for bqi in copyoforigschema:
i = bqtools.to_dict(bqi)
# self.pp.pprint(i)
sa2.append(i)
diff = DeepDiff(sa, sa2, ignore_order=True)
print(
"============================================ evolve test 6 diff start "
"====================================")
print("Patched schema diff {} change{}".format(self.pp.pformat(diff), evolved))
print(
"============================================ evolve test 6 diff end "
"====================================")
self.assertEqual({'iterable_item_added': {'root[4]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test1',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test2',
'type': 'BOOLEAN'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'fred',
'type': 'STRING'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name':
'iamanotherevolution',
'type': 'FLOAT'},
{'description': None,
'fields': [
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name':
'xxxDummySchemaAsNoneDefinedxxx',
'type': 'STRING'}],
'mode': 'NULLABLE',
'name': 'bill',
'type': 'RECORD'}],
'mode': 'REPEATED',
'name': 'anotherarray',
'type': 'RECORD'},
'root[5]': {'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'hellomike',
'type': 'FLOAT'}},
'iterable_item_removed': {'root[4]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test1',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test2',
'type': 'BOOLEAN'}],
'mode': 'REPEATED',
'name': 'anotherarray',
'type': 'RECORD'}}}, diff,
"Schema evolution not as expected")
def test_patchbare(self):
startschema = bqtools.create_schema(self.schema2startnobare)
resultschema = bqtools.create_schema(self.schemaTest2nonbare)
origobject = copy.deepcopy(self.schemaTest2bare)
evolved = bqtools.match_and_addtoschema(self.schemaTest2bare, startschema)
self.assertEqual(evolved, True,
"Bare llist and multi dict evolution has not happened as expected")
diff = DeepDiff(resultschema, startschema, ignore_order=True)
print(
"============================================ mixed arrays added diff start "
"====================================")
print("Patched schema diff {} change{}".format(self.pp.pformat(diff), evolved))
print(
"============================================ mixed arrays added diff end "
"====================================")
def test_patch(self):
bqSchema2 = bqtools.create_schema(self.schemaTest2)
bqSchema = bqtools.create_schema(self.schemaTest1)
sa = []
for bqi in bqSchema:
i = bqtools.to_dict(bqi)
sa.append(i)
osa = copy.deepcopy(sa)
change, pschema = bqtools.recurse_and_add_to_schema(bqSchema2, sa)
diff = DeepDiff(pschema, osa, ignore_order=True)
# patching never removes fields so expect additions
# so after list of root[] should be one longer
expectedDiff = {'iterable_item_added': {'root[2]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'integer2',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'float',
'type': 'FLOAT'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'string2',
'type': 'STRING'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'boolean2',
'type': 'BOOLEAN'}],
'mode': 'NULLABLE',
'name': 'record',
'type': 'RECORD'},
'root[5]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'integer3',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'string3',
'type': 'STRING'}],
'mode': 'REPEATED',
'name': 'array',
'type': 'RECORD'}},
'iterable_item_removed': {'root[2]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'integer2',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'float',
'type': 'FLOAT'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'string2',
'type': 'STRING'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'boolean2',
'type': 'BOOLEAN'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'appended1',
'type': 'STRING'}],
'mode': 'NULLABLE',
'name': 'record',
'type': 'RECORD'},
'root[5]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'integer3',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'string3',
'type': 'STRING'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'foo',
'type': 'FLOAT'}],
'mode': 'REPEATED',
'name': 'array',
'type': 'RECORD'},
'root[6]': {'description': None,
'fields': [{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test1',
'type': 'INTEGER'},
{'description': None,
'fields': [],
'mode': 'NULLABLE',
'name': 'test2',
'type': 'BOOLEAN'}],
'mode': 'REPEATED',
'name': 'anotherarray',
'type': 'RECORD'}}}
self.assertEqual(diff, expectedDiff,
"Patch diff is not what is expected {}".format(self.pp.pformat(diff)))
self.assertEqual(change, True,
"Patch diff change result {} is not what is expected {}".format(change,
self.pp.pformat(
diff)))
bqSchema3 = bqtools.create_schema(self.schemaTest3)
bqSchema4 = bqtools.create_schema(self.schemaTest4)
sa2 = []
for bqi in bqSchema3:
i = bqtools.to_dict(bqi)
sa2.append(i)
osa = copy.deepcopy(sa2)
change, pschema = bqtools.recurse_and_add_to_schema(bqSchema4, sa2)
diff = DeepDiff(pschema, osa, ignore_order=True)
print("Patched schema diff {} change{}".format(self.pp.pformat(diff), change))
# print("old {}".format(self.pp.pformat(osa)))
# print("new {}".format(self.pp.pformat(pschema)))
def test_patch2(self):
bqSchema2 = bqtools.create_schema(self.schemaTest2)
bqSchema = bqtools.create_schema(self.schemaTest2)
sa = []
for bqi in bqSchema:
i = bqtools.to_dict(bqi)
sa.append(i)
osa = copy.deepcopy(sa)
change, pschema = bqtools.recurse_and_add_to_schema(bqSchema2, sa)
diff = DeepDiff(pschema, osa, ignore_order=True)
# patching never removes fields so expect additions
# so after list of root[] should be one longer
expectedDiff = {}
self.assertEqual(diff, expectedDiff,
"Patch diff is not what is expected {}".format(self.pp.pformat(diff)))
self.assertEqual(change, False,
"Patch diff change result {} is not what is expected {}".format(change,
self.pp.pformat(
diff)))
# print("Patched schema diff {}".format(self.pp.pformat(diff)))
# print("old {}".format(self.pp.pformat(osa)))
# print("new {}".format(self.pp.pformat(pschema)))
i = bqtools.to_dict(bqi)
sa2.append(i)
osa = copy.deepcopy(sa2)
change, pschema = bqtools.recurse_and_add_to_schema(bqSchema4, sa2)
diff = DeepDiff(pschema, osa, ignore_order=True)
print("Patched schema diff {} change{}".format(self.pp.pformat(diff), change))
# print("old {}".format(self.pp.pformat(osa)))
# print("new {}".format(self.pp.pformat(pschema)))
def test_patch2(self):
bqSchema2 = bqtools.create_schema(self.schemaTest2)
bqSchema = bqtools.create_schema(self.schemaTest2)
sa = []
for bqi in bqSchema:
i = bqtools.to_dict(bqi)
sa.append(i)
osa = copy.deepcopy(sa)
change, pschema = bqtools.recurse_and_add_to_schema(bqSchema2, sa)
diff = DeepDiff(pschema, osa, ignore_order=True)
# patching never removes fields so expect additions
# so after list of root[] should be one longer
expectedDiff = {}
self.assertEqual(diff, expectedDiff,
"Patch diff is not what is expected {}".format(self.pp.pformat(diff)))
self.assertEqual(change, False,
"Patch diff change result {} is not what is expected {}".format(change,
self.pp.pformat(
diff)))
# print("Patched schema diff {}".format(self.pp.pformat(diff)))
# print("old {}".format(self.pp.pformat(osa)))
# print("new {}".format(self.pp.pformat(pschema)))
def test_sync(self):
logging.basicConfig(level=logging.INFO)
# get target datasets ready uses app default credentials
bqclient = bigquery.Client()
stclient = storage.Client()
# will use default project and public datasets for testing
destination_project = bqclient.project
# going to copy data from various datasets in bigquery-public-data project
# each destination will be of the form bqsynctest_<region>_<orignaldatasetname>
# in region the "-" will be replaced with "_" to make a valid dataset name
# as all public data is in us we will need for cross region a us bucket
# and a target region bucket
# tests are in region i.e. us to us
# us to eu
# us to europe-west2
# bucket names will be created if they do not exist of
# bqsynctest_<projectid>_<region>
# each bucket will have a 1 day lifecycle rule added
# source will be picked with various source attribute types, partitioning and clustering strategy
# success is tables are copied no errors in extract, load or copy
# note table numbers may vary
# at end the test datasets will be deleted the buckets will remain
# this as bucket names remain reserved for sometime after deletion
test_buckets = []
usbucket = "bqsynctest_{}_us".format(destination_project)
test_buckets.append({"name":usbucket,"region":"us"})
eubucket = "bqsynctest_{}_eu".format(destination_project)
test_buckets.append({"name":eubucket,"region":"eu"})
eu2bucket = "bqsynctest_{}_europe-west-2".format(destination_project)
test_buckets.append({"name":eu2bucket,"region":"europe-west2"})
logging.info("Checking buckets for bqsync tests exist in right regions and with lifecycle rules...")
# loop through test bucket if they do not exist create in the right region and add
# #lifecycle rule
# if they do exist check they are in right region and have the expected lifecycle rule
for bucket_dict in test_buckets:
bucket = None
try:
bucket = stclient.get_bucket(bucket_dict["name"])
except exceptions.NotFound:
bucket_ref = storage.Bucket(stclient,name=bucket_dict["name"])
bucket_ref.location = bucket_dict["region"]
storage.Bucket.create(bucket_ref,stclient)
bucket = stclient.get_bucket(bucket_dict["name"])
rules = bucket.lifecycle_rules
nrules = []
found1daydeletrule = False
for rule in rules:
if isinstance(rule, dict):
if "condition" in rule and "age" in rule["condition"] and rule["condition"][
"age"] == 1 and "isLive" in rule["condition"] and rule["condition"][
"isLive"]:
found1daydeletrule = True
nrules.append(rule)
if not found1daydeletrule:
nrules.append(
{"action": {"type": "Delete"}, "condition": {"age": 1, "isLive": True}})
bucket.lifecycle_rules = nrules
bucket.update(stclient)
# starting datasets to test with from project bigquery-public-data
# along with each entry is list of tables and length of maximum days for day partition
test_source_configs = []
# small dataset good to start tests basic types
test_source_configs.append({
"description":"small dataset good to start tests basic types",
"dataset_name":"fcc_political_ads",
"table_filter_regexp":['broadcast_tv_radio_station',
'content_info',
'file_history',
'file_record'],
"max_last_days":365
})
# small dataset good to start tests basic types
test_source_configs.append({
"description": "date partitioned 1 date type field",
"dataset_name": "wikipedia",
"table_filter_regexp": ['wikidata'],
"max_last_days": None
})
# a table with geography data type
test_source_configs.append({
"description":"a table with geography data type",
"dataset_name": "faa",
"table_filter_regexp": ['us_airports'],
"max_last_days": 365
})
# a dataset with a day partitioned table with clustering
# not using a specific partition column name so just ingest time
test_source_configs.append({
"description":"a dataset with a day partitioned table with clustering not using a specific partition column name so just ingest time",
"dataset_name": "new_york_subway",
"table_filter_regexp": ['geo_nyc_borough_boundaries'],
"max_last_days": 365
})
# a dataset with view referencing it self to demo simple view copying
test_source_configs.append({
"description":"a dataset with view referencing it self to demo simple view copying",
"dataset_name": "noaa_goes16",
"table_filter_regexp": ['.*'],
"max_last_days": 365
})
# a dataset with functions only
test_source_configs.append({
"description":"a dataset with functions only",
"dataset_name": "persistent_udfs",
"table_filter_regexp": ['.*'],
"max_last_days": 365
})
# a dataset with nested table example and a model
# models will fail
test_source_configs.append({
"description":"a dataset with nested table example and a model",
"dataset_name": "samples",
"table_filter_regexp": ['github_nested','model'],
"max_last_days": 365
})
# a dataset with day partitioned no clustering using natural load time
test_source_configs.append({
"description":"a dataset with day partitioned no clustering using natural load time",
"dataset_name": "sec_quarterly_financials",
"table_filter_regexp": ['.*'],
"max_last_days": 365 * 3
})
# a dataset with a day partitioned table with clustering
# using a specific partition column name so not just ingest time
# has repeated basic types
# note this shows the issue of bq not correctly supporting avro logical types
# https://issuetracker.google.com/issues/35905894 will fail until resolved
test_source_configs.append({
"description":"a dataset with a day partitioned table with clustering using a specific partition column name so not just ingest time",
"dataset_name": "human_genome_variants",
"table_filter_regexp": ['platinum_genomes_deepvariant_variants_20180823'],
"max_last_days": None
})
test_destination_datasets_list = []
for src_destination in test_source_configs:
tests = []
# set up local us test
destdatset = "bqsynctest_{}_{}".format("US",src_destination["dataset_name"]).replace("-","_")
tests.append({
"subtest":"us intra region",
"destdataset": destdatset,
"destregion":"US"
})
test_destination_datasets_list.append(destdatset)
# set up us to eu test
destdatset = "bqsynctest_{}_{}".format("EU", src_destination["dataset_name"]).replace(
"-", "_")
tests.append({
"subtest": "us to eu cross region",
"destdataset": destdatset,
"destregion": "EU",
"dstbucket":eubucket
})
test_destination_datasets_list.append(destdatset)
# set up us to europe-west2 test
destdatset = "bqsynctest_{}_{}".format("europe-west2", src_destination["dataset_name"]).replace(
"-", "_")
tests.append({
"subtest": "us to europe-west2 cross region",
"destdataset": destdatset,
"destregion": "europe-west2",
"dstbucket":eu2bucket
})
test_destination_datasets_list.append(destdatset)
src_destination["tests"] = tests
logging.info(
"Checking daatsets for bqsync tests exist in right regions and if exist empty them i.e. delete and recreate them...")
for datasetname in test_destination_datasets_list:
dataset_ref = bqclient.dataset(datasetname)
if bqtools.dataset_exists(bqclient,dataset_ref):
bqclient.delete_dataset(bqclient.get_dataset(dataset_ref),delete_contents=True)
# for each source run sub tests
logging.info("Staring tests...")
for test_config in test_source_configs:
# run sub test basically an initial copy followed by
# 2nd copy if no data latter should do nothing
for dstconfig in test_config["tests"]:
# create an empty dataset
dataset_ref = bqclient.dataset(dstconfig["destdataset"])
dataset = bigquery.Dataset(dataset_ref)
dataset.location = dstconfig["destregion"]
dataset = bqclient.create_dataset(dataset)
# create initial sync
# as source is all in US if not us must need buckets
synctest = None
if dstconfig["destregion"] == "US":
synctest = bqtools.MultiBQSyncCoordinator(["bigquery-public-data.{}".format(test_config["dataset_name"])],
["{}.{}".format(destination_project,dstconfig["destdataset"])],
remove_deleted_tables=True,
copy_data=True,
copy_types=["TABLE","VIEW","ROUTINE","MODEL"],
check_depth=0,
table_view_filter=test_config["table_filter_regexp"],
table_or_views_to_exclude=[],
latest_date=None,
days_before_latest_day=test_config["max_last_days"],
day_partition_deep_check=False,
analysis_project=destination_project)
else:
synctest = bqtools.MultiBQSyncCoordinator(
["bigquery-public-data.{}".format(test_config["dataset_name"])],
["{}.{}".format(destination_project, dstconfig["destdataset"])],
srcbucket=usbucket,
dstbucket=dstconfig["dstbucket"],
remove_deleted_tables=True,
copy_data=True,
copy_types=["TABLE","VIEW","ROUTINE","MODEL"],
check_depth=0,
table_view_filter=test_config["table_filter_regexp"],
table_or_views_to_exclude=[],
latest_date=None,
days_before_latest_day=test_config["max_last_days"],
day_partition_deep_check=False,
analysis_project=destination_project)
synctest.sync()
self.assertEqual(True, True, "Initial Sync {} {} from bigquery-public-data..{} with {}.{} completed".format(
test_config["description"],
dstconfig["subtest"],
test_config["dataset_name"],
destination_project,
dstconfig["destdataset"]
))
synctest.reset_stats()
synctest.sync()
self.assertEqual(synctest.tables_avoided, synctest.tables_synced,
"Second Sync {} {} from bigquery-public-data..{} with {}.{} "
"completed".format(
test_config["description"],
dstconfig["subtest"],
test_config["dataset_name"],
destination_project,
dstconfig["destdataset"]
))
eutest = bqtools.MultiBQSyncCoordinator(
["{}.{}".format(destination_project,test_config["tests"][1]["destdataset"])],
["{}.{}".format(destination_project,test_config["tests"][2]["destdataset"])],
srcbucket=eubucket,
dstbucket=eu2bucket,
remove_deleted_tables=True,
copy_data=True,
copy_types=["TABLE", "VIEW", "ROUTINE", "MODEL"],
check_depth=0,
table_view_filter=[".*"],
table_or_views_to_exclude=[],
latest_date=None,
days_before_latest_day=None,
day_partition_deep_check=False,
analysis_project=destination_project)
eutest.sync()
self.assertEqual(eutest.tables_avoided + eutest.view_avoided + eutest.routines_avoided,
eutest.tables_synced + eutest.views_synced + eutest.routines_synced,
"Inter europe Sync {} {} from {}.{} with {}.{}"
"completed".format(
test_config["description"],
"EU to europe-west2",
destination_project,
test_config["tests"][1]["destdataset"],
destination_project,
test_config["tests"][2]["destdataset"]
))
def test_gendiff(self):
bqSchema2 = bqtools.create_schema(self.schemaTest2)
views = bqtools.gen_diff_views('foo', 'ar', 'bob', bqSchema2, description="A test schema")
vexpected = {'bobdb': {
"query": """#standardSQL
SELECT
_PARTITIONTIME AS scantime,
ifnull(tabob.integer,0) as integer,
ifnull(A1.integer3,0) as arrayinteger3,
ifnull(A1.foo,0.0) as arrayfoo,
ifnull(A1.string3,"None") as arraystring3,
ifnull(A2.test1,0) as anotherarraytest1,
ifnull(A2.test2,False) as anotherarraytest2,
ifnull(tabob.string,"None") as string,
ifnull(tabob.record.appended1,"None") as recordappended1,
ifnull(tabob.record.float,0.0) as recordfloat,
ifnull(tabob.record.string2,"None") as recordstring2,
ifnull(tabob.record.boolean2,False) as recordboolean2
from `foo.ar.bob` as tabob
LEFT JOIN UNNEST(tabob.array) as A1
LEFT JOIN UNNEST(tabob.anotherarray) as A2""",
"description": "View used as basis for diffview:A test schema"},
'bobdiffday': {
"query": """#standardSQL
SELECT
o.scantime as origscantime,
l.scantime as laterscantime,
CASE
WHEN o.integer IS NULL THEN 'Added'
WHEN l.integer IS NULL THEN 'Deleted'
WHEN o.integer = l.integer AND o.arrayinteger3 = l.arrayinteger3 AND o.arrayfoo = l.arrayfoo AND o.arraystring3 = l.arraystring3 AND o.anotherarraytest1 = l.anotherarraytest1 AND o.anotherarraytest2 = l.anotherarraytest2 AND o.string = l.string AND o.recordappended1 = l.recordappended1 AND o.recordfloat = l.recordfloat AND o.recordstring2 = l.recordstring2 AND o.recordboolean2 = l.recordboolean2 THEN 'Same'
ELSE 'Updated'
END AS action,
o.integer as originteger,
l.integer as laterinteger,
case when o.integer = l.integer then 0 else 1 end as diffinteger,
o.arrayinteger3 as origarrayinteger3,
l.arrayinteger3 as laterarrayinteger3,
case when o.arrayinteger3 = l.arrayinteger3 then 0 else 1 end as diffarrayinteger3,
o.arrayfoo as origarrayfoo,
l.arrayfoo as laterarrayfoo,
case when o.arrayfoo = l.arrayfoo then 0 else 1 end as diffarrayfoo,
o.arraystring3 as origarraystring3,
l.arraystring3 as laterarraystring3,
case when o.arraystring3 = l.arraystring3 then 0 else 1 end as diffarraystring3,
o.anotherarraytest1 as origanotherarraytest1,
l.anotherarraytest1 as lateranotherarraytest1,
case when o.anotherarraytest1 = l.anotherarraytest1 then 0 else 1 end as diffanotherarraytest1,
o.anotherarraytest2 as origanotherarraytest2,
l.anotherarraytest2 as lateranotherarraytest2,
case when o.anotherarraytest2 = l.anotherarraytest2 then 0 else 1 end as diffanotherarraytest2,
o.string as origstring,
l.string as laterstring,
case when o.string = l.string then 0 else 1 end as diffstring,
o.recordappended1 as origrecordappended1,
l.recordappended1 as laterrecordappended1,
case when o.recordappended1 = l.recordappended1 then 0 else 1 end as diffrecordappended1,
o.recordfloat as origrecordfloat,
l.recordfloat as laterrecordfloat,
case when o.recordfloat = l.recordfloat then 0 else 1 end as diffrecordfloat,
o.recordstring2 as origrecordstring2,
l.recordstring2 as laterrecordstring2,
case when o.recordstring2 = l.recordstring2 then 0 else 1 end as diffrecordstring2,
o.recordboolean2 as origrecordboolean2,
l.recordboolean2 as laterrecordboolean2,
case when o.recordboolean2 = l.recordboolean2 then 0 else 1 end as diffrecordboolean2
FROM (SELECT
*
FROM
`foo.ar.bobdb`
WHERE
scantime = (
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob`
WHERE
_PARTITIONTIME < (
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob`)
AND
_PARTITIONTIME < TIMESTAMP_SUB(CURRENT_TIMESTAMP(),INTERVAL 1 DAY) ) ) o
FULL OUTER JOIN (
SELECT
*
FROM
`foo.ar.bobdb`
WHERE
scantime =(
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob` )) l
ON
l.integer = o.integer
AND l.arrayinteger3=o.arrayinteger3
AND l.arrayfoo=o.arrayfoo
AND l.arraystring3=o.arraystring3
AND l.anotherarraytest1=o.anotherarraytest1
AND l.anotherarraytest2=o.anotherarraytest2
AND l.string=o.string
AND l.recordappended1=o.recordappended1
AND l.recordfloat=o.recordfloat
AND l.recordstring2=o.recordstring2
AND l.recordboolean2=o.recordboolean2""",
"description": "Diff of day of underlying table bob description: A test schema"},
'bobdiffweek': {'query': """#standardSQL
SELECT
o.scantime as origscantime,
l.scantime as laterscantime,
CASE
WHEN o.integer IS NULL THEN 'Added'
WHEN l.integer IS NULL THEN 'Deleted'
WHEN o.integer = l.integer AND o.arrayinteger3 = l.arrayinteger3 AND o.arrayfoo = l.arrayfoo AND o.arraystring3 = l.arraystring3 AND o.anotherarraytest1 = l.anotherarraytest1 AND o.anotherarraytest2 = l.anotherarraytest2 AND o.string = l.string AND o.recordappended1 = l.recordappended1 AND o.recordfloat = l.recordfloat AND o.recordstring2 = l.recordstring2 AND o.recordboolean2 = l.recordboolean2 THEN 'Same'
ELSE 'Updated'
END AS action,
o.integer as originteger,
l.integer as laterinteger,
case when o.integer = l.integer then 0 else 1 end as diffinteger,
o.arrayinteger3 as origarrayinteger3,
l.arrayinteger3 as laterarrayinteger3,
case when o.arrayinteger3 = l.arrayinteger3 then 0 else 1 end as diffarrayinteger3,
o.arrayfoo as origarrayfoo,
l.arrayfoo as laterarrayfoo,
case when o.arrayfoo = l.arrayfoo then 0 else 1 end as diffarrayfoo,
o.arraystring3 as origarraystring3,
l.arraystring3 as laterarraystring3,
case when o.arraystring3 = l.arraystring3 then 0 else 1 end as diffarraystring3,
o.anotherarraytest1 as origanotherarraytest1,
l.anotherarraytest1 as lateranotherarraytest1,
case when o.anotherarraytest1 = l.anotherarraytest1 then 0 else 1 end as diffanotherarraytest1,
o.anotherarraytest2 as origanotherarraytest2,
l.anotherarraytest2 as lateranotherarraytest2,
case when o.anotherarraytest2 = l.anotherarraytest2 then 0 else 1 end as diffanotherarraytest2,
o.string as origstring,
l.string as laterstring,
case when o.string = l.string then 0 else 1 end as diffstring,
o.recordappended1 as origrecordappended1,
l.recordappended1 as laterrecordappended1,
case when o.recordappended1 = l.recordappended1 then 0 else 1 end as diffrecordappended1,
o.recordfloat as origrecordfloat,
l.recordfloat as laterrecordfloat,
case when o.recordfloat = l.recordfloat then 0 else 1 end as diffrecordfloat,
o.recordstring2 as origrecordstring2,
l.recordstring2 as laterrecordstring2,
case when o.recordstring2 = l.recordstring2 then 0 else 1 end as diffrecordstring2,
o.recordboolean2 as origrecordboolean2,
l.recordboolean2 as laterrecordboolean2,
case when o.recordboolean2 = l.recordboolean2 then 0 else 1 end as diffrecordboolean2
FROM (SELECT
*
FROM
`foo.ar.bobdb`
WHERE
scantime = (
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob`
WHERE
_PARTITIONTIME < (
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob`)
AND
_PARTITIONTIME < TIMESTAMP_SUB(CURRENT_TIMESTAMP(),INTERVAL 7 DAY) ) ) o
FULL OUTER JOIN (
SELECT
*
FROM
`foo.ar.bobdb`
WHERE
scantime =(
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob` )) l
ON
l.integer = o.integer
AND l.arrayinteger3=o.arrayinteger3
AND l.arrayfoo=o.arrayfoo
AND l.arraystring3=o.arraystring3
AND l.anotherarraytest1=o.anotherarraytest1
AND l.anotherarraytest2=o.anotherarraytest2
AND l.string=o.string
AND l.recordappended1=o.recordappended1
AND l.recordfloat=o.recordfloat
AND l.recordstring2=o.recordstring2
AND l.recordboolean2=o.recordboolean2""",
'description': 'Diff of week of underlying table bob description: A '
'test schema'},
'bobdiffmonth': {'query': """#standardSQL
SELECT
o.scantime as origscantime,
l.scantime as laterscantime,
CASE
WHEN o.integer IS NULL THEN 'Added'
WHEN l.integer IS NULL THEN 'Deleted'
WHEN o.integer = l.integer AND o.arrayinteger3 = l.arrayinteger3 AND o.arrayfoo = l.arrayfoo AND o.arraystring3 = l.arraystring3 AND o.anotherarraytest1 = l.anotherarraytest1 AND o.anotherarraytest2 = l.anotherarraytest2 AND o.string = l.string AND o.recordappended1 = l.recordappended1 AND o.recordfloat = l.recordfloat AND o.recordstring2 = l.recordstring2 AND o.recordboolean2 = l.recordboolean2 THEN 'Same'
ELSE 'Updated'
END AS action,
o.integer as originteger,
l.integer as laterinteger,
case when o.integer = l.integer then 0 else 1 end as diffinteger,
o.arrayinteger3 as origarrayinteger3,
l.arrayinteger3 as laterarrayinteger3,
case when o.arrayinteger3 = l.arrayinteger3 then 0 else 1 end as diffarrayinteger3,
o.arrayfoo as origarrayfoo,
l.arrayfoo as laterarrayfoo,
case when o.arrayfoo = l.arrayfoo then 0 else 1 end as diffarrayfoo,
o.arraystring3 as origarraystring3,
l.arraystring3 as laterarraystring3,
case when o.arraystring3 = l.arraystring3 then 0 else 1 end as diffarraystring3,
o.anotherarraytest1 as origanotherarraytest1,
l.anotherarraytest1 as lateranotherarraytest1,
case when o.anotherarraytest1 = l.anotherarraytest1 then 0 else 1 end as diffanotherarraytest1,
o.anotherarraytest2 as origanotherarraytest2,
l.anotherarraytest2 as lateranotherarraytest2,
case when o.anotherarraytest2 = l.anotherarraytest2 then 0 else 1 end as diffanotherarraytest2,
o.string as origstring,
l.string as laterstring,
case when o.string = l.string then 0 else 1 end as diffstring,
o.recordappended1 as origrecordappended1,
l.recordappended1 as laterrecordappended1,
case when o.recordappended1 = l.recordappended1 then 0 else 1 end as diffrecordappended1,
o.recordfloat as origrecordfloat,
l.recordfloat as laterrecordfloat,
case when o.recordfloat = l.recordfloat then 0 else 1 end as diffrecordfloat,
o.recordstring2 as origrecordstring2,
l.recordstring2 as laterrecordstring2,
case when o.recordstring2 = l.recordstring2 then 0 else 1 end as diffrecordstring2,
o.recordboolean2 as origrecordboolean2,
l.recordboolean2 as laterrecordboolean2,
case when o.recordboolean2 = l.recordboolean2 then 0 else 1 end as diffrecordboolean2
FROM (SELECT
*
FROM
`foo.ar.bobdb`
WHERE
scantime = (
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob`
WHERE
_PARTITIONTIME < (
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob`)
AND
_PARTITIONTIME < TIMESTAMP_SUB(CURRENT_TIMESTAMP(),INTERVAL 30 DAY) ) ) o
FULL OUTER JOIN (
SELECT
*
FROM
`foo.ar.bobdb`
WHERE
scantime =(
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob` )) l
ON
l.integer = o.integer
AND l.arrayinteger3=o.arrayinteger3
AND l.arrayfoo=o.arrayfoo
AND l.arraystring3=o.arraystring3
AND l.anotherarraytest1=o.anotherarraytest1
AND l.anotherarraytest2=o.anotherarraytest2
AND l.string=o.string
AND l.recordappended1=o.recordappended1
AND l.recordfloat=o.recordfloat
AND l.recordstring2=o.recordstring2
AND l.recordboolean2=o.recordboolean2""",
'description': 'Diff of month of underlying table bob description: A '
'test schema'},
'bobdifffortnight': {'query': """#standardSQL
SELECT
o.scantime as origscantime,
l.scantime as laterscantime,
CASE
WHEN o.integer IS NULL THEN 'Added'
WHEN l.integer IS NULL THEN 'Deleted'
WHEN o.integer = l.integer AND o.arrayinteger3 = l.arrayinteger3 AND o.arrayfoo = l.arrayfoo AND o.arraystring3 = l.arraystring3 AND o.anotherarraytest1 = l.anotherarraytest1 AND o.anotherarraytest2 = l.anotherarraytest2 AND o.string = l.string AND o.recordappended1 = l.recordappended1 AND o.recordfloat = l.recordfloat AND o.recordstring2 = l.recordstring2 AND o.recordboolean2 = l.recordboolean2 THEN 'Same'
ELSE 'Updated'
END AS action,
o.integer as originteger,
l.integer as laterinteger,
case when o.integer = l.integer then 0 else 1 end as diffinteger,
o.arrayinteger3 as origarrayinteger3,
l.arrayinteger3 as laterarrayinteger3,
case when o.arrayinteger3 = l.arrayinteger3 then 0 else 1 end as diffarrayinteger3,
o.arrayfoo as origarrayfoo,
l.arrayfoo as laterarrayfoo,
case when o.arrayfoo = l.arrayfoo then 0 else 1 end as diffarrayfoo,
o.arraystring3 as origarraystring3,
l.arraystring3 as laterarraystring3,
case when o.arraystring3 = l.arraystring3 then 0 else 1 end as diffarraystring3,
o.anotherarraytest1 as origanotherarraytest1,
l.anotherarraytest1 as lateranotherarraytest1,
case when o.anotherarraytest1 = l.anotherarraytest1 then 0 else 1 end as diffanotherarraytest1,
o.anotherarraytest2 as origanotherarraytest2,
l.anotherarraytest2 as lateranotherarraytest2,
case when o.anotherarraytest2 = l.anotherarraytest2 then 0 else 1 end as diffanotherarraytest2,
o.string as origstring,
l.string as laterstring,
case when o.string = l.string then 0 else 1 end as diffstring,
o.recordappended1 as origrecordappended1,
l.recordappended1 as laterrecordappended1,
case when o.recordappended1 = l.recordappended1 then 0 else 1 end as diffrecordappended1,
o.recordfloat as origrecordfloat,
l.recordfloat as laterrecordfloat,
case when o.recordfloat = l.recordfloat then 0 else 1 end as diffrecordfloat,
o.recordstring2 as origrecordstring2,
l.recordstring2 as laterrecordstring2,
case when o.recordstring2 = l.recordstring2 then 0 else 1 end as diffrecordstring2,
o.recordboolean2 as origrecordboolean2,
l.recordboolean2 as laterrecordboolean2,
case when o.recordboolean2 = l.recordboolean2 then 0 else 1 end as diffrecordboolean2
FROM (SELECT
*
FROM
`foo.ar.bobdb`
WHERE
scantime = (
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob`
WHERE
_PARTITIONTIME < (
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob`)
AND
_PARTITIONTIME < TIMESTAMP_SUB(CURRENT_TIMESTAMP(),INTERVAL 14 DAY) ) ) o
FULL OUTER JOIN (
SELECT
*
FROM
`foo.ar.bobdb`
WHERE
scantime =(
SELECT
MAX(_PARTITIONTIME)
FROM
`foo.ar.bob` )) l
ON
l.integer = o.integer
AND l.arrayinteger3=o.arrayinteger3
AND l.arrayfoo=o.arrayfoo
AND l.arraystring3=o.arraystring3
AND l.anotherarraytest1=o.anotherarraytest1
AND l.anotherarraytest2=o.anotherarraytest2
AND l.string=o.string
AND l.recordappended1=o.recordappended1
AND l.recordfloat=o.recordfloat
AND l.recordstring2=o.recordstring2
AND l.recordboolean2=o.recordboolean2""",
'description': 'Diff of fortnight of underlying table bob '
'description: A test schema'}}
for vi in views:
expected = vexpected[vi['name']]['query'].splitlines(1)
actual = vi['query'].splitlines(1)
diff = difflib.unified_diff(expected, actual)
print(''.join(diff))
self.assertEqual(len(vi['query']), len(vexpected[vi['name']]['query']),
"Query len for view {} is not equal to what is expected\n:{}:\n:{"
"}:".format(
vi['name'],
vi['query'],
vexpected[
vi['name']][
'query']))
self.assertEqual(vi['query'], vexpected[vi['name']]['query'],
"Query for view {} is not equal to what is expected\n:{}:\n:{"
"}:".format(
vi['name'], vi['query'], vexpected[vi['name']]['query']))
self.assertEqual(vi['description'], vexpected[vi['name']]['description'],
"Description for view {} is not equal to what is expected\n:{}:\n:{"
"}:".format(
vi['name'], vi['description'],
vexpected[vi['name']]['description']))
def test_calc_field_depth(self):
toTest = [{"name": 'string',
"type": 'STRING',
"description": None,
"mode": 'NULLABLE'},
{"name": 'integer',
"type": 'INTEGER',
"description": None,
"mode": 'NULLABLE'},
{"name": 'float',
"type": 'FLOAT',
"description": None,
"mode": 'NULLABLE'},
{"name": 'boolean',
"type": 'BOOLEAN',
"description": None,
"mode": 'NULLABLE'},
{"name": 'record',
"type": 'RECORD',
"description": None,
"mode": 'NULLABLE',
"fields":
[{"name": 'string2',
"type": 'STRING',
"description": None,
"mode": 'NULLABLE'},
{"name": 'float',
"type": 'FLOAT',
"description": None,
"mode": 'NULLABLE'},
{"name": 'integer2',
"type": 'INTEGER',
"description": None,
"mode": 'NULLABLE'},
{"name": 'boolean2',
"type": 'BOOLEAN',
"description": None,
"mode": 'NULLABLE'},
{"name": 'record',
"type": 'RECORD',
"description": None,
"mode": 'NULLABLE',
"fields":
[{"name": 'string2',
"type": 'STRING',
"description": None,
"mode": 'NULLABLE'},
{"name": 'record',
"type": 'RECORD',
"description": None,
"mode": 'NULLABLE',
"fields":
[{"name": 'string2',
"type": 'STRING',
"description": None,
"mode": 'NULLABLE'
}]
}
]
}]
},
{"name": 'array',
"type": 'RECORD',
"description": None,
"mode": 'REPEATED',
"fields": [
{"name": 'string3',
"type": 'STRING',
"description": None,
"mode": 'NULLABLE'},
{"name": 'integer3',
"type": 'INTEGER',
"description": None,
"mode": 'NULLABLE'}
]}
]
depth = bqtools.calc_field_depth(toTest)
self.assertEqual(depth, 3, "measured field depth should be 3")
bqtools.trunc_field_depth(toTest, 2)
depth = bqtools.calc_field_depth(toTest)
self.assertEqual(depth, 2, "measured field depth should be 2 is {}".format(depth))
depth = bqtools.calc_field_depth(self.monsterSchema['schema']['fields'])
self.assertEqual(depth, 13, "measured field depth should be 13 is {}".format(depth))
newMonster = copy.deepcopy(self.monsterSchema)
yamonster = bqtools.trunc_field_depth(newMonster['schema']['fields'], 10)
depth = bqtools.calc_field_depth(newMonster['schema']['fields'])
self.assertEqual(depth, 10, "measured field depth should be 10 is {}".format(depth))
depth = bqtools.calc_field_depth(yamonster)
self.assertEqual(depth, 10, "measured field depth should be 10 is {}".format(depth))
def main(argv):
unittest.main()
if __name__ == '__main__':
main(sys.argv)
| 58.90413 | 414 | 0.345344 |
4a2156303981e21d5bacccd853fa7f2336077da9 | 461 | py | Python | Classes Programs/classEx.py | jarvis-1805/PYTHON-FILES | c12dc74f83dadb08acdf5d6260222c3ce8a91797 | ["MIT"] | 1 | 2020-05-23T12:37:11.000Z | 2020-05-23T12:37:11.000Z | Classes Programs/classEx.py | jarvis-1805/PYTHON-FILES | c12dc74f83dadb08acdf5d6260222c3ce8a91797 | ["MIT"] | null | null | null | Classes Programs/classEx.py | jarvis-1805/PYTHON-FILES | c12dc74f83dadb08acdf5d6260222c3ce8a91797 | ["MIT"] | null | null | null |
class Person:
def __init__(self, fname, lname):
self.firstname = fname
self.lastname = lname
def printname(self):
print(self.firstname, self.lastname)
class Student(Person):
def __init__(self, fname, lname, year):
super().__init__(fname, lname)
self.graduationyear = year
def welcome(self):
print("Welcome", self.firstname, self.lastname, "to the class of", self.graduationyear)
x = Student("Mike", "Olsen", 2019)
x.welcome()
| 25.611111 | 91 | 0.691974 |
4a215730e96f347a78ad088205eaeb58c7b5a933 | 1,931 | py | Python | src/generate_xml.py | AlvaroCavalcante/video2tfrecord | a77b6e999bbf0edbc254c0fa42549d9ab5f9013c | ["MIT"] | 1 | 2022-03-12T20:43:26.000Z | 2022-03-12T20:43:26.000Z | src/generate_xml.py | AlvaroCavalcante/video2tfrecord | a77b6e999bbf0edbc254c0fa42549d9ab5f9013c | ["MIT"] | null | null | null | src/generate_xml.py | AlvaroCavalcante/video2tfrecord | a77b6e999bbf0edbc254c0fa42549d9ab5f9013c | ["MIT"] | null | null | null |
import xml.etree.cElementTree as ET
class AnnotationGenerator(object):
def __init__(self, xml_path):
self.xml_path = xml_path
def generate_xml_annotation(self, output_bbox, im_width, im_height, file_name):
try:
annotation = ET.Element('annotation')
ET.SubElement(annotation, 'filename').text = file_name
size = ET.SubElement(annotation, 'size')
ET.SubElement(size, 'width').text = str(im_width)
ET.SubElement(size, 'height').text = str(im_height)
ET.SubElement(size, 'depth').text = '3'
count = 0
for class_name in output_bbox:
box = output_bbox.get(class_name)
if class_name == 'hand_1' or class_name == 'hand_2':
class_name = 'hand'
objectBox = ET.SubElement(annotation, 'object')
ET.SubElement(objectBox, 'name').text = class_name
ET.SubElement(objectBox, 'pose').text = 'Unspecified'
ET.SubElement(objectBox, 'truncated').text = '0'
ET.SubElement(objectBox, 'difficult').text = '0'
bndBox = ET.SubElement(objectBox, 'bndbox')
ET.SubElement(bndBox, 'xmin').text = str(box['xmin'])
ET.SubElement(bndBox, 'ymin').text = str(box['ymin'])
ET.SubElement(bndBox, 'xmax').text = str(box['xmax'])
ET.SubElement(bndBox, 'ymax').text = str(box['ymax'])
count += 1
if 'jpg' in file_name.split('.') or 'png' in file_name.split('.') or 'jpeg' in file_name.split('.'):
file_name = file_name.split('.')[0]
arquivo = ET.ElementTree(annotation)
arquivo.write(self.xml_path + file_name + '.xml')
except Exception as e:
print('Error to generate the XML for image {}'.format(file_name))
print(e)
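# --- Illustrative usage sketch (not part of the original module) ---
# The output directory, image size, box coordinates and file name below are assumptions for
# illustration only; they show the bbox dict shape generate_xml_annotation expects.
if __name__ == '__main__':
    generator = AnnotationGenerator('/tmp/annotations/')  # hypothetical output directory
    sample_boxes = {
        'hand_1': {'xmin': 10, 'ymin': 20, 'xmax': 110, 'ymax': 140},
        'face': {'xmin': 200, 'ymin': 30, 'xmax': 320, 'ymax': 180},
    }
    # writes /tmp/annotations/frame_0001.xml with one <object> entry per box
    generator.generate_xml_annotation(sample_boxes, 640, 480, 'frame_0001.jpg')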
| 43.886364 | 112 | 0.559296 |
4a215732f86119f063de4bb0c02b22c06dfaba60 | 935 | py | Python | hdata/HighDenseTrainPatchMaker.py | beratkurar/page-segmentation-using-fcn | 1462fc66c019eb06d9b7bd2b2561d09d63476385 | ["MIT"] | null | null | null | hdata/HighDenseTrainPatchMaker.py | beratkurar/page-segmentation-using-fcn | 1462fc66c019eb06d9b7bd2b2561d09d63476385 | ["MIT"] | null | null | null | hdata/HighDenseTrainPatchMaker.py | beratkurar/page-segmentation-using-fcn | 1462fc66c019eb06d9b7bd2b2561d09d63476385 | ["MIT"] | 1 | 2018-12-26T13:10:26.000Z | 2018-12-26T13:10:26.000Z |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 30 07:43:28 2017
@author: B
"""
import random
import cv2
import os
patchSize=320
patchNumber=0
folder='train/'
lfolder='ltrain/'
i=0
while (i <1000):
pages=os.listdir(folder)
page_number=random.randint(0,19)
page_name=pages[page_number]
page=cv2.imread(folder+page_name,3)
lpage=cv2.imread(lfolder+page_name[:-3]+'bmp',0)
rows,cols,ch=page.shape
x=random.randint(0,rows-patchSize)
y=random.randint(0,cols-patchSize)
lpatch=lpage[x:x+patchSize,y:y+patchSize]
bg=list(lpatch.flatten()).count(255)
print(bg)
if bg<85000:
print(i)
cv2.imwrite("p"+lfolder+page_name[:-4]+"_patch"+str(i)+".png",lpatch)
patch=page[x:x+patchSize,y:y+patchSize]
cv2.imwrite("p"+folder+page_name[:-4]+"_patch"+str(i)+".png",patch)
i=i+1
else:
print('pass')
| 23.375 | 78 | 0.604278 |
4a21590b8069da489bba84b2d9710172c74f6c83
| 2,013 |
py
|
Python
|
DailyProgrammer/DP20180129A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | 2 |
2020-12-23T18:59:22.000Z
|
2021-04-14T13:16:09.000Z
|
DailyProgrammer/DP20180129A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
DailyProgrammer/DP20180129A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
"""
[2018-01-29] Challenge #349 [Easy] Change Calculator
https://www.reddit.com/r/dailyprogrammer/comments/7ttiq5/20180129_challenge_349_easy_change_calculator/
# Description
You own a nice tiny mini-market that sells candies to children. You need to know if you'll be able to give the change
back to those little cute creatures and it happens you don't know basic math because when you were a child you were
always eating candies and did not study very well. So you need some help from a little tool that tell you if you can.
# Input Description
On the line beginning "Input:" you'll be given a single number that tells you how much change to produce, and then a list of
coins you own. The next line, beginning with "Output:", tells you the number of coins to give back to achieve the
change you need to give back (bounded by the number of coins you have). Here's one that says "give the customer 3 or
fewer coins". Example:
Input: 10 5 5 2 2 1
Output: n <= 3
# Output Description
Your progam should emit the coins you would give back to yield the correct value of change, if possible. Multiple
solutions may be possible. If no solution is possible, state that. Example:
5 5
# Challenge Input
Input: 150 100 50 50 50 50
Output: n < 5
Input: 130 100 20 18 12 5 5
Output: n < 6
Input: 200 50 50 20 20 10
Output: n >= 5
# Bonus
Output the minimum number of coins needed:
Input: 150 100 50 50 50 50
Output: 2
Input: 130 100 20 18 12 5 5
Output: 3
# Challenge
Input: 150 1 1 ... 1 (1 repeated 10000 times)
Output: 150
# Note
This is the subset sum problem with a twist, a classic computational complexity problem which poses fun questions about
efficient calculation and lower bounds of complexity.
# Credit
This challenge was suggested by use /u/Scara95, many thanks. If you have a challenge idea, please share it on
/r/dailyprogrammer_ideas and there's a good chance we'll use it.
"""
def main():
pass
if __name__ == "__main__":
main()
| 37.981132 | 119 | 0.73075 |
4a2159cf211357e466196cf58da00274fd20a30b | 44 | py | Python | students/intro_set.py | jonleopard/hacktoberfest-1 | 00b5dabd33aaac1dbde273af6e882769b564df3b | ["MIT"] | 35 | 2018-10-06T07:32:31.000Z | 2021-10-08T06:27:40.000Z | students/intro_set.py | jonleopard/hacktoberfest-1 | 00b5dabd33aaac1dbde273af6e882769b564df3b | ["MIT"] | 26 | 2018-10-03T09:32:56.000Z | 2020-05-25T20:27:16.000Z | students/intro_set.py | jonleopard/hacktoberfest-1 | 00b5dabd33aaac1dbde273af6e882769b564df3b | ["MIT"] | 239 | 2018-10-03T09:09:57.000Z | 2021-10-20T16:43:56.000Z |
list_new=[1,2,2,4,4,4]
print(set(list_new))
| 14.666667 | 22 | 0.681818 |
4a215a02ecdce9ac13c3dd8873a39e4cfc859ad6 | 29,316 | py | Python | repos/insightface/src/data.py | batermj/DeepVideoAnalytics | daad116b87370fce1799b7948af73b92f617cf41 | ["BSD-3-Clause", "Apache-2.0", "MIT"] | 1 | 2021-03-23T12:31:59.000Z | 2021-03-23T12:31:59.000Z | repos/insightface/src/data.py | batermj/DeepVideoAnalytics | daad116b87370fce1799b7948af73b92f617cf41 | ["BSD-3-Clause", "Apache-2.0", "MIT"] | null | null | null | repos/insightface/src/data.py | batermj/DeepVideoAnalytics | daad116b87370fce1799b7948af73b92f617cf41 | ["BSD-3-Clause", "Apache-2.0", "MIT"] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import logging
import sys
import sklearn
import datetime
import numpy as np
import cv2
import mxnet as mx
from mxnet import ndarray as nd
#from . import _ndarray_internal as _internal
#from mxnet._ndarray_internal import _cvimresize as imresize
#from ._ndarray_internal import _cvcopyMakeBorder as copyMakeBorder
from mxnet import io
from mxnet import recordio
sys.path.append(os.path.join(os.path.dirname(__file__), 'common'))
import face_preprocess
import multiprocessing
logger = logging.getLogger()
def pick_triplets_impl(q_in, q_out):
more = True
while more:
deq = q_in.get()
if deq is None:
more = False
else:
embeddings, emb_start_idx, nrof_images, alpha = deq
print('running', emb_start_idx, nrof_images, os.getpid())
for j in xrange(1,nrof_images):
a_idx = emb_start_idx + j - 1
neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
for pair in xrange(j, nrof_images): # For every possible positive pair.
p_idx = emb_start_idx + pair
pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))
neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN
all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection
#all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selection
nrof_random_negs = all_neg.shape[0]
if nrof_random_negs>0:
rnd_idx = np.random.randint(nrof_random_negs)
n_idx = all_neg[rnd_idx]
#triplets.append( (a_idx, p_idx, n_idx) )
q_out.put( (a_idx, p_idx, n_idx) )
#emb_start_idx += nrof_images
print('exit',os.getpid())
class FaceImageIter(io.DataIter):
def __init__(self, batch_size, data_shape,
path_imgrec = None,
shuffle=False, aug_list=None, mean = None,
rand_mirror = False,
ctx_num = 0, images_per_identity = 0, data_extra = None, hard_mining = False,
triplet_params = None, coco_mode = False,
mx_model = None,
data_name='data', label_name='softmax_label', **kwargs):
super(FaceImageIter, self).__init__()
assert path_imgrec
if path_imgrec:
logging.info('loading recordio %s...',
path_imgrec)
path_imgidx = path_imgrec[0:-4]+".idx"
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
s = self.imgrec.read_idx(0)
header, _ = recordio.unpack(s)
if header.flag>0:
print('header0 label', header.label)
self.header0 = (int(header.label[0]), int(header.label[1]))
#assert(header.flag==1)
self.imgidx = range(1, int(header.label[0]))
self.id2range = {}
self.seq_identity = range(int(header.label[0]), int(header.label[1]))
for identity in self.seq_identity:
s = self.imgrec.read_idx(identity)
header, _ = recordio.unpack(s)
#print('flag', header.flag)
#print(header.label)
#assert(header.flag==2)
self.id2range[identity] = (int(header.label[0]), int(header.label[1]))
print('id2range', len(self.id2range))
else:
self.imgidx = list(self.imgrec.keys)
if shuffle:
self.seq = self.imgidx
self.oseq = self.imgidx
else:
self.seq = None
self.mean = mean
self.nd_mean = None
if self.mean:
self.mean = np.array(self.mean, dtype=np.float32).reshape(1,1,3)
self.nd_mean = mx.nd.array(self.mean).reshape((1,1,3))
self.check_data_shape(data_shape)
self.provide_data = [(data_name, (batch_size,) + data_shape)]
self.batch_size = batch_size
self.data_shape = data_shape
self.shuffle = shuffle
self.image_size = '%d,%d'%(data_shape[1],data_shape[2])
self.rand_mirror = rand_mirror
print('rand_mirror', rand_mirror)
#self.cast_aug = mx.image.CastAug()
#self.color_aug = mx.image.ColorJitterAug(0.4, 0.4, 0.4)
self.ctx_num = ctx_num
self.per_batch_size = int(self.batch_size/self.ctx_num)
self.images_per_identity = images_per_identity
if self.images_per_identity>0:
self.identities = int(self.per_batch_size/self.images_per_identity)
self.per_identities = self.identities
self.repeat = 3000000.0/(self.images_per_identity*len(self.id2range))
self.repeat = int(self.repeat)
print(self.images_per_identity, self.identities, self.repeat)
self.data_extra = None
if data_extra is not None:
self.data_extra = nd.array(data_extra)
self.provide_data = [(data_name, (batch_size,) + data_shape), ('extra', data_extra.shape)]
self.hard_mining = hard_mining
self.mx_model = mx_model
if self.hard_mining:
assert self.images_per_identity>0
assert self.mx_model is not None
self.triplet_params = triplet_params
self.triplet_mode = False
self.coco_mode = coco_mode
if len(label_name)>0:
self.provide_label = [(label_name, (batch_size,))]
else:
self.provide_label = []
if self.coco_mode:
assert self.triplet_params is None
assert self.images_per_identity>0
if self.triplet_params is not None:
assert self.images_per_identity>0
assert self.mx_model is not None
self.triplet_bag_size = self.triplet_params[0]
self.triplet_alpha = self.triplet_params[1]
self.triplet_max_ap = self.triplet_params[2]
assert self.triplet_bag_size>0
assert self.triplet_alpha>=0.0
assert self.triplet_alpha<=1.0
self.triplet_mode = True
self.triplet_oseq_cur = 0
self.triplet_oseq_reset()
self.seq_min_size = self.batch_size*2
self.cur = 0
self.is_init = False
self.times = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
#self.reset()
def ____pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
people_per_batch = len(nrof_images_per_class)
nrof_threads = 8
q_in = multiprocessing.Queue()
q_out = multiprocessing.Queue()
processes = [multiprocessing.Process(target=pick_triplets_impl, args=(q_in, q_out)) \
for i in range(nrof_threads)]
for p in processes:
p.start()
# VGG Face: Choosing good triplets is crucial and should strike a balance between
# selecting informative (i.e. challenging) examples and swamping training with examples that
# are too hard. This is achieved by extending each pair (a, p) to a triplet (a, p, n) by sampling
# the image n at random, but only between the ones that violate the triplet loss margin. The
# latter is a form of hard-negative mining, but it is not as aggressive (and much cheaper) than
# choosing the maximally violating example, as often done in structured output learning.
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
job = (embeddings, emb_start_idx, nrof_images, self.triplet_alpha)
emb_start_idx+=nrof_images
q_in.put(job)
for i in xrange(nrof_threads):
q_in.put(None)
print('joining')
for p in processes:
p.join()
print('joined')
q_out.put(None)
triplets = []
more = True
while more:
triplet = q_out.get()
if triplet is None:
more = False
else:
triplets.append(triplet)
np.random.shuffle(triplets)
return triplets
#cal pairwise dists on single gpu
def _pairwise_dists(self, embeddings):
nd_embedding = mx.nd.array(embeddings, mx.gpu(0))
pdists = []
for idx in xrange(embeddings.shape[0]):
a_embedding = nd_embedding[idx]
body = mx.nd.broadcast_sub(a_embedding, nd_embedding)
body = body*body
body = mx.nd.sum_axis(body, axis=1)
ret = body.asnumpy()
#print(ret.shape)
pdists.append(ret)
return pdists
def pairwise_dists(self, embeddings):
nd_embedding_list = []
for i in xrange(self.ctx_num):
nd_embedding = mx.nd.array(embeddings, mx.gpu(i))
nd_embedding_list.append(nd_embedding)
nd_pdists = []
pdists = []
for idx in xrange(embeddings.shape[0]):
emb_idx = idx%self.ctx_num
nd_embedding = nd_embedding_list[emb_idx]
a_embedding = nd_embedding[idx]
body = mx.nd.broadcast_sub(a_embedding, nd_embedding)
body = body*body
body = mx.nd.sum_axis(body, axis=1)
nd_pdists.append(body)
if len(nd_pdists)==self.ctx_num or idx==embeddings.shape[0]-1:
for x in nd_pdists:
pdists.append(x.asnumpy())
nd_pdists = []
return pdists
def pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
triplets = []
people_per_batch = len(nrof_images_per_class)
#self.time_reset()
pdists = self.pairwise_dists(embeddings)
#self.times[3] += self.time_elapsed()
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
for j in xrange(1,nrof_images):
#self.time_reset()
a_idx = emb_start_idx + j - 1
#neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
neg_dists_sqr = pdists[a_idx]
#self.times[3] += self.time_elapsed()
for pair in xrange(j, nrof_images): # For every possible positive pair.
p_idx = emb_start_idx + pair
#self.time_reset()
pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))
#self.times[4] += self.time_elapsed()
#self.time_reset()
neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN
if self.triplet_max_ap>0.0:
if pos_dist_sqr>self.triplet_max_ap:
continue
all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<self.triplet_alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection
#self.times[5] += self.time_elapsed()
#self.time_reset()
#all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selection
nrof_random_negs = all_neg.shape[0]
if nrof_random_negs>0:
rnd_idx = np.random.randint(nrof_random_negs)
n_idx = all_neg[rnd_idx]
triplets.append( (a_idx, p_idx, n_idx) )
emb_start_idx += nrof_images
np.random.shuffle(triplets)
return triplets
def __pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
triplets = []
people_per_batch = len(nrof_images_per_class)
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
if nrof_images<2:
continue
for j in xrange(1,nrof_images):
a_idx = emb_start_idx + j - 1
pcount = nrof_images-1
dists_a2all = np.sum(np.square(embeddings[a_idx] - embeddings), 1) #(N,)
#print(a_idx, dists_a2all.shape)
ba = emb_start_idx
bb = emb_start_idx+nrof_images
sorted_idx = np.argsort(dists_a2all)
#print('assert', sorted_idx[0], a_idx)
#assert sorted_idx[0]==a_idx
#for idx in sorted_idx:
# print(idx, dists_a2all[idx])
p2n_map = {}
pfound = 0
for idx in sorted_idx:
if idx==a_idx: #is anchor
continue
if idx<bb and idx>=ba: #is pos
p2n_map[idx] = [dists_a2all[idx], []] #ap, [neg_list]
pfound+=1
else: # is neg
an = dists_a2all[idx]
if pfound==pcount and len(p2n_map)==0:
break
to_del = []
for p_idx in p2n_map:
v = p2n_map[p_idx]
an_ap = an - v[0]
if an_ap<self.triplet_alpha:
v[1].append(idx)
else:
#output
if len(v[1])>0:
n_idx = random.choice(v[1])
triplets.append( (a_idx, p_idx, n_idx) )
to_del.append(p_idx)
for _del in to_del:
del p2n_map[_del]
for p_idx,v in p2n_map.iteritems():
if len(v[1])>0:
n_idx = random.choice(v[1])
triplets.append( (a_idx, p_idx, n_idx) )
emb_start_idx += nrof_images
np.random.shuffle(triplets)
return triplets
def triplet_oseq_reset(self):
#reset self.oseq by identities seq
self.triplet_oseq_cur = 0
ids = []
for k in self.id2range:
ids.append(k)
random.shuffle(ids)
self.oseq = []
for _id in ids:
v = self.id2range[_id]
_list = range(*v)
random.shuffle(_list)
if len(_list)>self.images_per_identity:
_list = _list[0:self.images_per_identity]
self.oseq += _list
print('oseq', len(self.oseq))
def time_reset(self):
self.time_now = datetime.datetime.now()
def time_elapsed(self):
time_now = datetime.datetime.now()
diff = time_now - self.time_now
return diff.total_seconds()
def select_triplets(self):
self.seq = []
while len(self.seq)<self.seq_min_size:
self.time_reset()
embeddings = None
bag_size = self.triplet_bag_size
batch_size = self.batch_size
#data = np.zeros( (bag_size,)+self.data_shape )
#label = np.zeros( (bag_size,) )
tag = []
#idx = np.zeros( (bag_size,) )
print('eval %d images..'%bag_size, self.triplet_oseq_cur)
print('triplet time stat', self.times)
if self.triplet_oseq_cur+bag_size>len(self.oseq):
self.triplet_oseq_reset()
print('eval %d images..'%bag_size, self.triplet_oseq_cur)
self.times[0] += self.time_elapsed()
self.time_reset()
#print(data.shape)
data = nd.zeros( self.provide_data[0][1] )
label = nd.zeros( self.provide_label[0][1] )
ba = 0
while True:
bb = min(ba+batch_size, bag_size)
if ba>=bb:
break
#_batch = self.data_iter.next()
#_data = _batch.data[0].asnumpy()
#print(_data.shape)
#_label = _batch.label[0].asnumpy()
#data[ba:bb,:,:,:] = _data
#label[ba:bb] = _label
for i in xrange(ba, bb):
_idx = self.oseq[i+self.triplet_oseq_cur]
s = self.imgrec.read_idx(_idx)
header, img = recordio.unpack(s)
img = self.imdecode(img)
data[i-ba][:] = self.postprocess_data(img)
label[i-ba][:] = header.label
tag.append( ( int(header.label), _idx) )
#idx[i] = _idx
db = mx.io.DataBatch(data=(data,), label=(label,))
self.mx_model.forward(db, is_train=False)
net_out = self.mx_model.get_outputs()
#print('eval for selecting triplets',ba,bb)
#print(net_out)
#print(len(net_out))
#print(net_out[0].asnumpy())
net_out = net_out[0].asnumpy()
#print(net_out)
#print('net_out', net_out.shape)
if embeddings is None:
embeddings = np.zeros( (bag_size, net_out.shape[1]))
embeddings[ba:bb,:] = net_out
ba = bb
assert len(tag)==bag_size
self.triplet_oseq_cur+=bag_size
embeddings = sklearn.preprocessing.normalize(embeddings)
self.times[1] += self.time_elapsed()
self.time_reset()
nrof_images_per_class = [1]
for i in xrange(1, bag_size):
if tag[i][0]==tag[i-1][0]:
nrof_images_per_class[-1]+=1
else:
nrof_images_per_class.append(1)
triplets = self.pick_triplets(embeddings, nrof_images_per_class) # shape=(T,3)
print('found triplets', len(triplets))
ba = 0
while True:
bb = ba+self.per_batch_size//3
if bb>len(triplets):
break
_triplets = triplets[ba:bb]
for i in xrange(3):
for triplet in _triplets:
_pos = triplet[i]
_idx = tag[_pos][1]
self.seq.append(_idx)
ba = bb
self.times[2] += self.time_elapsed()
def triplet_reset(self):
self.select_triplets()
def hard_mining_reset(self):
#import faiss
from annoy import AnnoyIndex
data = nd.zeros( self.provide_data[0][1] )
label = nd.zeros( self.provide_label[0][1] )
#label = np.zeros( self.provide_label[0][1] )
X = None
ba = 0
batch_num = 0
while ba<len(self.oseq):
batch_num+=1
if batch_num%10==0:
print('loading batch',batch_num, ba)
bb = min(ba+self.batch_size, len(self.oseq))
_count = bb-ba
for i in xrange(_count):
idx = self.oseq[i+ba]
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
img = self.imdecode(img)
data[i][:] = self.postprocess_data(img)
label[i][:] = header.label
db = mx.io.DataBatch(data=(data,self.data_extra), label=(label,))
self.mx_model.forward(db, is_train=False)
net_out = self.mx_model.get_outputs()
embedding = net_out[0].asnumpy()
nembedding = sklearn.preprocessing.normalize(embedding)
if _count<self.batch_size:
nembedding = nembedding[0:_count,:]
if X is None:
X = np.zeros( (len(self.id2range), nembedding.shape[1]), dtype=np.float32 )
nplabel = label.asnumpy()
for i in xrange(_count):
ilabel = int(nplabel[i])
#print(ilabel, ilabel.__class__)
X[ilabel] += nembedding[i]
ba = bb
X = sklearn.preprocessing.normalize(X)
d = X.shape[1]
t = AnnoyIndex(d, metric='euclidean')
for i in xrange(X.shape[0]):
t.add_item(i, X[i])
print('start to build index')
t.build(20)
print(X.shape)
k = self.per_identities
self.seq = []
for i in xrange(X.shape[0]):
nnlist = t.get_nns_by_item(i, k)
assert nnlist[0]==i
for _label in nnlist:
assert _label<len(self.id2range)
_id = self.header0[0]+_label
v = self.id2range[_id]
_list = range(*v)
if len(_list)<self.images_per_identity:
random.shuffle(_list)
else:
_list = np.random.choice(_list, self.images_per_identity, replace=False)
for i in xrange(self.images_per_identity):
_idx = _list[i%len(_list)]
self.seq.append(_idx)
#faiss_params = [20,5]
#quantizer = faiss.IndexFlatL2(d) # the other index
#index = faiss.IndexIVFFlat(quantizer, d, faiss_params[0], faiss.METRIC_L2)
#assert not index.is_trained
#index.train(X)
#index.add(X)
#assert index.is_trained
#print('trained')
#index.nprobe = faiss_params[1]
#D, I = index.search(X, k) # actual search
#print(I.shape)
#self.seq = []
#for i in xrange(I.shape[0]):
# #assert I[i][0]==i
# for j in xrange(k):
# _label = I[i][j]
# assert _label<len(self.id2range)
# _id = self.header0[0]+_label
# v = self.id2range[_id]
# _list = range(*v)
# if len(_list)<self.images_per_identity:
# random.shuffle(_list)
# else:
# _list = np.random.choice(_list, self.images_per_identity, replace=False)
# for i in xrange(self.images_per_identity):
# _idx = _list[i%len(_list)]
# self.seq.append(_idx)
def reset(self):
"""Resets the iterator to the beginning of the data."""
print('call reset()')
self.cur = 0
if self.images_per_identity>0:
if self.triplet_mode:
self.triplet_reset()
elif not self.hard_mining:
self.seq = []
idlist = []
for _id,v in self.id2range.iteritems():
idlist.append((_id,range(*v)))
for r in xrange(self.repeat):
if r%10==0:
print('repeat', r)
if self.shuffle:
random.shuffle(idlist)
for item in idlist:
_id = item[0]
_list = item[1]
#random.shuffle(_list)
if len(_list)<self.images_per_identity:
random.shuffle(_list)
else:
_list = np.random.choice(_list, self.images_per_identity, replace=False)
for i in xrange(self.images_per_identity):
_idx = _list[i%len(_list)]
self.seq.append(_idx)
else:
self.hard_mining_reset()
print('seq len', len(self.seq))
else:
if self.shuffle:
random.shuffle(self.seq)
if self.seq is None and self.imgrec is not None:
self.imgrec.reset()
def num_samples(self):
return len(self.seq)
def next_sample(self):
"""Helper function for reading in next sample."""
#set total batch size, for example, 1800, and maximum number of images for each identity, for example 45
if self.seq is not None:
if self.cur >= len(self.seq):
raise StopIteration
idx = self.seq[self.cur]
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
return header.label, img, None, None
else:
label, fname, bbox, landmark = self.imglist[idx]
return label, self.read_image(fname), bbox, landmark
else:
s = self.imgrec.read()
if s is None:
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img, None, None
def brightness_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
src *= alpha
return src
def contrast_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = np.array([[[0.299, 0.587, 0.114]]])
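    # blend towards the mean pixel luminance: out = alpha * src + (1 - alpha) * mean_luminance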
gray = src * coef
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
src *= alpha
src += gray
return src
def saturation_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = np.array([[[0.299, 0.587, 0.114]]])
gray = src * coef
gray = np.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src
def color_aug(self, img, x):
augs = [self.brightness_aug, self.contrast_aug, self.saturation_aug]
random.shuffle(augs)
for aug in augs:
#print(img.shape)
img = aug(img, x)
#print(img.shape)
return img
def mirror_aug(self, img):
_rd = random.randint(0,1)
if _rd==1:
for c in xrange(img.shape[2]):
img[:,:,c] = np.fliplr(img[:,:,c])
return img
  def next(self):
    """Returns the next batch of data."""
    if not self.is_init:
      self.reset()
      self.is_init = True
#print('in next', self.cur, self.labelcur)
batch_size = self.batch_size
c, h, w = self.data_shape
batch_data = nd.empty((batch_size, c, h, w))
if self.provide_label is not None:
batch_label = nd.empty(self.provide_label[0][1])
i = 0
try:
while i < batch_size:
label, s, bbox, landmark = self.next_sample()
_data = self.imdecode(s)
if self.rand_mirror:
_rd = random.randint(0,1)
if _rd==1:
_data = mx.ndarray.flip(data=_data, axis=1)
if self.nd_mean is not None:
_data = _data.astype('float32')
_data -= self.nd_mean
_data *= 0.0078125
#_npdata = _data.asnumpy()
#if landmark is not None:
# _npdata = face_preprocess.preprocess(_npdata, bbox = bbox, landmark=landmark, image_size=self.image_size)
#if self.rand_mirror:
# _npdata = self.mirror_aug(_npdata)
#if self.mean is not None:
# _npdata = _npdata.astype(np.float32)
# _npdata -= self.mean
# _npdata *= 0.0078125
#nimg = np.zeros(_npdata.shape, dtype=np.float32)
#nimg[self.patch[1]:self.patch[3],self.patch[0]:self.patch[2],:] = _npdata[self.patch[1]:self.patch[3], self.patch[0]:self.patch[2], :]
#_data = mx.nd.array(nimg)
data = [_data]
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
#print('aa',data[0].shape)
#data = self.augmentation_transform(data)
#print('bb',data[0].shape)
for datum in data:
assert i < batch_size, 'Batch size must be multiples of augmenter output length'
#print(datum.shape)
batch_data[i][:] = self.postprocess_data(datum)
if self.provide_label is not None:
if not self.coco_mode:
batch_label[i][:] = label
else:
batch_label[i][:] = (i%self.per_batch_size)//self.images_per_identity
i += 1
except StopIteration:
if i<batch_size:
raise StopIteration
#print('next end', batch_size, i)
_label = None
if self.provide_label is not None:
_label = [batch_label]
if self.data_extra is not None:
return io.DataBatch([batch_data, self.data_extra], _label, batch_size - i)
else:
return io.DataBatch([batch_data], _label, batch_size - i)
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
img = mx.image.imdecode(s) #mx.ndarray
return img
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = [ret for src in data for ret in aug(src)]
return data
def postprocess_data(self, datum):
"""Final postprocessing step before image is loaded into the batch."""
return nd.transpose(datum, axes=(2, 0, 1))
class FaceImageIterList(io.DataIter):
def __init__(self, iter_list):
assert len(iter_list)>0
self.provide_data = iter_list[0].provide_data
self.provide_label = iter_list[0].provide_label
self.iter_list = iter_list
self.cur_iter = None
def reset(self):
self.cur_iter.reset()
def next(self):
self.cur_iter = random.choice(self.iter_list)
while True:
try:
ret = self.cur_iter.next()
except StopIteration:
self.cur_iter.reset()
continue
return ret
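# Usage sketch (illustrative names, not part of this module):
#   train_iter = FaceImageIterList([iter_a, iter_b])
#   batch = train_iter.next()   # each call draws a batch from a randomly chosen iterator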
| 38.27154 | 151 | 0.566517 |
4a215a5db8f927e389e10120934e5b5c717711e4
| 1,275 |
py
|
Python
|
backend/users/models.py
|
Dragonfly-Co/django-shop-sample
|
154cc3510767c468b76fbf98b501aab044c07e71
|
[
"MIT"
] | null | null | null |
backend/users/models.py
|
Dragonfly-Co/django-shop-sample
|
154cc3510767c468b76fbf98b501aab044c07e71
|
[
"MIT"
] | null | null | null |
backend/users/models.py
|
Dragonfly-Co/django-shop-sample
|
154cc3510767c468b76fbf98b501aab044c07e71
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import AbstractUser
from django.db import models
def _get_image_directory_path(instance, filename) -> str:
    # Django passes (instance, filename) to `upload_to` callables; files go to
    # MEDIA_ROOT / user_<id>_<username> / <filename>
    return f'user_{instance.id}_{instance.username}/{filename}'
class GenderChoices(models.TextChoices):
MALE = 'Male'
    FEMALE = 'Female'
class User(AbstractUser):
middle_initial = models.CharField(null=True, max_length=250)
birthday = models.DateField(null=True, blank=False)
phone = models.CharField(null=True, max_length=50)
gender = models.CharField(choices=GenderChoices.choices, max_length=50, null=True, blank=False)
country = models.CharField(null=False, blank=False, max_length=100, default='Russia') # TODO: create choices
region = models.CharField(null=True, max_length=250)
city = models.CharField(null=True, max_length=250)
address_line = models.TextField(null=True)
image = models.URLField(null=True) # TODO: create like in other project
# image = ImageField(upload_to=_get_image_directory_path)
@property
def get_user_full_name(self) -> str:
if self.middle_initial is None:
return self.get_full_name()
return f'{self.last_name} {self.first_name} {self.middle_initial}'
| 39.84375 | 113 | 0.73098 |
4a215a7c9bd152778c8e63f11e79876093746094
| 13,766 |
py
|
Python
|
alfworld/agents/graph_map/slam_map.py
|
roy402/VSGM
|
15844232b8eebb5ec7301a9a48a3210925114da9
|
[
"MIT"
] | 6 |
2021-05-22T15:33:42.000Z
|
2022-01-12T03:34:39.000Z
|
alfworld/agents/graph_map/slam_map.py
|
roy402/VSGM
|
15844232b8eebb5ec7301a9a48a3210925114da9
|
[
"MIT"
] | 1 |
2021-06-19T10:04:13.000Z
|
2021-06-20T03:37:23.000Z
|
alfworld/agents/graph_map/slam_map.py
|
roy402/VSGM
|
15844232b8eebb5ec7301a9a48a3210925114da9
|
[
"MIT"
] | null | null | null |
import torch
from sys import platform
if platform != "win32":
from torch_geometric.data import Data
else:
import open3d as o3d
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import quaternion
from scipy.spatial.transform import Rotation
sys.path.insert(0, os.path.join(os.environ['ALFWORLD_ROOT']))
from agents.graph_map.utils_graph_map import *#intrinsic_from_fov, load_extrinsic, load_intrinsic, pixel_coord_np, grid, get_cam_coords
from agents.graph_map.graph_map import BasicMap, test_load_img, test_load_meta_data
'''
Neural-SLAM
'''
sys.path.insert(0, os.path.join(os.environ['ALFWORLD_ROOT']))
from agents.graph_map.Neural_SLAM.env.habitat.utils import visualizations as vu
from agents.graph_map.Neural_SLAM.env.habitat.utils import pose as pu
# 10x10xcfg.GRAPH_MAP.GRAPH_MAP_SIZE_S
# self.map.activate_nodes = set()
class SlamMAP(BasicMap):
def __init__(self, cfg, device="cuda"):
super().__init__()
self.cfg = cfg
self.device = device
self.GPU = cfg.SCENE_GRAPH.GPU
self.map_size_cm = self.cfg.SLAM_MAP.map_size_cm
self.EMBED_FEATURE_SIZE = cfg.SCENE_GRAPH.EMBED_FEATURE_SIZE
self.count_episode = 0
self.timestep = 0
self.mapper = self.build_mapper()
self.reset_map()
self.figure, self.ax = plt.subplots(
3, 1, figsize=(4, 6*16/9),
facecolor="whitesmoke",
num="Thread 0")
self.net_map_embedding = self._create_map_embedding_model()
self.net_map_embedding.to(device)
def build_mapper(self):
from agents.graph_map.Neural_SLAM.env.utils.map_builder import MapBuilder
params = {}
params['frame_width'] = self.cfg.SLAM_MAP.env_frame_width
params['frame_height'] = self.cfg.SLAM_MAP.env_frame_height
params['fov'] = self.cfg.SLAM_MAP.hfov
params['resolution'] = self.cfg.SLAM_MAP.map_resolution
params['map_size_cm'] = self.cfg.SLAM_MAP.map_size_cm
params['agent_min_z'] = 25
params['agent_max_z'] = 150
params['agent_height'] = self.cfg.SLAM_MAP.camera_height * 100
params['agent_view_angle'] = 0
params['du_scale'] = self.cfg.SLAM_MAP.du_scale
params['vision_range'] = self.cfg.SLAM_MAP.vision_range
params['visualize'] = False
params['obs_threshold'] = self.cfg.SLAM_MAP.obs_threshold
mapper = MapBuilder(params)
return mapper
def _create_map_embedding_model(self):
from agents.graph_map.Neural_SLAM.model import Global_Policy
self.global_downscaling = self.cfg.SLAM_MAP.global_downscaling
map_size = self.cfg.SLAM_MAP.map_size_cm // self.cfg.SLAM_MAP.map_resolution
full_w, full_h = map_size, map_size
local_w, local_h = int(full_w / self.cfg.SLAM_MAP.global_downscaling),\
int(full_h / self.cfg.SLAM_MAP.global_downscaling)
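        # reuse Neural-SLAM's Global_Policy network as a map encoder that turns the
        # (1, local_w, local_h) grid into an EMBED_FEATURE_SIZE-dimensional feature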
return Global_Policy((1, local_w, local_h), out_shape=self.EMBED_FEATURE_SIZE)
def reset_map(self):
self.curr_loc = [self.map_size_cm/100.0/2.0,
self.map_size_cm/100.0/2.0, 0.]
self.curr_loc_gt = self.curr_loc
self.last_loc_gt = self.curr_loc_gt
self.mapper.reset_map(self.map_size_cm)
self.map = self.mapper.map
self.collison_map = np.zeros(self.map.shape[:2])
self.visited_gt = np.zeros(self.map.shape[:2])
full_map_size = self.cfg.SLAM_MAP.map_size_cm//self.cfg.SLAM_MAP.map_resolution
self.explorable_map = np.zeros((full_map_size, full_map_size))
self.count_episode += 1
self.timestep = 0
self.last_sim_location = None
# # Convert pose to cm and degrees for mapper
# mapper_gt_pose = (self.curr_loc_gt[0]*100.0,
# self.curr_loc_gt[1]*100.0,
# np.deg2rad(self.curr_loc_gt[2]))
# # Update ground_truth map and explored area
# fp_proj, self.map, fp_explored, self.explored_map = \
# self.mapper.update_map(depth, mapper_gt_pose)
def update_map(self, depth_image, agent_meta, sgg_result):
self.timestep += 1
# Get base sensor and ground-truth pose
dx_gt, dy_gt, do_gt = self.get_gt_pose_change(agent_meta)
        # remember the previous pose, then apply the relative motion
        self.last_loc_gt = np.copy(self.curr_loc_gt)
        self.curr_loc_gt = pu.get_new_pose(self.curr_loc_gt, (dx_gt, dy_gt, do_gt))
# Convert pose to cm and degrees for mapper
mapper_gt_pose = (self.curr_loc_gt[0]*100.0,
self.curr_loc_gt[1]*100.0,
np.deg2rad(self.curr_loc_gt[2]))
# Update ground_truth map and explored area
agent_view_degrees = agent_meta["cameraHorizon"]
self.mapper.agent_view_angle = agent_view_degrees
'''
depth process
'''
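        # MapBuilder.update_map expects a 2-D (H x W) depth map, so keep a single channel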
depth_image = depth_image[:, :, 0]
fp_proj, self.map, fp_explored, self.explored_map = \
self.mapper.update_map(depth_image, mapper_gt_pose)
# torch.Size([1, 1, 480, 480])
map_tensor = torch.tensor([[self.map]], dtype=torch.float).to(device=self.device)
self.map_feature = self.net_map_embedding(map_tensor)
'''
visualize
'''
# self.visualize_graph_map(depth_image)
return self.map_feature
def get_sim_location(self, agent_meta):
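        # the simulator is y-up: position['y'] is height, so (x, z) are used as the
        # 2-D map coordinates (the rotation axes are remapped the same way)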
x, z, y = \
agent_meta['position']['x'], agent_meta['position']['y'], agent_meta['position']['z']
rotation_x, rotation_y, rotation_z = \
agent_meta["rotation"]["x"], agent_meta["rotation"]["z"], agent_meta["rotation"]["y"]
# rotation = np.quaternion(0.999916136264801, 0, 0.0132847428321838, 0)
quat = Rotation.from_euler('xyz', [rotation_x, rotation_y, rotation_z], degrees=True)
# import pdb ;pdb.set_trace()
rotation = np.quaternion(*quat.as_quat())
axis = quaternion.as_euler_angles(rotation)[0]
if (axis % (2*np.pi)) < 0.1 or (axis % (2*np.pi)) > 2*np.pi - 0.1:
o = quaternion.as_euler_angles(rotation)[1]
else:
o = 2*np.pi - quaternion.as_euler_angles(rotation)[1]
if o > np.pi:
o -= 2 * np.pi
return x, y, o
def get_gt_pose_change(self, agent_meta):
curr_sim_pose = self.get_sim_location(agent_meta)
if self.last_sim_location is None:
self.last_sim_location = curr_sim_pose
dx, dy, do = pu.get_rel_pose_change(curr_sim_pose, self.last_sim_location)
self.last_sim_location = curr_sim_pose
return dx, dy, do
def _get_short_term_goal(self):
# # Update collision map
# if action == 1:
# x1, y1, t1 = self.last_loc
# x2, y2, t2 = self.curr_loc
# if abs(x1 - x2)< 0.05 and abs(y1 - y2) < 0.05:
# self.col_width += 2
# self.col_width = min(self.col_width, 9)
# else:
# self.col_width = 1
# dist = pu.get_l2_distance(x1, x2, y1, y2)
# if dist < args.collision_threshold: #Collision
# length = 2
# width = self.col_width
# buf = 3
# for i in range(length):
# for j in range(width):
# wx = x1 + 0.05*((i+buf) * np.cos(np.deg2rad(t1)) + \
# (j-width//2) * np.sin(np.deg2rad(t1)))
# wy = y1 + 0.05*((i+buf) * np.sin(np.deg2rad(t1)) - \
# (j-width//2) * np.cos(np.deg2rad(t1)))
# r, c = wy, wx
# r, c = int(r*100/args.map_resolution), int(c*100/args.map_resolution)
# [r, c] = pu.threshold_poses([r, c], self.collison_map.shape)
# self.collison_map[r,c] = 1
# Get last loc ground truth pose
last_start_x, last_start_y = self.last_loc_gt[0], self.last_loc_gt[1]
r, c = last_start_y, last_start_x
last_start = [int(r * 100.0/self.cfg.SLAM_MAP.map_resolution),
int(c * 100.0/self.cfg.SLAM_MAP.map_resolution)]
last_start = pu.threshold_poses(last_start, self.visited_gt.shape)
# Get ground truth pose
start_x_gt, start_y_gt, start_o_gt = self.curr_loc_gt
r, c = start_y_gt, start_x_gt
start_gt = [int(r * 100.0/self.cfg.SLAM_MAP.map_resolution),
int(c * 100.0/self.cfg.SLAM_MAP.map_resolution)]
start_gt = pu.threshold_poses(start_gt, self.visited_gt.shape)
steps = 25
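        # rasterise the straight segment between the previous and the current grid
        # pose into the visited map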
for i in range(steps):
x = int(last_start[0] + (start_gt[0] - last_start[0]) * (i+1) / steps)
y = int(last_start[1] + (start_gt[1] - last_start[1]) * (i+1) / steps)
self.visited_gt[x, y] = 1
def visualize_graph_map(self, rgb_img, depth_image):
self._get_short_term_goal()
dump_dir = "./slam_dump/"
ep_dir = '{}/{}/'.format(
dump_dir, self.count_episode)
if not os.path.exists(ep_dir):
os.makedirs(ep_dir)
# Get ground truth pose
start_x_gt, start_y_gt, start_o_gt = self.curr_loc_gt
goal_coor_didnt_use = (0, 0)
vis_grid = vu.get_colored_map(
self.map,
self.collison_map,
self.visited_gt,
self.visited_gt,
goal_coor_didnt_use,
self.explored_map,
self.explorable_map,
self.map*self.explored_map)
vis_grid = np.flipud(vis_grid)
vu.visualize(
self.figure, self.ax, rgb_img, depth_image, vis_grid[:, :, ::-1],
(start_x_gt, start_y_gt, start_o_gt),
(start_x_gt, start_y_gt, start_o_gt),
dump_dir, self.count_episode, self.timestep,
visualize=True, print_images=True, vis_style=0)
def main():
import yaml
import glob
import json
sys.path.insert(0, os.environ['ALFWORLD_ROOT'])
sys.path.insert(0, os.path.join(os.environ['ALFWORLD_ROOT'], 'agents'))
from agents.sgg import alfred_data_format
from config import cfg as _C
if sys.platform == "win32":
root = r"D:\cvml_project\projections\inverse_projection\data\d2\trial_T20190909_075955_678702\\"
root = r"D:\cvml_project\projections\inverse_projection\data\d2\trial_T20190909_100908_040512\\"
semantic_config_file = r"D:\alfred\alfred\models\config\sgg_without_oracle.yaml"
else:
root = r"/home/alfred/data/full_2.1.0/train/pick_and_place_simple-RemoteControl-None-Ottoman-208/trial_T20190909_100908_040512/"
semantic_config_file = "/home/alfred/models/config/graph_map.yaml"
def win():
nonlocal _C
config = _C
config.SLAM_MAP.map_resolution = 5
config.SLAM_MAP.map_size_cm = 2400
config.SLAM_MAP.map_size_cm = 800
config.SLAM_MAP.agent_max_z = 200
config.SLAM_MAP.vision_range = 64
# config.SLAM_MAP.vision_range = 128
alfred_dataset = alfred_data_format.AlfredDataset(config)
grap_map = SlamMAP(
config,
)
traj_data_path = root + "traj_data.json"
with open(traj_data_path, 'r') as f:
traj_data = json.load(f)
frames_depth = test_load_img(os.path.join(root, 'depth_images'), traj_data["images"], None).view(-1, 300, 300, 3)
frames_rgb = test_load_img(os.path.join(root, 'raw_images'), traj_data["images"], None, type_image=".jpg").view(-1, 300, 300, 3)
agent_meta_data = test_load_meta_data(root, traj_data["images"])
for i in range(len(frames_depth)):
depth_image = frames_depth[i]
rgb_img = frames_rgb[i]
agent_meta = agent_meta_data['agent_sgg_meta_data'][i]
# import pdb; pdb.set_trace()
target = None
feature = grap_map.update_map(
np.array(depth_image),
agent_meta,
target)
grap_map.visualize_graph_map(rgb_img, depth_image)
grap_map.reset_map()
def linux():
import time
start = time.time()
nonlocal _C
config = _C
config.merge_from_file(semantic_config_file)
# sgg model
sys.path.insert(0, os.environ['GRAPH_RCNN_ROOT'])
from lib.config import cfg
cfg.merge_from_file("/home/graph-rcnn.pytorch/configs/attribute.yaml")
config['sgg_cfg'] = cfg
alfred_dataset = alfred_data_format.AlfredDataset(config)
grap_map = SlamMAP(
config,
)
traj_data_path = root + "traj_data.json"
with open(traj_data_path, 'r') as f:
traj_data = json.load(f)
frames_depth = test_load_img(os.path.join(root, 'depth_images'), traj_data["images"], None).view(-1, 3, 300, 300)
frames_rgb = test_load_img(os.path.join(root, 'instance_masks'), traj_data["images"], alfred_dataset.trans_meta_data.transforms).view(-1, 3, 300, 300)
agent_meta_data = test_load_meta_data(root, traj_data["images"])
for i in range(len(frames_depth)):
depth_image = frames_depth[i]
rgb_image = frames_rgb[i]
agent_meta = agent_meta_data['agent_sgg_meta_data'][i]
# import pdb; pdb.set_trace()
            feature = grap_map.update_map(
                np.array(depth_image.view(300, 300, 3)),
                agent_meta,
                None)  # sgg_result is not used by update_map here
            grap_map.visualize_graph_map(rgb_image, depth_image)
grap_map.reset_map()
# time
end = time.time()
print(end - start)
if sys.platform == "win32":
win()
else:
linux()
if __name__ == '__main__':
main()
| 42.619195 | 158 | 0.613613 |
4a215a9a2320467e5391d25656bae0b63d06b194
| 701 |
py
|
Python
|
app/bond/__init__.py
|
BoostryJP/ibet-Issuer
|
efc599f8784be06588cf3ad8f239d36f24fdf3fa
|
[
"Apache-2.0"
] | 1 |
2021-06-16T03:38:07.000Z
|
2021-06-16T03:38:07.000Z
|
app/bond/__init__.py
|
BoostryJP/ibet-Issuer
|
efc599f8784be06588cf3ad8f239d36f24fdf3fa
|
[
"Apache-2.0"
] | 17 |
2021-04-26T03:28:40.000Z
|
2021-11-24T07:15:55.000Z
|
app/bond/__init__.py
|
BoostryJP/ibet-Issuer
|
efc599f8784be06588cf3ad8f239d36f24fdf3fa
|
[
"Apache-2.0"
] | 1 |
2021-05-30T14:09:11.000Z
|
2021-05-30T14:09:11.000Z
|
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from flask import Blueprint
bond = Blueprint('bond', __name__, url_prefix='/bond')
from . import views
| 28.04 | 74 | 0.776034 |
4a215abd2e764a76be30195e943ba6a7744aa4ad
| 47,618 |
py
|
Python
|
tensorflow/python/framework/importer_test.py
|
JanX2/tensorflow
|
dd788dbbfa544c1ea4768940ac4300c22bb7e88e
|
[
"Apache-2.0"
] | 3 |
2017-12-04T07:45:22.000Z
|
2018-04-20T06:53:17.000Z
|
tensorflow/python/framework/importer_test.py
|
JanX2/tensorflow
|
dd788dbbfa544c1ea4768940ac4300c22bb7e88e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/framework/importer_test.py
|
JanX2/tensorflow
|
dd788dbbfa544c1ea4768940ac4300c22bb7e88e
|
[
"Apache-2.0"
] | 1 |
2020-05-14T06:13:24.000Z
|
2020-05-14T06:13:24.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
@test_util.with_c_api
class ImportGraphDefTest(test.TestCase):
def _MakeGraphDef(self,
text,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
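    # prepend explicit producer/min_consumer versions, then parse the text-format proto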
text = "versions: { producer: %d min_consumer: %d };\n%s" % (producer,
min_consumer,
text)
ret = graph_pb2.GraphDef()
text_format.Merge(text, ret)
return ret
def testBasic(self):
with ops.Graph().as_default():
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutputFloatOutput' }
node { name: 'B' op: 'ListOutput'
attr { key: 'T'
value { list { type: DT_INT32 type: DT_FLOAT } } } }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_FLOAT } }
input: 'A:1' input: 'B:1' }
"""),
return_elements=["A", "B", "C", "D"],
name="import")
# Assert that the import process creates distinct tensors.
self.assertNotEqual(a.outputs[0].name, a.outputs[1].name)
self.assertNotEqual(b.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[1].name)
# Assert that the ops are connected according to the GraphDef topology.
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], b.outputs[1])
# Check the types of the returned ops and tensors.
self.assertEqual(a.type, "IntOutputFloatOutput")
self.assertEqual(b.type, "ListOutput")
self.assertEqual(c.type, "ListInput")
self.assertEqual(d.type, "ListInput")
self.assertEqual(a.outputs[0].dtype, dtypes.int32)
self.assertEqual(a.outputs[1].dtype, dtypes.float32)
self.assertEqual(b.outputs[0].dtype, dtypes.int32)
self.assertEqual(b.outputs[1].dtype, dtypes.float32)
# Check the names of the returned ops.
self.assertEqual(a.name, "import/A")
self.assertEqual(b.name, "import/B")
self.assertEqual(c.name, "import/C")
self.assertEqual(d.name, "import/D")
# Check that the op_def is still available.
self.assertNotEqual(None, a.op_def)
def testMultipleImport(self):
if ops._USE_C_API: return # TODO(skyewm): set uniquify_names
graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'IntInput' input: 'A:0' }
""")
with ops.Graph().as_default():
# Initial import
a, b = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="")
self.assertEqual(a.name, "A")
self.assertEqual(b.name, "B")
self.assertEqual(list(b.inputs), [a.outputs[0]])
# Repeat the same import
a1, b1 = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="")
self.assertEqual(a1.name, "A_1")
self.assertEqual(b1.name, "B_1")
self.assertEqual(list(b1.inputs), [a1.outputs[0]])
# Repeat the same import again
a2, b2 = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="")
self.assertEqual(a2.name, "A_2")
self.assertEqual(b2.name, "B_2")
self.assertEqual(list(b2.inputs), [a2.outputs[0]])
# Import with an already-used name
a3, b3 = importer.import_graph_def(
graph_def,
return_elements=["A", "B"],
name="A")
self.assertEqual(a3.name, "A_3/A")
self.assertEqual(b3.name, "A_3/B")
self.assertEqual(list(b3.inputs), [a3.outputs[0]])
# Import with existing de-duped node names
a4, b4 = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A_1' op: 'IntOutput' }
node { name: 'B_1' op: 'IntInput' input: 'A_1:0' }
"""),
return_elements=["A_1", "B_1"],
name="")
self.assertEqual(a4.name, "A_1_1")
self.assertEqual(b4.name, "B_1_1")
self.assertEqual(list(b4.inputs), [a4.outputs[0]])
# Create a name scope and then import node with same name
with ops.name_scope("foo"):
constant_op.constant(1)
foo, = importer.import_graph_def(
self._MakeGraphDef("node { name: 'foo' op: 'IntOutput' }"),
return_elements=["foo"],
name="")
self.assertEqual(foo.name, "foo_1")
# Imported node name can't conflict with intermediate name scope (but can
# conflict with outer scope and full name scope)
with ops.name_scope("outer"):
with ops.name_scope("inner"):
c = constant_op.constant(1, name="c")
self.assertEqual(c.op.name, "outer/inner/c")
outer, inner, new_c, outer_inner, outer_inner_c = (
importer.import_graph_def(
self._MakeGraphDef(
"node { name: 'outer' op: 'IntOutput' }"
"node { name: 'inner' op: 'IntOutput' }"
"node { name: 'c' op: 'IntOutput' }"
"node { name: 'outer/inner' op: 'IntOutput' }"
"node { name: 'outer/inner/c' op: 'IntOutput' }"),
return_elements=["outer", "inner", "c", "outer/inner",
"outer/inner/c"],
name=""))
self.assertEqual(outer.name, "outer_1")
self.assertEqual(inner.name, "inner")
self.assertEqual(new_c.name, "c")
self.assertEqual(outer_inner.name, "outer/inner_1")
self.assertEqual(outer_inner_c.name, "outer/inner/c_1")
def testInputMap(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'TwoIntOutputs' }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={"A:0": feed_a_0,
"B:1": feed_b_1},
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapBytes(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'TwoIntOutputs' }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={b"A:0": feed_a_0,
b"B:1": feed_b_1},
return_elements=[b"A", b"B", b"C", b"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapUnicode(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'TwoIntOutputs' }
node { name: 'C' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'ListInput'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={u"A:0": feed_a_0,
u"B:1": feed_b_1},
return_elements=[u"A", u"B", u"C", u"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testImplicitZerothOutput(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'IntInput' input: 'A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.inputs[0], a.outputs[0])
def testInputMapImplicitZerothOutput(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
b, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'IntInput' input: 'A:0' }
"""),
input_map={"A": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testWithControlDependency(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' input: '^A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.control_inputs, [a])
def testWithRefs(self):
with ops.Graph().as_default():
a, b, c, d = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'RefOutput' }
node { name: 'B' op: 'IntOutput' }
node { name: 'C' op: 'TwoIntInputs' input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'RefInputIntInput' input: 'A:0' input: 'B:0' }
"""),
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[0])
self.assertEqual(d.inputs[1], b.outputs[0])
self.assertEqual(a.outputs[0].dtype, dtypes.int32_ref)
self.assertEqual(c._input_dtypes, [dtypes.int32, dtypes.int32])
self.assertEqual(c.outputs, [])
self.assertEqual(d._input_dtypes, [dtypes.int32_ref, dtypes.int32])
self.assertEqual(d.outputs, [])
def testCyclic(self):
# Importing cycles not supported with C API enabled (this test will
# eventually be deleted).
# TODO(skyewm): write while loop test
if ops._USE_C_API: return
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Unary'
attr { key: 'T' value { type: DT_INT32 } } input: 'B:0' }
node { name: 'B' op: 'Unary'
attr { key: 'T' value { type: DT_INT32 } } input: 'A:0' }
"""),
return_elements=["A", "B"])
self.assertEqual(a.inputs[0], b.outputs[0])
self.assertEqual(b.inputs[0], a.outputs[0])
def testTypeMismatchInGraphDef(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'FloatInput' input: 'A:0' }
"""))
self.assertTrue(
"Cannot convert a tensor of type int32 to an input of type float" in
str(e.exception))
def testShapeWhitelist(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
# Barrier's shape is an output vector of 2, but the
# graph says it's a scalar. This is currently whitelisted.
with ops.Graph().as_default():
_ = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Barrier'
attr { key: '_output_shapes'
value { list { shape { } } } } }
"""),
return_elements=["A"],
name="import")
def testShapeWhitelistViolation(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
# L2 loss produces a scalar shape, but the graph
# has the wrong shape, so raise an error.
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
_ = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'FloatOutput' }
node { name: 'B' op: 'L2Loss'
input: 'A:0'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: '_output_shapes'
value { list { shape { dim { size: 43 } } } } } }
"""),
return_elements=["B"],
name="import")
self.assertTrue(
"Shapes () and (43,) are not compatible" in str(e.exception))
def testInvalidSignatureTooManyInputsInGraphDef(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'None' input: 'A:0' }
"""))
self.assertTrue("More inputs specified ('A:0') than the op expects" in
str(e.exception))
def testInvalidSignatureNotEnoughInputsInGraphDef(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'IntInputFloatInput' input: 'A:0' }
"""))
self.assertTrue("Input types mismatch (expected 'int32, float32' but "
"got 'int32')" in str(e.exception))
def testMissingInputOpInGraphDef(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'FloatInput' input: 'A:0' }
"""))
self.assertTrue("Input tensor 'A:0' not found" in str(e.exception))
def testMissingInputOpInGraphDefButAppearsInInputMap(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(5.0)
b, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'FloatInput' input: 'A:0' }
"""),
input_map={"A:0": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testMissingInputTensorInGraphDef(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'FloatOutput' }
node { name: 'B' op: 'FloatInput' input: 'A:1' }
"""))
self.assertTrue("Input tensor 'A:1' not found" in str(e.exception))
def testMissingControlInputInGraphDef(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: '^A' }
"""))
self.assertTrue("Control input '^A' not found" in str(e.exception))
def testInvalidTensorNameOutputIndexInGraphDef(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B' }
"""))
self.assertEqual("Cannot convert 'A:B' to a tensor name.",
str(e.exception))
def testInvalidTensorNameInGraphDef(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B:0' }
"""))
self.assertEqual("Cannot convert 'A:B:0' to a tensor name.",
str(e.exception))
def testMissingReturnOperation(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["B"])
self.assertTrue(
"return_element 'B' not found in graph_def." in str(e.exception))
def testMissingReturnTensor(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
return_elements=["A:1"])
self.assertTrue(
"return_element 'A:1' not found in graph_def." in str(e.exception))
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
return_elements=["B:0"])
self.assertTrue(
"return_element 'B:0' not found in graph_def." in str(e.exception))
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
return_elements=["A:B:0"])
self.assertTrue(
"return_element 'A:B:0' not found in graph_def." in str(e.exception))
def testMissingInputMap(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
input_map={"B:0": constant_op.constant(5.0)})
self.assertTrue("not found in graph_def: [B:0]" in str(e.exception))
def testInputMapUnusedAsInput(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
# Mapping an unused node output should succeed.
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
input_map={"A:0": constant_op.constant(5.0)})
# Mapping a non-existent output of an existing node should fail.
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
"""),
input_map={"A:2": constant_op.constant(5.0)})
self.assertTrue("not found in graph_def: [A:2]" in str(e.exception))
def testInputMapTypeMismatch(self):
if ops._USE_C_API:
error_msg = ("Input 0 of node import/B was passed float from Const:0 "
"incompatible with expected int32.")
else:
error_msg = ("Cannot convert a tensor of type float32 to an input of "
"type int32.")
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, error_msg):
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'IntInput' input: 'A:0' }
"""),
input_map={"A:0": constant_op.constant(5.0)})
def testNoReturns(self):
with ops.Graph().as_default() as g:
ret = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""))
self.assertEqual(ret, None)
a = g.get_operation_by_name("import/A")
self.assertEqual(a.type, "None")
def testOverrideNamePrefix(self):
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"],
name="imported_graph")
self.assertEqual(a.name, "imported_graph/A")
def testDefaultNamePrefix(self):
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"],
name=None)
self.assertEqual(a.name, "import/A")
def testNamePrefixColocationAttrs(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
b, = importer.import_graph_def(
original_graph_def, return_elements=["B"], name="imported_graph")
self.assertTrue("_class" in b.node_def.attr)
self.assertProtoEquals(
"list { s: 'loc:@imported_graph/A' }",
b.node_def.attr["_class"])
def testColocationWithDeviceFn(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
# A device function that places "A" on one device and "B" on
# another device. Because B is colocated with A, we test that B's
# device function is overridden by A.
def CustomDeviceFn(op):
if "A" in op.name:
return "/device:A:0"
else:
return "/device:B:0"
with ops.Graph().as_default():
with ops.device(CustomDeviceFn):
a, b = importer.import_graph_def(original_graph_def,
return_elements=["A", "B"],
name="imported_graph")
self.assertEqual(a.device, "/device:A:0")
self.assertEqual(b.device, "/device:A:0")
self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
# Test a scenario where 'A' doesn't get a device; 'A' should not have a
# device, but during runtime will get colocated with 'B' because of the
# colocation attribute. B's device function is still overridden by A.
def BDeviceFn(op):
if "B" in op.name:
return "/device:B:0"
return ""
with ops.Graph().as_default():
with ops.device(BDeviceFn):
a, b = importer.import_graph_def(original_graph_def,
return_elements=["A", "B"],
name="imported_graph")
self.assertEqual(a.device, "")
self.assertEqual(b.device, "")
self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
# Only A gets a device, so B inherits it implicitly.
def ADeviceFn(op):
if "A" in op.name:
return "/device:A:0"
return ""
with ops.Graph().as_default():
with ops.device(ADeviceFn):
a, b = importer.import_graph_def(original_graph_def,
return_elements=["A", "B"],
name="imported_graph")
self.assertEqual(a.device, "/device:A:0")
self.assertEqual(b.device, "/device:A:0")
self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
def testMultipleColocationWithDeviceFn(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None'}
node { name: 'B' op: 'None'}
node { name: 'C' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' s: 'loc:@B' } }
} }""")
# A device function that places "B" on a device, and "A" is empty.
#
# B and C should contain "/device:B". A will not right now. But
# because of the colocation property, at runtime it would be
# placed with B and C.
def CustomDeviceFn(op):
if "B" in op.name:
return "/device:B:0"
return ""
with ops.Graph().as_default():
with ops.device(CustomDeviceFn):
a, b, c = importer.import_graph_def(original_graph_def,
return_elements=["A", "B", "C"],
name="imported_graph")
self.assertEqual(a.device, "")
self.assertEqual(b.device, "/device:B:0")
self.assertEqual(c.device, "/device:B:0")
self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/B"])
self.assertEqual(c.colocation_groups(),
[b"loc:@imported_graph/A", b"loc:@imported_graph/B"])
def testNamePrefixColocationAttrsMultipleImport(self):
if ops._USE_C_API: return # TODO(skyewm): set uniquify_names
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with ops.Graph().as_default():
b, = importer.import_graph_def(
original_graph_def, return_elements=["B"], name="")
_, = importer.import_graph_def(
original_graph_def, return_elements=["B"], name="")
self.assertProtoEqualsVersion("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }
node { name: 'A_1' op: 'None' }
node { name: 'B_1' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A_1' } }
} }""", b.graph.as_graph_def())
def testNamePrefixColocationAttrsNotFound(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
if ops._USE_C_API:
error_msg = "Node 'B' expects to be colocated with unknown node 'A'"
else:
error_msg = "does not exist during import"
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, error_msg):
importer.import_graph_def(
original_graph_def, return_elements=["B"], name="imported_graph")
def testEmptyGraph(self):
with ops.Graph().as_default() as g:
init_version = g.version
importer.import_graph_def(self._MakeGraphDef(""))
self.assertEqual(init_version, g.version)
def testInvalidInputForGraphDef(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as e:
importer.import_graph_def("")
self.assertEqual("graph_def must be a GraphDef proto.", str(e.exception))
def testInvalidInputForInputMap(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as e:
importer.import_graph_def(
self._MakeGraphDef(""), input_map=[constant_op.constant(5.0)])
self.assertEqual("input_map must be a dictionary mapping strings to "
"Tensor objects.", str(e.exception))
graph_def = self._MakeGraphDef("""
node { name: 'a' op: 'Placeholder'
attr { key: 'dtype' value { type: DT_FLOAT } }}
node { name: 'id' op: 'Identity' input: 'a:0'
attr { key: 'T' value { type: DT_FLOAT } }}""")
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
graph_def,
input_map={"a:0": variables.Variable(5.0)},
name="")
self.assertStartsWith(str(e.exception),
"tf.import_graph_def() requires a non-empty `name` "
"if `input_map` contains non-Tensor values.")
with ops.Graph().as_default():
t, = importer.import_graph_def(
graph_def,
input_map={"a:0": constant_op.constant(5.0)},
name="",
return_elements=["id:0"])
with self.test_session():
self.assertEqual(5.0, t.eval())
def testInvalidInputForReturnOperations(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as e:
importer.import_graph_def(self._MakeGraphDef(""), return_elements=[7])
self.assertEqual("return_elements must be a list of strings.",
str(e.exception))
if ops._USE_C_API:
error_msg = "Cannot convert 'a:b:c' to a tensor name."
else:
error_msg = "Requested return_element 'a:b:c' not found in graph_def."
with self.assertRaisesRegexp(ValueError, error_msg):
importer.import_graph_def(self._MakeGraphDef(""),
return_elements=["a:b:c"])
def testDuplicateOperationNames(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
with self.assertRaises(ValueError) as e:
importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'IntOutput' }
node { name: 'B' op: 'IntOutput' }
node { name: 'A' op: 'IntOutput' }
"""))
self.assertEqual("Duplicate name 'A' in GraphDef.", str(e.exception))
def testWithExtensionAndAttr(self):
with ops.Graph().as_default() as g:
c = constant_op.constant(5.0, dtype=dtypes.float32, name="c")
array_ops.stack([c, c], name="pack")
gdef = g.as_graph_def()
with self.test_session():
pack, = importer.import_graph_def(gdef, return_elements=["pack"])
self.assertAllEqual(pack.outputs[0].eval(), [5.0, 5.0])
def testWithDevice(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default() as g:
# No device.
a = constant_op.constant(3.0, name="a")
with ops.device("/cpu:0"):
b = constant_op.constant(4.0, name="b")
with ops.device("/job:worker"):
c = constant_op.constant(5.0, name="c")
gdef = g.as_graph_def()
with ops.Graph().as_default():
a2, b2, c2 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual(a.device, a2.device)
self.assertEqual(b.device, b2.device)
self.assertEqual(c.device, c2.device)
with ops.Graph().as_default():
with ops.device(device.merge_device("/task:0")):
a3, b3, c3 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/task:0", a3.device)
self.assertEqual("/task:0/device:CPU:0", b3.device) # canonicalized.
self.assertEqual(c.device + "/task:0", c3.device)
with ops.Graph().as_default():
with ops.device(device.merge_device("/job:ps")):
a4, b4, c4 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/job:ps", a4.device)
self.assertEqual("/job:ps/device:CPU:0", b4.device) # canonicalized.
self.assertEqual(c.device, c4.device) # worker overrides ps.
with ops.Graph().as_default():
with ops.device(device.merge_device("/device:GPU:0")):
a5, b5, c5 = importer.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/device:GPU:0", a5.device)
self.assertEqual("/device:CPU:0", b5.device) # cpu overrides gpu.
self.assertEqual(c.device + "/device:GPU:0", c5.device)
def testWithDeviceFunctionDependingOnInputs(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default() as g:
with ops.device("/job:ps"):
v1 = constant_op.constant(1.0)
v2 = constant_op.constant(1.0)
_ = v1 + v2
_ = v1 - v2
_ = array_ops.identity(v1)
gdef = g.as_graph_def()
# We'll use the following device function to observe ops with two inputs.
ops_with_two_inputs = []
def InputCounter(op):
if len(op.inputs) == 2:
ops_with_two_inputs.append(op)
return ""
with ops.Graph().as_default() as g:
with ops.device(InputCounter):
importer.import_graph_def(gdef)
# We expect to see the add and subtract, but not identity.
self.assertEqual(2, len(ops_with_two_inputs))
def testGradient(self):
if ops._USE_C_API: return # TODO(skyewm): get_shape() doesn't work
with ops.Graph().as_default() as g:
inputs = array_ops.placeholder(
dtypes.float32, shape=[None, 100], name="input")
weights = array_ops.placeholder(
dtypes.float32, shape=[100, 10], name="weights")
biases = array_ops.placeholder(dtypes.float32, shape=[10], name="biases")
activations = nn_ops.relu(
math_ops.matmul(inputs, weights) + biases, name="activations")
loss = math_ops.reduce_mean(activations, name="loss")
gdef = g.as_graph_def()
with ops.Graph().as_default() as g:
input_placeholder = array_ops.placeholder(dtypes.float32, shape=[32, 100])
weights_var = variables.Variable(
random_ops.truncated_normal([100, 10]), name="weights")
biases_var = variables.Variable(array_ops.zeros([10]), name="biases")
activations, loss = importer.import_graph_def(
gdef,
input_map={
"input:0": input_placeholder,
"weights:0": weights_var,
"biases:0": biases_var
},
return_elements=["activations:0", "loss:0"])
self.assertEqual([32, 10], activations.get_shape())
self.assertEqual([], loss.get_shape())
weights_grad, biases_grad = gradients_impl.gradients(
loss, [weights_var, biases_var])
self.assertEqual([100, 10], weights_grad.get_shape())
self.assertEqual([10], biases_grad.get_shape())
def testLargeGraph(self):
with self.test_session():
      # The default message byte limit is 64M; ours is 2G, with a warning at
      # 512M. Adding a float32 tensor with 130M entries (~520MB) should exceed
      # the warning threshold but not the hard limit.
input_shape = [130, 1000, 1000]
tensor_input = np.ones(input_shape, dtype=np.float32)
t = constant_op.constant(tensor_input, shape=input_shape)
g = array_ops.identity(t)
g.eval()
def testVersion(self):
v0 = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
v2 = versions.GRAPH_DEF_VERSION
v1 = (v0 + v2) // 2
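    # exercise the min, mid and max supported versions for both producer and min_consumer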
for producer in v0, v1, v2:
for min_consumer in v0, v1, v2:
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef(
"node { name: 'A' op: 'TwoIntOutputs' }",
producer=producer,
min_consumer=min_consumer),
return_elements=["A"])
self.assertEqual(a.graph.graph_def_versions.producer, producer)
self.assertEqual(a.graph.graph_def_versions.min_consumer,
min_consumer)
def testVersionLow(self):
with ops.Graph().as_default() as g:
pat = (r"GraphDef producer version -1 below min producer %d supported "
r"by TensorFlow \S+\. Please regenerate your graph.$" %
versions.GRAPH_DEF_VERSION_MIN_PRODUCER)
# C API throws error during import, Python-only throws error during run
if ops._USE_C_API:
with self.assertRaisesRegexp(Exception, pat):
importer.import_graph_def(self._MakeGraphDef("", producer=-1))
else:
importer.import_graph_def(self._MakeGraphDef("", producer=-1))
x = constant_op.constant(
7) # Need at least one op to get a C++ graph generated
with self.test_session(graph=g) as sess:
with self.assertRaisesRegexp(Exception, pat):
sess.run(x)
def testVersionHigh(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default() as g:
pat = (r"GraphDef min consumer version %d above current version %d "
r"for TensorFlow \S+\. Please upgrade TensorFlow\.$" %
(1 << 30, versions.GRAPH_DEF_VERSION))
importer.import_graph_def(self._MakeGraphDef("", min_consumer=1 << 30))
x = constant_op.constant(
7) # Need at least one op to get a C++ graph generated
with self.test_session(graph=g) as sess:
with self.assertRaisesRegexp(Exception, pat):
sess.run(x)
def testVersionAppliesToOpConstruction(self):
"""These tests rely on shape fns in test_ops.cc."""
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
with ops.Graph().as_default():
importer.import_graph_def(
self._MakeGraphDef(
"node { name: 'A' op: 'RequiresOlderGraphVersion' }",
producer=versions.GRAPH_DEF_VERSION - 1),
return_elements=["A"])
with ops.Graph().as_default():
with self.assertRaisesWithPredicateMatch(ValueError,
"Wrong graph version.*"):
importer.import_graph_def(
self._MakeGraphDef(
"node { name: 'A' op: 'RequiresOlderGraphVersion' }",
producer=versions.GRAPH_DEF_VERSION),
return_elements=["A"])
def testDefaultAttrsAdded(self):
with ops.Graph().as_default():
a = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithDefaultAttr' }
"""),
return_elements=["A"])
self.assertEqual(123.0, a[0].get_attr("default_float"))
def testDefaultAttrsRemoved(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
producer_op_list = op_def_pb2.OpList()
text_format.Merge("""
op {
name: 'OpWithFutureDefaultAttr'
attr { name: 'default_int' type: 'int' default_value { i: 456 } }
}
""", producer_op_list)
# Attr only in producer_op_list with default value gets removed.
with ops.Graph().as_default():
a = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithFutureDefaultAttr'
attr { key: 'default_int' value { i: 456 } } }
"""),
return_elements=["A"],
producer_op_list=producer_op_list)
with self.assertRaisesRegexp(ValueError, "No attr named 'default_int'"):
a[0].get_attr("default_int")
# Attr only in producer_op_list with non-default value is preserved.
with ops.Graph().as_default():
a = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithFutureDefaultAttr'
attr { key: 'default_int' value { i: 987 } } }
"""),
return_elements=["A"],
producer_op_list=producer_op_list)
self.assertEqual(987, a[0].get_attr("default_int"))
def testFunctions(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype, dtype)
def Grad(x, y, dout1, dout2): # pylint: disable=unused-argument
# Return the inputs for simplicity of testing. The correct return value
# would be (dout1 + dout2, dout1 - dout2)
return x, y
@function.Defun(dtype, dtype, grad_func=Grad)
def FuncWithGrad(x, y):
return x + y, x - y
@function.Defun(dtypes.int32)
def ExternalTensorFunc(x):
# c must be defined in the containing graph
return x + c
@function.Defun(dtypes.int32, dtypes.int32)
def OuterFunc(x, y):
@function.Defun(dtypes.int32)
def InnerFunc(x):
return x + x
return InnerFunc(x) + y
# Create graph with function calls and export to GraphDef
with ops.Graph().as_default() as g1:
p1 = array_ops.placeholder(dtype, name="p1")
p2 = array_ops.placeholder(dtype, name="p2")
# pylint: disable=unexpected-keyword-arg
a, b = FuncWithGrad(p1, p2, name="f")
c = constant_op.constant(10, dtype=dtypes.int32)
ExternalTensorFunc(1, name="external")
OuterFunc(10, 1, name="outer")
# pylint: enable=unexpected-keyword-arg
gdef = g1.as_graph_def()
# Import GraphDef into new graph, add imported gradients, and test that
# imported functions can be run
with ops.Graph().as_default() as g2:
p1, p2, a, b = importer.import_graph_def(
gdef, return_elements=["p1:0", "p2:0", "f:0", "f:1"], name="")
grad = gradients_impl.gradients([a], [p1, p2])
with self.test_session(graph=g2) as sess:
feed_dict = {p1: 1, p2: 2}
a_val, b_val, grad_val = sess.run([a, b, grad], feed_dict=feed_dict)
self.assertEqual(a_val, 3.0)
self.assertEqual(b_val, -1.0)
# Grad function returns inputs values for testing
self.assertEqual(grad_val, [1.0, 2.0])
self.assertEqual(sess.run("external:0"), 11)
self.assertEqual(sess.run("outer:0"), 21)
# Export the new graph and reimport to test that imported functions can be
# successfully exported/imported again
gdef = g2.as_graph_def()
with ops.Graph().as_default() as g3:
p1, p2, a, b = importer.import_graph_def(
gdef, return_elements=["p1:0", "p2:0", "f:0", "f:1"], name="")
      # Create new gradient functions (in addition to the imported gradient
      # functions created in g2).
grad = gradients_impl.gradients([a], [p1, p2])
with self.test_session(graph=g3) as sess:
feed_dict = {p1: 1, p2: 2}
a_val, b_val, grad_val = sess.run([a, b, grad], feed_dict=feed_dict)
self.assertEqual(a_val, 3.0)
self.assertEqual(b_val, -1.0)
self.assertEqual(grad_val, [1.0, 2.0])
self.assertEqual(sess.run("external:0"), 11)
self.assertEqual(sess.run("outer:0"), 21)
def testImportInsideDefun(self):
if ops._USE_C_API: return # TODO(skyewm): make this work with C API
g = ops.Graph()
with g.as_default():
@function.Defun()
def Add2(x, y):
return math_ops.add(x, y)
x = constant_op.constant(3.0, dtype=dtypes.float32)
y = constant_op.constant(-5.0, dtype=dtypes.float32)
z = Add2(x, y, name="z") # pylint: disable=unexpected-keyword-arg
gdef = g.as_graph_def()
@function.Defun()
def TestFunc():
return importer.import_graph_def(gdef, return_elements=["z:0"])[0]
z = TestFunc()
with self.test_session():
z_val = z.eval()
self.assertEqual(z_val, -2.0)
def testImportGraphWithFunctionTwice(self):
g = ops.Graph()
with g.as_default():
@function.Defun()
def Add2(x, y):
return math_ops.add(x, y)
x = array_ops.placeholder(dtype=dtypes.float32, name="x")
y = array_ops.placeholder(dtype=dtypes.float32, name="y")
_ = Add2(x, y, name="z") # pylint: disable=unexpected-keyword-arg
gdef = g.as_graph_def()
x = random_ops.random_uniform(dtype=dtypes.float32, shape=())
y = random_ops.random_uniform(dtype=dtypes.float32, shape=())
input_map = {"x:0": x, "y:0": y}
with ops.name_scope("first"):
z1 = importer.import_graph_def(gdef, return_elements=["z:0"],
input_map=input_map)[0]
with ops.name_scope("second"):
z2 = importer.import_graph_def(gdef, return_elements=["z:0"],
input_map=input_map)[0]
with self.test_session() as sess:
z1_val, z2_val = sess.run((z1, z2))
self.assertAllEqual(z1_val, z2_val)
if __name__ == "__main__":
test.main()
| 38.808476 | 81 | 0.590869 |
4a215b55906797eb6376b9d1b3ea38e866f4a220
| 9,747 |
py
|
Python
|
sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mysql/operations/_recommended_actions_operations.py
|
JianpingChen/azure-sdk-for-python
|
3072fc8c0366287fbaea1b02493a50259c3248a2
|
[
"MIT"
] | 3 |
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mysql/operations/_recommended_actions_operations.py
|
JianpingChen/azure-sdk-for-python
|
3072fc8c0366287fbaea1b02493a50259c3248a2
|
[
"MIT"
] | 510 |
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mysql/operations/_recommended_actions_operations.py
|
JianpingChen/azure-sdk-for-python
|
3072fc8c0366287fbaea1b02493a50259c3248a2
|
[
"MIT"
] | 5 |
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RecommendedActionsOperations(object):
"""RecommendedActionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mysql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
server_name, # type: str
advisor_name, # type: str
recommended_action_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RecommendationAction"
"""Retrieve recommended actions from the advisor.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param advisor_name: The advisor name for recommendation action.
:type advisor_name: str
:param recommended_action_name: The recommended action name.
:type recommended_action_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RecommendationAction, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.mysql.models.RecommendationAction
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RecommendationAction"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'advisorName': self._serialize.url("advisor_name", advisor_name, 'str'),
'recommendedActionName': self._serialize.url("recommended_action_name", recommended_action_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RecommendationAction', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/advisors/{advisorName}/recommendedActions/{recommendedActionName}'} # type: ignore
def list_by_server(
self,
resource_group_name, # type: str
server_name, # type: str
advisor_name, # type: str
session_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RecommendationActionsResultList"]
"""Retrieve recommended actions from the advisor.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param advisor_name: The advisor name for recommendation action.
:type advisor_name: str
:param session_id: The recommendation action session identifier.
:type session_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RecommendationActionsResultList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.mysql.models.RecommendationActionsResultList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RecommendationActionsResultList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_server.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'advisorName': self._serialize.url("advisor_name", advisor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if session_id is not None:
query_parameters['sessionId'] = self._serialize.query("session_id", session_id, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RecommendationActionsResultList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/advisors/{advisorName}/recommendedActions'} # type: ignore
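# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated client): how the two
# operations above are typically invoked once a management client exists. The
# subscription id, resource group, server, advisor and action names are
# placeholders; ``MySQLManagementClient``/``DefaultAzureCredential`` and the
# ``recommended_actions`` attribute name on the client are assumptions based
# on the installed azure-mgmt-rdbms/azure-identity packages.
def _example_recommended_actions_usage():  # pragma: no cover
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.rdbms.mysql import MySQLManagementClient
    client = MySQLManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # Single recommended action lookup.
    action = client.recommended_actions.get(
        "<resource-group>", "<server-name>", "<advisor-name>", "<action-name>")
    print(action.name)
    # Paged listing; the returned ItemPaged follows next_link transparently.
    for recommendation in client.recommended_actions.list_by_server(
            "<resource-group>", "<server-name>", "<advisor-name>"):
        print(recommendation.name)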
| 49.227273 | 230 | 0.663691 |
4a215bfe41fdf71a11d8d9170b9b0fe2fd5d4ab5
| 1,309 |
py
|
Python
|
tests/test_pipeadiabaticsteam.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 19 |
2015-12-08T23:33:51.000Z
|
2022-01-31T04:41:10.000Z
|
tests/test_pipeadiabaticsteam.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 2 |
2019-10-04T10:57:00.000Z
|
2021-10-01T06:46:17.000Z
|
tests/test_pipeadiabaticsteam.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 7 |
2015-11-04T02:25:01.000Z
|
2021-12-08T03:14:28.000Z
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.node import PipeAdiabaticSteam
log = logging.getLogger(__name__)
class TestPipeAdiabaticSteam(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_pipeadiabaticsteam(self):
pyidf.validation_level = ValidationLevel.error
obj = PipeAdiabaticSteam()
# alpha
var_name = "Name"
obj.name = var_name
# node
var_inlet_node_name = "node|Inlet Node Name"
obj.inlet_node_name = var_inlet_node_name
# node
var_outlet_node_name = "node|Outlet Node Name"
obj.outlet_node_name = var_outlet_node_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.pipeadiabaticsteams[0].name, var_name)
self.assertEqual(idf2.pipeadiabaticsteams[0].inlet_node_name, var_inlet_node_name)
self.assertEqual(idf2.pipeadiabaticsteams[0].outlet_node_name, var_outlet_node_name)
| 28.456522 | 92 | 0.675325 |
4a215d88ab5cca64c293b6d6ed367a3195e8186b
| 1,416 |
py
|
Python
|
river/metrics/multioutput/macro.py
|
online-ml/creme
|
60872844e6052b5ef20e4075aea30f9031377136
|
[
"BSD-3-Clause"
] | 1,105 |
2019-01-24T15:15:30.000Z
|
2020-11-10T18:27:00.000Z
|
river/metrics/multioutput/macro.py
|
online-ml/creme
|
60872844e6052b5ef20e4075aea30f9031377136
|
[
"BSD-3-Clause"
] | 328 |
2019-01-25T13:48:43.000Z
|
2020-11-11T11:41:44.000Z
|
river/metrics/multioutput/macro.py
|
online-ml/creme
|
60872844e6052b5ef20e4075aea30f9031377136
|
[
"BSD-3-Clause"
] | 150 |
2019-01-29T19:05:21.000Z
|
2020-11-11T11:50:14.000Z
|
import statistics
from collections import defaultdict
from copy import deepcopy
from functools import partial
from river import metrics, utils
from river.metrics.multioutput.base import MultiOutputMetric
__all__ = ["MacroAverage"]
class MacroAverage(MultiOutputMetric, metrics.base.WrapperMetric):
"""Macro-average wrapper.
A copy of the provided metric is made for each output. The arithmetic average of all the
metrics is returned.
Parameters
----------
metric
A classification or a regression metric.
"""
def __init__(self, metric):
self._metric = metric
self.metrics = defaultdict(partial(deepcopy, self._metric))
@property
def metric(self):
return self._metric
def works_with(self, model) -> bool:
if isinstance(self.metric, metrics.base.ClassificationMetric):
return utils.inspect.ismoclassifier(model)
return utils.inspect.ismoregressor(model)
def update(self, y_true, y_pred, sample_weight=1.0):
for i in y_pred:
self.metrics[i].update(y_true[i], y_pred[i], sample_weight)
return self
def revert(self, y_true, y_pred, sample_weight=1.0):
for i in y_pred:
self.metrics[i].revert(y_true[i], y_pred[i], sample_weight)
return self
def get(self):
return statistics.mean(metric.get() for metric in self.metrics.values())
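# Hedged usage sketch (not part of the original module): wrapping a
# single-output metric so its value is averaged over the outputs of a
# multi-output prediction. The metric choice and the toy labels below are
# illustrative only.
def _example_macro_average():  # pragma: no cover
    from river import metrics
    metric = MacroAverage(metrics.Accuracy())
    y_true = {0: True, 1: False}
    y_pred = {0: True, 1: True}
    metric.update(y_true, y_pred)
    print(metric.get())  # arithmetic mean of the per-output accuracies -> 0.5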
| 28.32 | 92 | 0.683616 |
4a215db0348a33b42ac0add8888d44cddbdac6ca
| 259 |
py
|
Python
|
styling_tool/styling_tool/doctype/desk_icons/desk_icons.py
|
omaralhoori/frappe_styling_tool
|
d3b9fcdaff1a519ec9d950036957bbcf225b25a5
|
[
"MIT"
] | null | null | null |
styling_tool/styling_tool/doctype/desk_icons/desk_icons.py
|
omaralhoori/frappe_styling_tool
|
d3b9fcdaff1a519ec9d950036957bbcf225b25a5
|
[
"MIT"
] | null | null | null |
styling_tool/styling_tool/doctype/desk_icons/desk_icons.py
|
omaralhoori/frappe_styling_tool
|
d3b9fcdaff1a519ec9d950036957bbcf225b25a5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Omar Alhori and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class DeskIcons(Document):
pass
| 23.545455 | 50 | 0.772201 |
4a215db8ff07ba04a03ab00fa4d202481a32d0d9
| 8,642 |
py
|
Python
|
qiskit/circuit/controlflow/for_loop.py
|
t-imamichi/qiskit-core
|
8d2eeeac44f97af1e10514cdae4157e5923ff2e5
|
[
"Apache-2.0"
] | 1,456 |
2017-08-05T16:33:05.000Z
|
2018-06-05T04:15:35.000Z
|
qiskit/circuit/controlflow/for_loop.py
|
t-imamichi/qiskit-core
|
8d2eeeac44f97af1e10514cdae4157e5923ff2e5
|
[
"Apache-2.0"
] | 365 |
2017-08-04T06:09:16.000Z
|
2018-06-05T08:33:37.000Z
|
qiskit/circuit/controlflow/for_loop.py
|
declanmillar/qiskit-terra
|
43e4a72c9c1537dd3d220a52f7e56423dfdd926c
|
[
"Apache-2.0"
] | 463 |
2017-08-05T04:10:01.000Z
|
2018-06-05T06:43:21.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"Circuit operation representing a ``for`` loop."
import warnings
from typing import Iterable, Optional, Union
from qiskit.circuit.parameter import Parameter
from qiskit.circuit.exceptions import CircuitError
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .control_flow import ControlFlowOp
class ForLoopOp(ControlFlowOp):
"""A circuit operation which repeatedly executes a subcircuit
(``body``) parameterized by a parameter ``loop_parameter`` through
the set of integer values provided in ``indexset``.
Parameters:
indexset: A collection of integers to loop over.
loop_parameter: The placeholder parameterizing ``body`` to which
the values from ``indexset`` will be assigned.
body: The loop body to be repeatedly executed.
label: An optional label for identifying the instruction.
**Circuit symbol:**
.. parsed-literal::
┌───────────┐
q_0: ┤0 ├
│ │
q_1: ┤1 ├
│ for_loop │
q_2: ┤2 ├
│ │
c_0: ╡0 ╞
└───────────┘
"""
def __init__(
self,
indexset: Iterable[int],
loop_parameter: Union[Parameter, None],
body: QuantumCircuit,
label: Optional[str] = None,
):
num_qubits = body.num_qubits
num_clbits = body.num_clbits
super().__init__(
"for_loop", num_qubits, num_clbits, [indexset, loop_parameter, body], label=label
)
@property
def params(self):
return self._params
@params.setter
def params(self, parameters):
indexset, loop_parameter, body = parameters
if not isinstance(loop_parameter, (Parameter, type(None))):
raise CircuitError(
"ForLoopOp expects a loop_parameter parameter to "
"be either of type Parameter or None, but received "
f"{type(loop_parameter)}."
)
if not isinstance(body, QuantumCircuit):
raise CircuitError(
"ForLoopOp expects a body parameter to be of type "
f"QuantumCircuit, but received {type(body)}."
)
if body.num_qubits != self.num_qubits or body.num_clbits != self.num_clbits:
raise CircuitError(
"Attempted to assign a body parameter with a num_qubits or "
"num_clbits different than that of the ForLoopOp. "
f"ForLoopOp num_qubits/clbits: {self.num_qubits}/{self.num_clbits} "
f"Supplied body num_qubits/clbits: {body.num_qubits}/{body.num_clbits}."
)
if (
loop_parameter is not None
and loop_parameter not in body.parameters
and loop_parameter.name in (p.name for p in body.parameters)
):
warnings.warn(
"The Parameter provided as a loop_parameter was not found "
"on the loop body and so no binding of the indexset to loop "
"parameter will occur. A different Parameter of the same name "
f"({loop_parameter.name}) was found. If you intended to loop "
"over that Parameter, please use that Parameter instance as "
"the loop_parameter.",
stacklevel=2,
)
# Consume indexset into a tuple unless it was provided as a range.
# Preserve ranges so that they can be exported as OpenQASM3 ranges.
indexset = indexset if isinstance(indexset, range) else tuple(indexset)
self._params = [indexset, loop_parameter, body]
@property
def blocks(self):
return (self._params[2],)
def replace_blocks(self, blocks):
(body,) = blocks
return ForLoopOp(self.params[0], self.params[1], body, label=self.label)
class ForLoopContext:
"""A context manager for building up ``for`` loops onto circuits in a natural order, without
having to construct the loop body first.
Within the block, a lot of the bookkeeping is done for you; you do not need to keep track of
which qubits and clbits you are using, for example, and a loop parameter will be allocated for
you, if you do not supply one yourself. All normal methods of accessing the qubits on the
underlying :obj:`~QuantumCircuit` will work correctly, and resolve into correct accesses within
the interior block.
You generally should never need to instantiate this object directly. Instead, use
:obj:`.QuantumCircuit.for_loop` in its context-manager form, i.e. by not supplying a ``body`` or
sets of qubits and clbits.
Example usage::
import math
from qiskit import QuantumCircuit
qc = QuantumCircuit(2, 1)
with qc.for_loop(None, range(5)) as i:
qc.rx(i * math.pi/4, 0)
qc.cx(0, 1)
qc.measure(0, 0)
qc.break_loop().c_if(0, True)
This context should almost invariably be created by a :meth:`.QuantumCircuit.for_loop` call, and
the resulting instance is a "friend" of the calling circuit. The context will manipulate the
circuit's defined scopes when it is entered (by pushing a new scope onto the stack) and exited
(by popping its scope, building it, and appending the resulting :obj:`.ForLoopOp`).
.. warning::
This is an internal interface and no part of it should be relied upon outside of Qiskit
Terra.
"""
    # Class-level variable to keep track of the number of auto-generated loop variables, so we don't
# get naming clashes.
_generated_loop_parameters = 0
__slots__ = (
"_circuit",
"_generate_loop_parameter",
"_loop_parameter",
"_indexset",
"_label",
"_used",
)
def __init__(
self,
circuit: QuantumCircuit,
indexset: Iterable[int],
loop_parameter: Optional[Parameter] = None,
*,
label: Optional[str] = None,
):
self._circuit = circuit
self._generate_loop_parameter = loop_parameter is None
self._loop_parameter = loop_parameter
# We can pass through `range` instances because OpenQASM 3 has native support for this type
# of iterator set.
self._indexset = indexset if isinstance(indexset, range) else tuple(indexset)
self._label = label
self._used = False
def __enter__(self):
if self._used:
raise CircuitError("A for-loop context manager cannot be re-entered.")
self._used = True
self._circuit._push_scope()
if self._generate_loop_parameter:
self._loop_parameter = Parameter(f"_loop_i_{self._generated_loop_parameters}")
type(self)._generated_loop_parameters += 1
return self._loop_parameter
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
# If we're leaving the context manager because an exception was raised, there's nothing
# to do except restore the circuit state.
self._circuit._pop_scope()
return False
scope = self._circuit._pop_scope()
# Loops do not need to pass any further resources in, because this scope itself defines the
# extent of ``break`` and ``continue`` statements.
body = scope.build(scope.qubits, scope.clbits)
# We always bind the loop parameter if the user gave it to us, even if it isn't actually
# used, because they requested we do that by giving us a parameter. However, if they asked
# us to auto-generate a parameter, then we only add it if they actually used it, to avoid
# using unnecessary resources.
if self._generate_loop_parameter and self._loop_parameter not in body.parameters:
loop_parameter = None
else:
loop_parameter = self._loop_parameter
self._circuit.append(
ForLoopOp(self._indexset, loop_parameter, body, label=self._label),
tuple(body.qubits),
tuple(body.clbits),
)
return False
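# Hedged usage sketch (not part of the original module): building a ForLoopOp
# explicitly rather than through QuantumCircuit.for_loop. The parameter name
# and the one-qubit body are illustrative only.
def _example_for_loop_op():  # pragma: no cover
    theta = Parameter("theta")
    body = QuantumCircuit(1)
    body.rx(theta, 0)
    circuit = QuantumCircuit(1)
    # Repeat the body for indexset range(3); each value is bound to theta.
    circuit.append(ForLoopOp(range(3), theta, body), circuit.qubits, [])
    print(circuit.draw())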
| 38.238938 | 100 | 0.634922 |
4a215e57c1f4e299e48764c4fb0d2cb8ee0a9f57
| 686 |
py
|
Python
|
IWS/plugins/twilio/reqhandler.py
|
FlyingDiver/Indigo-Twilio
|
b912e83c8a62f62329d01881d719de5ae097037d
|
[
"MIT"
] | 1 |
2016-12-10T20:57:49.000Z
|
2016-12-10T20:57:49.000Z
|
IWS/plugins/twilio/reqhandler.py
|
FlyingDiver/Indigo-Twilio
|
b912e83c8a62f62329d01881d719de5ae097037d
|
[
"MIT"
] | 9 |
2018-05-07T21:59:44.000Z
|
2022-01-29T22:49:29.000Z
|
IWS/plugins/twilio/reqhandler.py
|
FlyingDiver/Indigo-Twilio
|
b912e83c8a62f62329d01881d719de5ae097037d
|
[
"MIT"
] | null | null | null |
####################
import cherrypy
from indigopy.basereqhandler import BaseRequestHandler
####################
def PluginName():
return u"Twilio Ping"
def PluginDescription():
return u"This is the Twilio-Indigo Ping Plugin."
def ShowOnControlPageList():
return False # if True, then above name/description is shown on the Control Page list index
####################
class TwilioRequestHandler(BaseRequestHandler):
def __init__(self, logFunc, debugLogFunc):
BaseRequestHandler.__init__(self, logFunc, debugLogFunc)
def ping(self, **params):
cherrypy.server.indigoDb.VariableSetValue(cherrypy.server.indigoConn, "twilio_ping", "true")
return
ping.exposed = True
| 26.384615 | 94 | 0.714286 |
4a215ed8332f244789aa52b2cfae4804b77159a4
| 411 |
py
|
Python
|
runremotely/template.py
|
OdysseeT/ec2-jupyter-notebook
|
a64e1ea47a50db3de877a7b0cd8e63ead658ef21
|
[
"Apache-2.0"
] | null | null | null |
runremotely/template.py
|
OdysseeT/ec2-jupyter-notebook
|
a64e1ea47a50db3de877a7b0cd8e63ead658ef21
|
[
"Apache-2.0"
] | null | null | null |
runremotely/template.py
|
OdysseeT/ec2-jupyter-notebook
|
a64e1ea47a50db3de877a7b0cd8e63ead658ef21
|
[
"Apache-2.0"
] | null | null | null |
import pickle
import glob
print("Starting")
picklefiles = glob.glob("/home/ec2-user/*.tp")
print("[remote] Loading function {}".format(picklefiles[0]))
# Load on remote server
with open(picklefiles[0], 'rb') as f:
exec_model = pickle.load(f)
print("[remote] Model Loaded")
response = exec_model()#*args, **kwargs)
print("[remote] Dumping the result")
pickle.dump(response, open("result.pickle", "wb"))
| 22.833333 | 60 | 0.70073 |
4a215feb69df564b24ef1c7e3d0637241b264a67
| 397 |
py
|
Python
|
backend/test_29221/wsgi.py
|
crowdbotics-apps/test-29221
|
238f1083416c60366294c473ebc57006b40f307c
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/test_29221/wsgi.py
|
crowdbotics-apps/test-29221
|
238f1083416c60366294c473ebc57006b40f307c
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/test_29221/wsgi.py
|
crowdbotics-apps/test-29221
|
238f1083416c60366294c473ebc57006b40f307c
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for test_29221 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_29221.settings')
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
4a21600a57a26026265f28924cc22ecdc704f935
| 29,439 |
py
|
Python
|
sdk/keyvault/azure-keyvault-keys/tests/test_crypto_client_async.py
|
jochen-ott-by/azure-sdk-for-python
|
56193e95eac71933cbaebea56d63c48c4a01d333
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault-keys/tests/test_crypto_client_async.py
|
jochen-ott-by/azure-sdk-for-python
|
56193e95eac71933cbaebea56d63c48c4a01d333
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault-keys/tests/test_crypto_client_async.py
|
jochen-ott-by/azure-sdk-for-python
|
56193e95eac71933cbaebea56d63c48c4a01d333
|
[
"MIT"
] | null | null | null |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import codecs
from datetime import datetime
import functools
import hashlib
import os
from unittest import mock
from azure.core.exceptions import HttpResponseError
from azure.keyvault.keys import JsonWebKey, KeyCurveName, KeyVaultKey
from azure.keyvault.keys.aio import KeyClient
from azure.keyvault.keys.crypto._key_validity import _UTC
from azure.keyvault.keys.crypto.aio import CryptographyClient, EncryptionAlgorithm, KeyWrapAlgorithm, SignatureAlgorithm
from azure.keyvault.keys._shared import HttpChallengeCache
from azure.mgmt.keyvault.models import KeyPermissions, Permissions
from devtools_testutils import PowerShellPreparer
import pytest
from _shared.helpers_async import get_completed_future
from _shared.json_attribute_matcher import json_attribute_matcher
from _shared.test_case_async import KeyVaultTestCase
KeyVaultPreparer = functools.partial(
PowerShellPreparer,
"keyvault",
azure_keyvault_url="https://vaultname.vault.azure.net"
)
# without keys/get, a CryptographyClient created with a key ID performs all ops remotely
NO_GET = Permissions(keys=[p.value for p in KeyPermissions if p.value != "get"])
class CryptoClientTests(KeyVaultTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, match_body=False, custom_request_matchers=[json_attribute_matcher], **kwargs)
def tearDown(self):
HttpChallengeCache.clear()
assert len(HttpChallengeCache._cache) == 0
super(CryptoClientTests, self).tearDown()
plaintext = b"5063e6aaa845f150200547944fd199679c98ed6f99da0a0b2dafeaf1f4684496fd532c1c229968cb9dee44957fcef7ccef59ceda0b362e56bcd78fd3faee5781c623c0bb22b35beabde0664fd30e0e824aba3dd1b0afffc4a3d955ede20cf6a854d52cfd"
iv = codecs.decode("89b8adbfb07345e3598932a09c517441", "hex_codec")
aad = b"test"
def create_key_client(self, vault_uri, **kwargs):
credential = self.get_credential(KeyClient, is_async=True)
return self.create_client_from_credential(KeyClient, credential=credential, vault_url=vault_uri, **kwargs)
def create_crypto_client(self, key, **kwargs):
credential = self.get_credential(CryptographyClient, is_async=True)
return self.create_client_from_credential(CryptographyClient, credential=credential, key=key, **kwargs)
def _validate_rsa_key_bundle(self, key_attributes, vault, key_name, kty, key_ops):
prefix = "/".join(s.strip("/") for s in [vault, "keys", key_name])
key = key_attributes.key
kid = key_attributes.id
self.assertTrue(kid.index(prefix) == 0, "Key Id should start with '{}', but value is '{}'".format(prefix, kid))
self.assertEqual(key.kty, kty, "kty should by '{}', but is '{}'".format(key, key.kty))
self.assertTrue(key.n and key.e, "Bad RSA public material.")
self.assertEqual(key_ops, key.key_ops, "keyOps should be '{}', but is '{}'".format(key_ops, key.key_ops))
self.assertTrue(
key_attributes.properties.created_on and key_attributes.properties.updated_on,
"Missing required date attributes.",
)
async def _import_test_key(self, client, name):
def _to_bytes(hex):
if len(hex) % 2:
hex = "0{}".format(hex)
return codecs.decode(hex, "hex_codec")
key = JsonWebKey(
kty="RSA",
key_ops=["encrypt", "decrypt", "sign", "verify", "wrapKey", "unwrapKey"],
n=_to_bytes(
"00a0914d00234ac683b21b4c15d5bed887bdc959c2e57af54ae734e8f00720d775d275e455207e3784ceeb60a50a4655dd72a7a94d271e8ee8f7959a669ca6e775bf0e23badae991b4529d978528b4bd90521d32dd2656796ba82b6bbfc7668c8f5eeb5053747fd199319d29a8440d08f4412d527ff9311eda71825920b47b1c46b11ab3e91d7316407e89c7f340f7b85a34042ce51743b27d4718403d34c7b438af6181be05e4d11eb985d38253d7fe9bf53fc2f1b002d22d2d793fa79a504b6ab42d0492804d7071d727a06cf3a8893aa542b1503f832b296371b6707d4dc6e372f8fe67d8ded1c908fde45ce03bc086a71487fa75e43aa0e0679aa0d20efe35"
),
e=_to_bytes("10001"),
d=_to_bytes(
"627c7d24668148fe2252c7fa649ea8a5a9ed44d75c766cda42b29b660e99404f0e862d4561a6c95af6a83d213e0a2244b03cd28576473215073785fb067f015da19084ade9f475e08b040a9a2c7ba00253bb8125508c9df140b75161d266be347a5e0f6900fe1d8bbf78ccc25eeb37e0c9d188d6e1fc15169ba4fe12276193d77790d2326928bd60d0d01d6ead8d6ac4861abadceec95358fd6689c50a1671a4a936d2376440a41445501da4e74bfb98f823bd19c45b94eb01d98fc0d2f284507f018ebd929b8180dbe6381fdd434bffb7800aaabdd973d55f9eaf9bb88a6ea7b28c2a80231e72de1ad244826d665582c2362761019de2e9f10cb8bcc2625649"
),
p=_to_bytes(
"00d1deac8d68ddd2c1fd52d5999655b2cf1565260de5269e43fd2a85f39280e1708ffff0682166cb6106ee5ea5e9ffd9f98d0becc9ff2cda2febc97259215ad84b9051e563e14a051dce438bc6541a24ac4f014cf9732d36ebfc1e61a00d82cbe412090f7793cfbd4b7605be133dfc3991f7e1bed5786f337de5036fc1e2df4cf3"
),
q=_to_bytes(
"00c3dc66b641a9b73cd833bc439cd34fc6574465ab5b7e8a92d32595a224d56d911e74624225b48c15a670282a51c40d1dad4bc2e9a3c8dab0c76f10052dfb053bc6ed42c65288a8e8bace7a8881184323f94d7db17ea6dfba651218f931a93b8f738f3d8fd3f6ba218d35b96861a0f584b0ab88ddcf446b9815f4d287d83a3237"
),
dp=_to_bytes(
"00c9a159be7265cbbabc9afcc4967eb74fe58a4c4945431902d1142da599b760e03838f8cbd26b64324fea6bdc9338503f459793636e59b5361d1e6951e08ddb089e1b507be952a81fbeaf7e76890ea4f536e25505c3f648b1e88377dfc19b4c304e738dfca07211b792286a392a704d0f444c0a802539110b7f1f121c00cff0a9"
),
dq=_to_bytes(
"00a0bd4c0a3d9f64436a082374b5caf2488bac1568696153a6a5e4cd85d186db31e2f58f024c617d29f37b4e6b54c97a1e25efec59c4d1fd3061ac33509ce8cae5c11f4cd2e83f41a8264f785e78dc0996076ee23dfdfc43d67c463afaa0180c4a718357f9a6f270d542479a0f213870e661fb950abca4a14ca290570ba7983347"
),
qi=_to_bytes(
"009fe7ae42e92bc04fcd5780464bd21d0c8ac0c599f9af020fde6ab0a7e7d1d39902f5d8fb6c614184c4c1b103fb46e94cd10a6c8a40f9991a1f28269f326435b6c50276fda6493353c650a833f724d80c7d522ba16c79f0eb61f672736b68fb8be3243d10943c4ab7028d09e76cfb5892222e38bc4d35585bf35a88cd68c73b07"
),
)
imported_key = await client.import_key(name, key)
self._validate_rsa_key_bundle(imported_key, client.vault_url, name, key.kty, key.key_ops)
return imported_key
async def _import_symmetric_test_key(self, client, name):
key = JsonWebKey(
kty="oct-HSM",
key_ops=["encrypt", "decrypt", "wrapKey", "unwrapKey"],
k=codecs.decode("e27ed0c84512bbd55b6af434d237c11feba311870f80f2c2e3364260f31c82c8", "hex_codec"),
)
imported_key = await client.import_key(name, key)
return imported_key
@KeyVaultPreparer()
async def test_ec_key_id(self, azure_keyvault_url, **kwargs):
"""When initialized with a key ID, the client should retrieve the key and perform public operations locally"""
key_client = self.create_key_client(azure_keyvault_url)
key = await key_client.create_ec_key(self.get_resource_name("eckey"))
crypto_client = self.create_crypto_client(key.id)
await crypto_client._initialize()
assert crypto_client.key_id == key.id
# ensure all remote crypto operations will fail
crypto_client._client = None
await crypto_client.verify(SignatureAlgorithm.es256, hashlib.sha256(self.plaintext).digest(), self.plaintext)
@KeyVaultPreparer()
async def test_rsa_key_id(self, azure_keyvault_url, **kwargs):
"""When initialized with a key ID, the client should retrieve the key and perform public operations locally"""
key_client = self.create_key_client(azure_keyvault_url)
key = await key_client.create_rsa_key(self.get_resource_name("rsakey"))
crypto_client = self.create_crypto_client(key.id)
await crypto_client._initialize()
assert crypto_client.key_id == key.id
# ensure all remote crypto operations will fail
crypto_client._client = None
await crypto_client.encrypt(EncryptionAlgorithm.rsa_oaep, self.plaintext)
await crypto_client.verify(SignatureAlgorithm.rs256, hashlib.sha256(self.plaintext).digest(), self.plaintext)
await crypto_client.wrap_key(KeyWrapAlgorithm.rsa_oaep, self.plaintext)
@KeyVaultPreparer()
async def test_encrypt_and_decrypt(self, azure_keyvault_url, **kwargs):
key_client = self.create_key_client(azure_keyvault_url, permissions=NO_GET)
key_name = self.get_resource_name("keycrypt")
imported_key = await self._import_test_key(key_client, key_name)
crypto_client = self.create_crypto_client(imported_key.id)
result = await crypto_client.encrypt(EncryptionAlgorithm.rsa_oaep, self.plaintext)
self.assertEqual(result.key_id, imported_key.id)
result = await crypto_client.decrypt(result.algorithm, result.ciphertext)
self.assertEqual(result.key_id, imported_key.id)
self.assertEqual(EncryptionAlgorithm.rsa_oaep, result.algorithm)
self.assertEqual(self.plaintext, result.plaintext)
@KeyVaultPreparer()
async def test_sign_and_verify(self, azure_keyvault_url, **kwargs):
key_client = self.create_key_client(azure_keyvault_url, permissions=NO_GET)
key_name = self.get_resource_name("keysign")
md = hashlib.sha256()
md.update(self.plaintext)
digest = md.digest()
imported_key = await self._import_test_key(key_client, key_name)
crypto_client = self.create_crypto_client(imported_key.id)
result = await crypto_client.sign(SignatureAlgorithm.rs256, digest)
self.assertEqual(result.key_id, imported_key.id)
verified = await crypto_client.verify(result.algorithm, digest, result.signature)
self.assertEqual(result.key_id, imported_key.id)
self.assertEqual(result.algorithm, SignatureAlgorithm.rs256)
self.assertTrue(verified.is_valid)
@KeyVaultPreparer()
async def test_wrap_and_unwrap(self, azure_keyvault_url, **kwargs):
key_client = self.create_key_client(azure_keyvault_url, permissions=NO_GET)
key_name = self.get_resource_name("keywrap")
created_key = await key_client.create_key(key_name, "RSA")
self.assertIsNotNone(created_key)
crypto_client = self.create_crypto_client(created_key.id)
# Wrap a key with the created key, then unwrap it. The wrapped key's bytes should round-trip.
key_bytes = self.plaintext
result = await crypto_client.wrap_key(KeyWrapAlgorithm.rsa_oaep, key_bytes)
self.assertEqual(result.key_id, created_key.id)
result = await crypto_client.unwrap_key(result.algorithm, result.encrypted_key)
self.assertEqual(key_bytes, result.key)
@KeyVaultPreparer()
async def test_symmetric_encrypt_and_decrypt(self, azure_keyvault_url, **kwargs):
if self.is_live:
pytest.skip("MHSM-only algorithms can't be tested in CI yet")
key_client = self.create_key_client(azure_keyvault_url)
key_name = self.get_resource_name("symmetric-encrypt")
imported_key = await self._import_symmetric_test_key(key_client, key_name)
assert imported_key is not None
crypto_client = self.create_crypto_client(imported_key.id)
# Use 256-bit AES algorithms for the 256-bit key
symmetric_algorithms = [algo for algo in EncryptionAlgorithm if algo.startswith("A256")]
for algorithm in symmetric_algorithms:
if algorithm.endswith("GCM"):
result = await crypto_client.encrypt(algorithm, self.plaintext, additional_authenticated_data=self.aad)
assert result.key_id == imported_key.id
result = await crypto_client.decrypt(
result.algorithm,
result.ciphertext,
iv=result.iv,
authentication_tag=result.tag,
additional_authenticated_data=self.aad
)
else:
result = await crypto_client.encrypt(
algorithm, self.plaintext, iv=self.iv, additional_authenticated_data=self.aad
)
self.assertEqual(result.key_id, imported_key.id)
result = await crypto_client.decrypt(
result.algorithm, result.ciphertext, iv=self.iv, additional_authenticated_data=self.aad
)
assert result.key_id == imported_key.id
assert result.algorithm == algorithm
if algorithm.endswith("CBC"):
assert result.plaintext.startswith(self.plaintext) # AES-CBC returns a zero-padded plaintext
else:
assert result.plaintext == self.plaintext
@KeyVaultPreparer()
async def test_symmetric_wrap_and_unwrap(self, azure_keyvault_url, **kwargs):
if self.is_live:
pytest.skip("MHSM-only algorithms can't be tested in CI yet")
key_client = self.create_key_client(azure_keyvault_url)
key_name = self.get_resource_name("symmetric-kw")
imported_key = await self._import_symmetric_test_key(key_client, key_name)
assert imported_key is not None
crypto_client = self.create_crypto_client(imported_key.id)
result = await crypto_client.wrap_key(KeyWrapAlgorithm.aes_256, self.plaintext)
assert result.key_id == imported_key.id
result = await crypto_client.unwrap_key(result.algorithm, result.encrypted_key)
assert result.key == self.plaintext
@KeyVaultPreparer()
async def test_encrypt_local(self, azure_keyvault_url, **kwargs):
"""Encrypt locally, decrypt with Key Vault"""
key_client = self.create_key_client(azure_keyvault_url)
key_name = self.get_resource_name("encrypt-local")
key = await key_client.create_rsa_key(key_name, size=4096)
crypto_client = self.create_crypto_client(key)
rsa_encrypt_algorithms = [algo for algo in EncryptionAlgorithm if algo.startswith("RSA")]
for encrypt_algorithm in rsa_encrypt_algorithms:
result = await crypto_client.encrypt(encrypt_algorithm, self.plaintext)
self.assertEqual(result.key_id, key.id)
result = await crypto_client.decrypt(result.algorithm, result.ciphertext)
self.assertEqual(result.plaintext, self.plaintext)
@KeyVaultPreparer()
async def test_wrap_local(self, azure_keyvault_url, **kwargs):
"""Wrap locally, unwrap with Key Vault"""
key_client = self.create_key_client(azure_keyvault_url)
key_name = self.get_resource_name("wrap-local")
key = await key_client.create_rsa_key(key_name, size=4096)
crypto_client = self.create_crypto_client(key)
for wrap_algorithm in (algo for algo in KeyWrapAlgorithm if algo.startswith("RSA")):
result = await crypto_client.wrap_key(wrap_algorithm, self.plaintext)
self.assertEqual(result.key_id, key.id)
result = await crypto_client.unwrap_key(result.algorithm, result.encrypted_key)
self.assertEqual(result.key, self.plaintext)
@KeyVaultPreparer()
async def test_rsa_verify_local(self, azure_keyvault_url, **kwargs):
"""Sign with Key Vault, verify locally"""
key_client = self.create_key_client(azure_keyvault_url)
for size in (2048, 3072, 4096):
key_name = self.get_resource_name("rsa-verify-{}".format(size))
key = await key_client.create_rsa_key(key_name, size=size)
crypto_client = self.create_crypto_client(key)
for signature_algorithm, hash_function in (
(SignatureAlgorithm.ps256, hashlib.sha256),
(SignatureAlgorithm.ps384, hashlib.sha384),
(SignatureAlgorithm.ps512, hashlib.sha512),
(SignatureAlgorithm.rs256, hashlib.sha256),
(SignatureAlgorithm.rs384, hashlib.sha384),
(SignatureAlgorithm.rs512, hashlib.sha512),
):
digest = hash_function(self.plaintext).digest()
result = await crypto_client.sign(signature_algorithm, digest)
self.assertEqual(result.key_id, key.id)
result = await crypto_client.verify(result.algorithm, digest, result.signature)
self.assertTrue(result.is_valid)
@KeyVaultPreparer()
async def test_ec_verify_local(self, azure_keyvault_url, **kwargs):
"""Sign with Key Vault, verify locally"""
key_client = self.create_key_client(azure_keyvault_url)
matrix = {
KeyCurveName.p_256: (SignatureAlgorithm.es256, hashlib.sha256),
KeyCurveName.p_256_k: (SignatureAlgorithm.es256_k, hashlib.sha256),
KeyCurveName.p_384: (SignatureAlgorithm.es384, hashlib.sha384),
KeyCurveName.p_521: (SignatureAlgorithm.es512, hashlib.sha512),
}
for curve, (signature_algorithm, hash_function) in sorted(matrix.items()):
key_name = self.get_resource_name("ec-verify-{}".format(curve.value))
key = await key_client.create_ec_key(key_name, curve=curve)
crypto_client = self.create_crypto_client(key)
digest = hash_function(self.plaintext).digest()
result = await crypto_client.sign(signature_algorithm, digest)
self.assertEqual(result.key_id, key.id)
result = await crypto_client.verify(result.algorithm, digest, result.signature)
self.assertTrue(result.is_valid)
@KeyVaultPreparer()
async def test_local_validity_period_enforcement(self, azure_keyvault_url, **kwargs):
"""Local crypto operations should respect a key's nbf and exp properties"""
key_client = self.create_key_client(azure_keyvault_url, permissions=NO_GET)
async def test_operations(key, expected_error_substrings, encrypt_algorithms, wrap_algorithms):
crypto_client = self.create_crypto_client(key)
for algorithm in encrypt_algorithms:
with pytest.raises(ValueError) as ex:
await crypto_client.encrypt(algorithm, self.plaintext)
for substring in expected_error_substrings:
assert substring in str(ex.value)
for algorithm in wrap_algorithms:
with pytest.raises(ValueError) as ex:
await crypto_client.wrap_key(algorithm, self.plaintext)
for substring in expected_error_substrings:
assert substring in str(ex.value)
# operations should not succeed with a key whose nbf is in the future
the_year_3000 = datetime(3000, 1, 1, tzinfo=_UTC)
rsa_wrap_algorithms = [algo for algo in KeyWrapAlgorithm if algo.startswith("RSA")]
rsa_encryption_algorithms = [algo for algo in EncryptionAlgorithm if algo.startswith("RSA")]
key_name = self.get_resource_name("rsa-not-yet-valid")
not_yet_valid_key = await key_client.create_rsa_key(key_name, not_before=the_year_3000)
await test_operations(not_yet_valid_key, [str(the_year_3000)], rsa_encryption_algorithms, rsa_wrap_algorithms)
# nor should they succeed with a key whose exp has passed
the_year_2000 = datetime(2000, 1, 1, tzinfo=_UTC)
key_name = self.get_resource_name("rsa-expired")
expired_key = await key_client.create_rsa_key(key_name, expires_on=the_year_2000)
await test_operations(expired_key, [str(the_year_2000)], rsa_encryption_algorithms, rsa_wrap_algorithms)
# when exp and nbf are set, error messages should contain both
the_year_3001 = datetime(3001, 1, 1, tzinfo=_UTC)
key_name = self.get_resource_name("rsa-valid")
valid_key = await key_client.create_rsa_key(key_name, not_before=the_year_3000, expires_on=the_year_3001)
await test_operations(
valid_key, (str(the_year_3000), str(the_year_3001)), rsa_encryption_algorithms, rsa_wrap_algorithms
)
def test_custom_hook_policy():
class CustomHookPolicy(object):
pass
client = CryptographyClient("https://localhost/fake/key/version", object(), custom_hook_policy=CustomHookPolicy())
assert isinstance(client._client._config.custom_hook_policy, CustomHookPolicy)
@pytest.mark.asyncio
async def test_symmetric_wrap_and_unwrap_local():
key = KeyVaultKey(
key_id="http://localhost/keys/key/version", k=os.urandom(32), kty="oct", key_ops=["unwrapKey", "wrapKey"],
)
crypto_client = CryptographyClient(key, credential=lambda *_: None)
# Wrap a key with the created key, then unwrap it. The wrapped key's bytes should round-trip.
key_bytes = os.urandom(32)
wrap_result = await crypto_client.wrap_key(KeyWrapAlgorithm.aes_256, key_bytes)
unwrap_result = await crypto_client.unwrap_key(wrap_result.algorithm, wrap_result.encrypted_key)
assert unwrap_result.key == key_bytes
@pytest.mark.asyncio
async def test_initialization_given_key():
"""If the client is given key material, it should not attempt to get this from the vault"""
mock_client = mock.Mock()
key = mock.Mock(spec=KeyVaultKey, id="https://localhost/fake/key/version")
client = CryptographyClient(key, mock.Mock())
client._client = mock_client
mock_client.get_key.return_value = get_completed_future()
with mock.patch(CryptographyClient.__module__ + ".get_local_cryptography_provider") as get_provider:
await client.verify(SignatureAlgorithm.rs256, b"...", b"...")
get_provider.assert_called_once_with(key)
assert mock_client.get_key.call_count == 0
@pytest.mark.asyncio
async def test_initialization_get_key_successful():
"""If the client is able to get key material, it shouldn't do so again"""
key_id = "https://localhost/fake/key/version"
mock_key = mock.Mock()
mock_key.key.kid = key_id
mock_client = mock.Mock()
mock_client.get_key.return_value = get_completed_future(mock_key)
client = CryptographyClient(key_id, mock.Mock())
client._client = mock_client
assert mock_client.get_key.call_count == 0
with mock.patch(CryptographyClient.__module__ + ".get_local_cryptography_provider") as get_provider:
await client.verify(SignatureAlgorithm.rs256, b"...", b"...")
args, _ = get_provider.call_args
assert len(args) == 1 and isinstance(args[0], KeyVaultKey) and args[0].id == key_id
for _ in range(3):
assert mock_client.get_key.call_count == 1
assert get_provider.call_count == 1
await client.verify(SignatureAlgorithm.rs256, b"...", b"...")
@pytest.mark.asyncio
async def test_initialization_forbidden_to_get_key():
"""If the client is forbidden to get key material, it should try to do so exactly once"""
mock_client = mock.Mock()
mock_client.get_key.side_effect = HttpResponseError(response=mock.Mock(status_code=403))
mock_client.verify.return_value = get_completed_future(mock.Mock())
client = CryptographyClient("https://localhost/fake/key/version", mock.Mock())
client._client = mock_client
assert mock_client.get_key.call_count == 0
for _ in range(3):
await client.verify(SignatureAlgorithm.rs256, b"...", b"...")
assert mock_client.get_key.call_count == 1
@pytest.mark.asyncio
async def test_initialization_transient_failure_getting_key():
"""If the client is not forbidden to get key material, it should retry after failing to do so"""
mock_client = mock.Mock()
mock_client.get_key.side_effect = HttpResponseError(response=mock.Mock(status_code=500))
mock_client.verify.return_value = get_completed_future(mock.Mock())
client = CryptographyClient("https://localhost/fake/key/version", mock.Mock())
client._client = mock_client
for i in range(3):
assert mock_client.get_key.call_count == i
await client.verify(SignatureAlgorithm.rs256, b"...", b"...")
@pytest.mark.asyncio
async def test_calls_service_for_operations_unsupported_locally():
"""When an operation can't be performed locally, the client should request Key Vault perform it"""
class _AsyncMock(mock.Mock):
async def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs)
mock_client = _AsyncMock()
key = mock.Mock(spec=KeyVaultKey, id="https://localhost/fake/key/version")
client = CryptographyClient(key, mock.Mock())
client._client = mock_client
supports_nothing = mock.Mock(supports=mock.Mock(return_value=False))
with mock.patch(CryptographyClient.__module__ + ".get_local_cryptography_provider", lambda *_: supports_nothing):
await client.decrypt(EncryptionAlgorithm.rsa_oaep, b"...")
assert mock_client.decrypt.call_count == 1
assert supports_nothing.decrypt.call_count == 0
await client.encrypt(EncryptionAlgorithm.rsa_oaep, b"...")
assert mock_client.encrypt.call_count == 1
assert supports_nothing.encrypt.call_count == 0
await client.sign(SignatureAlgorithm.rs256, b"...")
assert mock_client.sign.call_count == 1
assert supports_nothing.sign.call_count == 0
await client.verify(SignatureAlgorithm.rs256, b"...", b"...")
assert mock_client.verify.call_count == 1
assert supports_nothing.verify.call_count == 0
await client.unwrap_key(KeyWrapAlgorithm.rsa_oaep, b"...")
assert mock_client.unwrap_key.call_count == 1
assert supports_nothing.unwrap_key.call_count == 0
await client.wrap_key(KeyWrapAlgorithm.rsa_oaep, b"...")
assert mock_client.wrap_key.call_count == 1
assert supports_nothing.wrap_key.call_count == 0
@pytest.mark.asyncio
async def test_prefers_local_provider():
"""The client should complete operations locally whenever possible"""
mock_client = mock.Mock()
key = mock.Mock(
spec=KeyVaultKey,
id="https://localhost/fake/key/version",
properties=mock.Mock(
not_before=datetime(2000, 1, 1, tzinfo=_UTC), expires_on=datetime(3000, 1, 1, tzinfo=_UTC)
),
)
client = CryptographyClient(key, mock.Mock())
client._client = mock_client
supports_everything = mock.Mock(supports=mock.Mock(return_value=True))
with mock.patch(CryptographyClient.__module__ + ".get_local_cryptography_provider", lambda *_: supports_everything):
await client.decrypt(EncryptionAlgorithm.rsa_oaep, b"...")
assert mock_client.decrypt.call_count == 0
assert supports_everything.decrypt.call_count == 1
await client.encrypt(EncryptionAlgorithm.rsa_oaep, b"...")
assert mock_client.encrypt.call_count == 0
assert supports_everything.encrypt.call_count == 1
await client.sign(SignatureAlgorithm.rs256, b"...")
assert mock_client.sign.call_count == 0
assert supports_everything.sign.call_count == 1
await client.verify(SignatureAlgorithm.rs256, b"...", b"...")
assert mock_client.verify.call_count == 0
assert supports_everything.verify.call_count == 1
await client.unwrap_key(KeyWrapAlgorithm.rsa_oaep, b"...")
assert mock_client.unwrap_key.call_count == 0
assert supports_everything.unwrap_key.call_count == 1
await client.wrap_key(KeyWrapAlgorithm.rsa_oaep, b"...")
assert mock_client.wrap_key.call_count == 0
assert supports_everything.wrap_key.call_count == 1
@pytest.mark.asyncio
async def test_encrypt_argument_validation():
"""The client should raise an error when arguments don't work with the specified algorithm"""
mock_client = mock.Mock()
key = mock.Mock(
spec=KeyVaultKey,
id="https://localhost/fake/key/version",
properties=mock.Mock(
not_before=datetime(2000, 1, 1, tzinfo=_UTC), expires_on=datetime(3000, 1, 1, tzinfo=_UTC)
),
)
client = CryptographyClient(key, mock.Mock())
client._client = mock_client
with pytest.raises(ValueError) as ex:
await client.encrypt(EncryptionAlgorithm.rsa_oaep, b"...", iv=b"...")
assert "iv" in str(ex.value)
with pytest.raises(ValueError) as ex:
await client.encrypt(EncryptionAlgorithm.rsa_oaep, b"...", additional_authenticated_data=b"...")
assert "additional_authenticated_data" in str(ex.value)
@pytest.mark.asyncio
async def test_decrypt_argument_validation():
mock_client = mock.Mock()
key = mock.Mock(
spec=KeyVaultKey,
id="https://localhost/fake/key/version",
properties=mock.Mock(
not_before=datetime(2000, 1, 1, tzinfo=_UTC), expires_on=datetime(3000, 1, 1, tzinfo=_UTC)
),
)
client = CryptographyClient(key, mock.Mock())
client._client = mock_client
with pytest.raises(ValueError) as ex:
await client.decrypt(EncryptionAlgorithm.rsa_oaep, b"...", iv=b"...")
assert "iv" in str(ex.value)
with pytest.raises(ValueError) as ex:
await client.decrypt(EncryptionAlgorithm.rsa_oaep, b"...", additional_authenticated_data=b"...")
assert "additional_authenticated_data" in str(ex.value)
with pytest.raises(ValueError) as ex:
await client.decrypt(EncryptionAlgorithm.rsa_oaep, b"...", authentication_tag=b"...")
assert "authentication_tag" in str(ex.value)
| 48.820896 | 532 | 0.719556 |
4a216025c51b67ee54d2e5c59fbb17ffa0e38a71
| 5,193 |
py
|
Python
|
pysnmp/ENTERASYS-AAA-POLICY-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11 |
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/ENTERASYS-AAA-POLICY-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75 |
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/ENTERASYS-AAA-POLICY-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10 |
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module ENTERASYS-AAA-POLICY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ENTERASYS-AAA-POLICY-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:48:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint")
etsysModules, = mibBuilder.importSymbols("ENTERASYS-MIB-NAMES", "etsysModules")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Integer32, TimeTicks, ObjectIdentity, NotificationType, MibIdentifier, ModuleIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Unsigned32, IpAddress, iso, Bits, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "TimeTicks", "ObjectIdentity", "NotificationType", "MibIdentifier", "ModuleIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Unsigned32", "IpAddress", "iso", "Bits", "Gauge32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
etsysAAAPolicyMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51))
etsysAAAPolicyMIB.setRevisions(('2004-07-29 19:06',))
if mibBuilder.loadTexts: etsysAAAPolicyMIB.setLastUpdated('200407291906Z')
if mibBuilder.loadTexts: etsysAAAPolicyMIB.setOrganization('Enterasys Networks, Inc')
class AAAProtocol(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("any", 1), ("none", 2), ("radius", 3), ("tacacs", 4))
etsysAAAPolicyObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 1))
etsysAAAPolicyMgmtAccess = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 1, 1))
etsysAAAMgmtAccessTable = MibTable((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 1, 1, 1), )
if mibBuilder.loadTexts: etsysAAAMgmtAccessTable.setStatus('current')
etsysAAAMgmtAccessEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 1, 1, 1, 1), ).setIndexNames((0, "ENTERASYS-AAA-POLICY-MIB", "etsysAAAMgmtAccessProtocol"))
if mibBuilder.loadTexts: etsysAAAMgmtAccessEntry.setStatus('current')
etsysAAAMgmtAccessProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("allProtocols", 1))))
if mibBuilder.loadTexts: etsysAAAMgmtAccessProtocol.setStatus('current')
etsysAAAMgmtRemoteAuthProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 1, 1, 1, 1, 2), AAAProtocol().clone('any')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysAAAMgmtRemoteAuthProtocol.setStatus('current')
etsysAAAMgmtRemoteAcctProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 1, 1, 1, 1, 3), AAAProtocol().clone('any')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysAAAMgmtRemoteAcctProtocol.setStatus('current')
etsysAAAPolicyMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 2))
etsysAAAPolicyMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 2, 1))
etsysAAAPolicyMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 2, 2))
etsysAAAPolicyMgmtGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 2, 2, 1)).setObjects(("ENTERASYS-AAA-POLICY-MIB", "etsysAAAMgmtRemoteAuthProtocol"), ("ENTERASYS-AAA-POLICY-MIB", "etsysAAAMgmtRemoteAcctProtocol"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysAAAPolicyMgmtGroup = etsysAAAPolicyMgmtGroup.setStatus('current')
etsysAAAPolicyMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 5624, 1, 2, 51, 2, 1, 1)).setObjects(("ENTERASYS-AAA-POLICY-MIB", "etsysAAAPolicyMgmtGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysAAAPolicyMIBCompliance = etsysAAAPolicyMIBCompliance.setStatus('current')
mibBuilder.exportSymbols("ENTERASYS-AAA-POLICY-MIB", etsysAAAPolicyMIBGroups=etsysAAAPolicyMIBGroups, etsysAAAPolicyObjects=etsysAAAPolicyObjects, etsysAAAPolicyMIB=etsysAAAPolicyMIB, etsysAAAMgmtAccessProtocol=etsysAAAMgmtAccessProtocol, etsysAAAPolicyMgmtGroup=etsysAAAPolicyMgmtGroup, etsysAAAMgmtAccessTable=etsysAAAMgmtAccessTable, etsysAAAPolicyMIBCompliances=etsysAAAPolicyMIBCompliances, etsysAAAPolicyMgmtAccess=etsysAAAPolicyMgmtAccess, etsysAAAMgmtRemoteAuthProtocol=etsysAAAMgmtRemoteAuthProtocol, etsysAAAPolicyMIBConformance=etsysAAAPolicyMIBConformance, etsysAAAPolicyMIBCompliance=etsysAAAPolicyMIBCompliance, etsysAAAMgmtRemoteAcctProtocol=etsysAAAMgmtRemoteAcctProtocol, etsysAAAMgmtAccessEntry=etsysAAAMgmtAccessEntry, PYSNMP_MODULE_ID=etsysAAAPolicyMIB, AAAProtocol=AAAProtocol)
| 110.489362 | 798 | 0.77566 |
4a2160bc19d2ba87ba4fa9a43fed07b78b3a1f98 | 52 | py | Python | apps/lightrfp/__init__.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | ["MIT"] | null | null | null | apps/lightrfp/__init__.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | ["MIT"] | null | null | null | apps/lightrfp/__init__.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | ["MIT"] | null | null | null |
default_app_config = 'lightrfp.apps.LightRFPConfig'
| 26 | 51 | 0.846154 |
4a2160c338c9048088586d2765ac90ba696710d5 | 1,107 | py | Python | preisach.py | cosmo-jana/numerics-physics-stuff | f5fb35c00c84ca713877e20c1d8186e76883cd28 | ["MIT"] | 1 | 2020-10-16T16:35:35.000Z | 2020-10-16T16:35:35.000Z | preisach.py | cosmo-jana/numerics-physics-stuff | f5fb35c00c84ca713877e20c1d8186e76883cd28 | ["MIT"] | null | null | null | preisach.py | cosmo-jana/numerics-physics-stuff | f5fb35c00c84ca713877e20c1d8186e76883cd28 | ["MIT"] | null | null | null |
#encoding: utf-8
# Preisach simulation to model hysteresis curves
# * [1] F. Preisach, Über die magnetische Nachwirkung. Zeitschrift fuer Physik, 94:277-302, 1935
# Implemented after: June 2015, Markus Osterhoff
import numpy as np
import matplotlib.pyplot as plt
N = 32768 # number of hysterons
M = 2000 # number of steps
L = 2 # number of loops
r = 0.8 # abs max external field
# initialize the simulation
width = np.random.rand(N)
position = np.random.rand(N) - 0.5
alpha = position - width/2.
beta = position + width/2.
state = np.random.choice([-1., 1.], N)
xs = []; ys = []
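# Each hysteron switches up once the external field reaches its beta threshold and down
# once it falls to its alpha threshold; in between it keeps its previous state.
# The output y is the mean state over all hysterons.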
def step(i):
x = r*i/M
global state, xs, ys
state = np.where(x >= beta, 1.0, np.where(x <= alpha, -1.0, state))
y = np.mean(state)
xs.append(x)
ys.append(y)
# print "%+6.3f, %+6.3f" % (x, y)
# make L loops through the ramping
for j in range(L):
# ramp up
for i in range(0, M + 1): step(i)
# ramp down
for i in range(M, -M - 1, -1): step(i)
# ramp to 0
for i in range(-M, 1, 1): step(i)
plt.plot(xs, ys)
plt.xlabel("External field")
plt.ylabel("Internal field")
plt.show()
| 24.065217 | 96 | 0.628726 |
4a21617ef98e97080e889a510b9ecff55a0fcd95 | 976 | py | Python | portfolio/2013_OrSys/settings.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | ["Apache-2.0"] | null | null | null | portfolio/2013_OrSys/settings.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | ["Apache-2.0"] | null | null | null | portfolio/2013_OrSys/settings.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | ["Apache-2.0"] | 5 | 2016-03-22T07:40:46.000Z | 2021-05-30T16:12:21.000Z |
# this is MySQL credentials:
db_password = '7#iA!g!e^@Vk'
db_name = "orsys"
#
# this is users auth dictionary:
users = {
'Tony': {
'password': 'st0len', 'img': 'astronaut',
'role': 'superuser', 'email': 'a.s.kosinov@gmail.com',
'phoneNo': '+380667275551'
},
'Jake': {
'password': 'FastFresh', 'img': 'cat-eye',
        'role': 'superuser', 'email': 'jake.taylor@fastfresh.com.au',
'phoneNo': '02133425566'
}
}
#
# Twillio credentials:
ACCOUNT_SID = 'AC189d1534b1dc120d37610d3efacd94aa'
AUTH_TOKEN = '6722206ba3a216e1ff29e53759ce15ba'
# SID_PN = 'd8a6d9ac05b770ee02fa23933ae18242'
# Phone Number (inbound/outbound sms only)
TEST_PhN = '+61437881387'
#
# Fax sending properties:
TEST_EMAIL = '0282128065@fax.utbox.net'
TEST_EMAIL_FROM = 'orders@fastfresh.com.au'
# Magento MySQL DB credentials:
MdbUser = 'fastfres_ordsys'
MdbPassword = 'snW599gDP86GUp84VpK'
Mdb = 'fastfres_magento'
MdbIP = '103.1.217.37'
| 27.885714 | 70 | 0.66291 |
4a2161c1b531473cc8d0a4488383b38a74b22242 | 5,348 | py | Python | sample_scripts/Gen10_Server_Update.py | LaudateCorpus1/iLOAmpPack-Redfish-API-Docs | 69e14512059c763d3ff39e45d0ba08de8a5a15a9 | ["Apache-2.0"] | 8 | 2018-09-26T04:08:58.000Z | 2022-01-15T22:24:26.000Z | sample_scripts/Gen10_Server_Update.py | LaudateCorpus1/iLOAmpPack-Redfish-API-Docs | 69e14512059c763d3ff39e45d0ba08de8a5a15a9 | ["Apache-2.0"] | 8 | 2019-12-19T09:42:24.000Z | 2022-02-26T04:15:34.000Z | sample_scripts/Gen10_Server_Update.py | LaudateCorpus1/iLOAmpPack-Redfish-API-Docs | 69e14512059c763d3ff39e45d0ba08de8a5a15a9 | ["Apache-2.0"] | 6 | 2019-01-09T16:23:57.000Z | 2021-07-23T17:03:06.000Z |
#(C) Copyright [2021] Hewlett Packard Enterprise Development LP
#Licensed under the Apache License, Version 2.0 (the "License"); you may
#not use this file except in compliance with the License. You may obtain
#a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#License for the specific language governing permissions and limitations
#under the License.
# Gen10 server update
import sys
import redfish
import time
import json
import argparse
# Check for the multiple occurance of same argument, if so raise exception
class Once(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if hasattr(self, 'seen'):
raise argparse.ArgumentError(self, 'Only once please')
setattr(self, 'seen', True)
setattr(namespace, self.dest, values)
my_parser = argparse.ArgumentParser()
boolValue = ["True", "False"]
operationType = ["iLORepositoryOnline", "iLORepositoryOffline"]
operationStrategy = ["StageAndDeploy", "DeployOnly", "StageOnly"]
my_parser.add_argument('--iLOAmpAddress', action=Once, required=True, type=str)
my_parser.add_argument('--Username', action=Once, required=True, type=str)
my_parser.add_argument('--Password', action=Once, required=True, type=str)
my_parser.add_argument('--BaselineId', action=Once, required=True, type=int)
my_parser.add_argument('--OperationStrategy', action=Once, choices=operationStrategy, required=True, type=str)
my_parser.add_argument('--OperationType', action=Once, choices=operationType, required=True, type=str)
my_parser.add_argument('--ResetFlag', action=Once, choices=boolValue, type=str, default="False")
my_parser.add_argument('--DowngradeFlag', action=Once, choices=boolValue, type=str, default="False")
my_parser.add_argument('--CleanUpRepository', action=Once, choices=boolValue, type=str, default="False")
my_parser.add_argument('--BatchSize', action=Once, type=int, default=20)
my_parser.add_argument('--OutputFileName', action=Once, type=str)
my_parser.add_argument('--ServerAddress', action=Once, required=True, type=str, nargs='*')
args = my_parser.parse_args()
# Redirect standard output into a file(optional)
if args.OutputFileName != None:
if (args.OutputFileName.find('.txt') == -1):
args.OutputFileName += ".txt"
sys.stdout = open(args.OutputFileName, "w")
# Connect using iLO Amplifier Pack address, username and password
login_host = "https://" + args.iLOAmpAddress
login_account = args.Username
login_password = args.Password
## Create a REDFISH object
try:
REDFISH_OBJ = redfish.redfish_client(base_url=login_host, \
username=login_account, \
password=login_password, timeout=10, max_retry=3)
except:
print("Wrong iLO Amplifier Pack address/credential or iLO Amplifier Pack is not reachable\n")
exit()
# Login into iLO Amplifier Pack and create a session
try:
REDFISH_OBJ.login(auth="session")
except:
print("Invalid iLO Amplifier Pack credential\n")
exit()
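# dict.fromkeys() drops duplicate server addresses while preserving the order they were given in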
updateServerList = list( dict.fromkeys(args.ServerAddress))
body = dict()
body["BaselineId"] = args.BaselineId
body["BatchSize"] = args.BatchSize
body["OperationStrategy"] = args.OperationStrategy
body["OperationType"] = args.OperationType
body["DowngradeFlag"] = True if args.DowngradeFlag == "True" else False
if args.OperationType == "iLORepositoryOffline":
body["ResetFlag"] = False
else:
body["ResetFlag"] = True if args.ResetFlag == "True" else False
body["CleanUpRepository"] = True if args.CleanUpRepository == "True" else False
body["SelectedSystemsManagerAddress"] = updateServerList
ManagerCredentials = dict()
ManagerCredentials["UserName"] = ""
ManagerCredentials["Password"]: ""
body["ManagerCredentials"] = ManagerCredentials
try:
response = REDFISH_OBJ.post("/redfish/v1/JobService/Actions/Oem/Hpe/HpeWfmJobServiceExt.ServerUpdateJobs", body=body)
except:
print("POST request to /redfish/v1/JobService/Actions/Oem/Hpe/HpeWfmJobServiceExt.ServerUpdateJobs\n")
REDFISH_OBJ.logout()
exit()
errMessage = ""
if response.status != 202:
sys.stdout.write("Unable to update gen 10 server\n")
sys.stdout.write("status code %s\n" % response.status)
try:
sys.stdout.write(
"ErrorType: %s\n" % response.dict["error"]["@Message.ExtendedInfo"][0]["MessageId"].split(".")[-1])
for errStr in response.dict["error"]["@Message.ExtendedInfo"][0]["MessageArgs"]:
errMessage += errStr + ", "
sys.stdout.write("errMessage: %s\n" % errMessage)
except:
pass
REDFISH_OBJ.logout()
exit()
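# The new job's URI may be returned in the Location header, the Link header, or the extended error info, so try each in turn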
jobCreatedLink = response.getheader("Location")
if jobCreatedLink == None:
jobCreatedLink = response.getheader("Link")
if jobCreatedLink == None:
jobCreatedLink = response.dict['error']['@Message.ExtendedInfo'][0]['MessageArgs'][0]
jobId = jobCreatedLink.split("/")[-1]
sys.stdout.write("Gen10 server update job has been created successfully\n")
sys.stdout.write("check job status using jobId %s\n" % jobId)
if args.OutputFileName != None:
sys.stdout.close()
# Logout of the current session
REDFISH_OBJ.logout()
| 40.210526 | 121 | 0.731301 |
4a2162dfc0c809aae4a8de9454565d0973b98879 | 738 | py | Python | egta/envs/tragedy/tests/test_continuous.py | instadeepai/EGTA-NMARL | 544b2e0e4b5518edefc6819975f9de4573ff434c | ["MIT"] | 6 | 2020-12-09T06:50:50.000Z | 2022-01-29T19:15:06.000Z | egta/envs/tragedy/tests/test_continuous.py | instadeepai/EGTA-NMARL | 544b2e0e4b5518edefc6819975f9de4573ff434c | ["MIT"] | null | null | null | egta/envs/tragedy/tests/test_continuous.py | instadeepai/EGTA-NMARL | 544b2e0e4b5518edefc6819975f9de4573ff434c | ["MIT"] | 2 | 2020-10-21T17:17:29.000Z | 2021-02-14T11:20:07.000Z |
import torch
from ..env import Continuous
def test_sampling():
test_low_high = [(0, 1), (-2, 1), (-23, 61), (200, 200.1)]
test_shapes = [(1,), (2,), (2,10), (100, 100)]
for shape in test_shapes:
for low, high in test_low_high:
temp_space = Continuous(low, high)
sample = temp_space.sample(shape)
# test that the sample shape is correct
assert tuple(sample.shape) == shape
# test that the sample values are within the correct range
assert torch.all(sample <= high) # according to the pytorch docs, this should be exclusive, however, I don't care about that here
assert torch.all(low <= sample)
# def test_device
# def test_dtype
| 32.086957 | 141 | 0.615176 |
4a216363174886bf8d55c1e6f952a72131f6b762 | 808 | py | Python | banksystem/manage.py | somacode1/Building.RESTfulAPIs.with.Django.1.11 | 70d1144b675e0e34191164e8ff4d0d6ab638be27 | ["MIT"] | 8 | 2019-02-01T14:10:45.000Z | 2022-03-17T12:05:09.000Z | banksystem/manage.py | somacode1/Building.RESTfulAPIs.with.Django.1.11 | 70d1144b675e0e34191164e8ff4d0d6ab638be27 | ["MIT"] | null | null | null | banksystem/manage.py | somacode1/Building.RESTfulAPIs.with.Django.1.11 | 70d1144b675e0e34191164e8ff4d0d6ab638be27 | ["MIT"] | 7 | 2019-02-01T14:12:17.000Z | 2022-03-06T20:57:43.000Z |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "banksystem.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.130435 | 77 | 0.643564 |
4a21645cd9d9b48890921ca06194e136d9b7a7ff | 3,105 | py | Python | visualiser/generators/okitResourceManagerGenerator.py | bnasslahsen/oci-designer-toolkit | 63343e8ecb17a0fe6ebb3c5b18e7c5607dd5c4bf | ["UPL-1.0", "Apache-2.0"] | null | null | null | visualiser/generators/okitResourceManagerGenerator.py | bnasslahsen/oci-designer-toolkit | 63343e8ecb17a0fe6ebb3c5b18e7c5607dd5c4bf | ["UPL-1.0", "Apache-2.0"] | null | null | null | visualiser/generators/okitResourceManagerGenerator.py | bnasslahsen/oci-designer-toolkit | 63343e8ecb17a0fe6ebb3c5b18e7c5607dd5c4bf | ["UPL-1.0", "Apache-2.0"] | null | null | null |
# Copyright (c) 2020, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = ["Andrew Hopkinson (Oracle Cloud Solutions A-Team)"]
__version__ = "1.0.0"
__module__ = "ociTerraform11Generator"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import os
from common.okitCommon import writeTerraformFile
from common.okitLogging import getLogger
from generators.okitTerraformGenerator import OCITerraformGenerator
# Configure logging
logger = getLogger()
class OCIResourceManagerGenerator(OCITerraformGenerator):
def __init__(self, template_root, output_root, visualiser_json, use_vars=False, tenancy_ocid=None, region=None, compartment_ocid=None):
DIRECTORY_SUFFIX = 'resource-manager'
super(OCIResourceManagerGenerator, self).__init__(template_root, output_root, visualiser_json, use_vars)
self.output_dir = os.path.join(output_root, DIRECTORY_SUFFIX)
logger.info('OCIResourceManagerGenerator : Template Directory {0!s:s}'.format(template_root))
logger.info('OCIResourceManagerGenerator : Output Directory {0!s:s}'.format(output_root))
# Check output directory
self.getCheckOutputDirectory()
self.resource_manager_keys = {}
self.resource_manager_keys['tenancy_ocid'] = tenancy_ocid
self.resource_manager_keys['region'] = region
self.resource_manager_keys['compartment_ocid'] = compartment_ocid
logger.info('Resource Manager Keys : {0!s:s}'.format(self.resource_manager_keys))
def initialiseJinja2Variables(self):
super(OCIResourceManagerGenerator, self).initialiseJinja2Variables()
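        # Flag picked up by the Jinja2 templates so the rendered Terraform suits Resource Manager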
self.jinja2_variables["resource_manager"] = True
def writeFiles(self):
main_rendered = self.getRenderedMain()
# Write Main tf processing file
# Remove Provider entry because this is not required for Resource Manager
writeTerraformFile(os.path.join(self.output_dir, self.MAIN_FILE_NAME), main_rendered[1:])
# Write Variable files
variable_definitions = []
# Delete Provider Variables
del self.run_variables['user_ocid']
del self.run_variables['private_key_path']
del self.run_variables['fingerprint']
# Specify Values for Resource Manager Connection
self.run_variables['tenancy_ocid'] = self.resource_manager_keys['tenancy_ocid']
self.run_variables['region'] = self.resource_manager_keys['region']
self.run_variables['compartment_ocid'] = self.resource_manager_keys['compartment_ocid']
for key, value in self.getVariables().items():
# Convert to string
self.run_variables[key] = str(value)
variable_definitions.append('variable "{0!s:s}" {{}}'.format(key))
writeTerraformFile(os.path.join(self.output_dir, self.VARIABLES_FILE_NAME), variable_definitions)
return
| 47.045455 | 139 | 0.687923 |
4a216553a94601f2bdd46a09246772c627fc9caf | 2,478 | py | Python | octavia/tests/unit/controller/worker/v2/flows/test_l7policy_flows.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | ["Apache-2.0"] | 129 | 2015-06-23T08:06:23.000Z | 2022-03-31T12:38:20.000Z | octavia/tests/unit/controller/worker/v2/flows/test_l7policy_flows.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | ["Apache-2.0"] | 6 | 2016-05-20T11:05:27.000Z | 2021-03-23T06:05:52.000Z | octavia/tests/unit/controller/worker/v2/flows/test_l7policy_flows.py | zhangi/octavia | e68c851fecf55e1b5ffe7d5b849f729626af28a3 | ["Apache-2.0"] | 166 | 2015-07-15T16:24:05.000Z | 2022-03-02T20:54:36.000Z |
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.controller.worker.v2.flows import l7policy_flows
import octavia.tests.unit.base as base
class TestL7PolicyFlows(base.TestCase):
def setUp(self):
self.L7PolicyFlow = l7policy_flows.L7PolicyFlows()
super().setUp()
def test_get_create_l7policy_flow(self):
l7policy_flow = self.L7PolicyFlow.get_create_l7policy_flow()
self.assertIsInstance(l7policy_flow, flow.Flow)
self.assertIn(constants.LISTENERS, l7policy_flow.requires)
self.assertIn(constants.L7POLICY, l7policy_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, l7policy_flow.requires)
self.assertEqual(3, len(l7policy_flow.requires))
self.assertEqual(0, len(l7policy_flow.provides))
def test_get_delete_l7policy_flow(self):
l7policy_flow = self.L7PolicyFlow.get_delete_l7policy_flow()
self.assertIsInstance(l7policy_flow, flow.Flow)
self.assertIn(constants.LISTENERS, l7policy_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, l7policy_flow.requires)
self.assertIn(constants.L7POLICY, l7policy_flow.requires)
self.assertEqual(3, len(l7policy_flow.requires))
self.assertEqual(0, len(l7policy_flow.provides))
def test_get_update_l7policy_flow(self):
l7policy_flow = self.L7PolicyFlow.get_update_l7policy_flow()
self.assertIsInstance(l7policy_flow, flow.Flow)
self.assertIn(constants.L7POLICY, l7policy_flow.requires)
self.assertIn(constants.LISTENERS, l7policy_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, l7policy_flow.requires)
self.assertIn(constants.UPDATE_DICT, l7policy_flow.requires)
self.assertEqual(4, len(l7policy_flow.requires))
self.assertEqual(0, len(l7policy_flow.provides))
| 35.913043 | 75 | 0.752623 |
4a21655643a41c6228fffbdac99c678c1969e4cf | 3,679 | py | Python | jonathan_steward_automation/jonathan_steward_automation/JuniperConfig.py | gjonathansteward/FYP-network-automation | f8c43bd5f4e142e655248e26e497be2d383459db | ["MIT"] | 1 | 2018-05-11T12:54:16.000Z | 2018-05-11T12:54:16.000Z | jonathan_steward_automation/jonathan_steward_automation/JuniperConfig.py | gjonathansteward/FYP-network-automation | f8c43bd5f4e142e655248e26e497be2d383459db | ["MIT"] | null | null | null | jonathan_steward_automation/jonathan_steward_automation/JuniperConfig.py | gjonathansteward/FYP-network-automation | f8c43bd5f4e142e655248e26e497be2d383459db | ["MIT"] | null | null | null |
from jnpr.junos import Device
from jnpr.junos.utils.config import Config
from jnpr.junos.exception import ConnectError
from jnpr.junos.exception import ConfigLoadError
from jnpr.junos.exception import CommitError
from lxml.etree import XMLSyntaxError
import time
from common_tasks import print_error
def Juniper_connect(device_object):
"""
Author - Jonathan Steward
Function - Create a connnection to a juniper device
Inputs - device_object - Device object
returns - device connection object
"""
print "{}: connecting".format(time.strftime('%Y-%m-%d %H:%M:%S'))
try:
device_con = Device(
host=device_object.ip,
user=device_object.username,
passwd=device_object.password).open()
except ConnectError as error:
print_error("{}: there was an issue connecting: {}".format(time.strftime('%Y-%m-%d %H:%M:%S'), error))
return
return device_con
def Juniper_config(configuration, device_object, config_format='xml'):
"""
Author - Jonathan Steward
Function - use device connection object and connect to load configuration
Inputs -
configuration - string of what configuration you want to apply
device_object - device object containing all the device
config_format - the format of the configuration, default is xml but can also be set commands
returns - bool - state of if the config operation worked or not.
"""
device_con = Juniper_connect(device_object)
if not device_con:
return False
try:
with Config(device_con, mode='private') as connection:
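            # mode='private' works on a private candidate configuration, keeping this change isolated from other sessions until commit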
print "{}: connected, loading".format(time.strftime('%Y-%m-%d %H:%M:%S'))
try:
connection.load(configuration, format=config_format, overwrite=True)
except ConfigLoadError as error:
print_error("there was an issue loading configuration:\n{}".format(error))
return False
except XMLSyntaxError as error:
print_error("there was a syntax error:\n{}".format(error))
return False
except:
print_error("Something went wrong".format(error))
return False
print "{}: loaded, commiting the following change:".format(time.strftime('%Y-%m-%d %H:%M:%S'))
diff = connection.pdiff()
print diff
try:
connection.commit(comment='Adding description via api with xml file')
except CommitError as error:
print_error("There was an issue with the commit!{}".format(error))
return False
print "{}: commit complete".format(time.strftime('%Y-%m-%d %H:%M:%S'))
device_con.close()
    except:
        print_error("Something went wrong applying config")
        return False
return True
"""
Commented out as used within phase 1
def main():
device_details = Device_object('192.168.0.2', 'admin', 'cisco12345', '', 'juniper')
configuration = """
"""
extra comment block
<configuration>
<interfaces>
<interface>
<name>ge-0/0/4</name>
<description>DESCRIPTION SET VIA XML FILE AND SCRIPT at {}</description>
</interface>
</interfaces>
</configuration>
"""
""".format(time.strftime('%Y-%m-%d %H:%M:%S'))
# configuration = "This shouldn't work"
state = Juniper_config(configuration, device_details)
if not state:
print_error("Issue with Configuring quitting")
sys.exit()
print "checking Descriptions VIA SNMP"
GrabIntToDesc(device_details.ip)
if __name__ == "__main__":
main()
"""
| 36.425743 | 110 | 0.633052 |
4a216579c028a5325d227c81b723172b4be81da4 | 277 | py | Python | settings_files/create_jobs/gamess_dft.py | tommason14/monash2018 | 8fc2de97172130ed5d532deb6f5bcca39ef3a6e3 | ["MIT"] | 7 | 2020-06-05T01:55:09.000Z | 2021-12-20T19:32:36.000Z | settings_files/create_jobs/gamess_dft.py | tommason14/monash2018 | 8fc2de97172130ed5d532deb6f5bcca39ef3a6e3 | ["MIT"] | null | null | null | settings_files/create_jobs/gamess_dft.py | tommason14/monash2018 | 8fc2de97172130ed5d532deb6f5bcca39ef3a6e3 | ["MIT"] | 8 | 2020-06-06T10:03:17.000Z | 2022-03-18T14:47:33.000Z |
from autochem import Settings, GamessJob
import glob
xyz=glob.glob('*xyz')[0]
sett=Settings()
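# Drop the default MP2 treatment and request the M06-2X DFT functional with grid-based integration instead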
sett.input.mp2=None
sett.input.contrl.mplevl=None
sett.input.contrl.dfttyp='m062x'
sett.input.dft.method='grid'
GamessJob(using=xyz, fmo=True, frags_in_subdir=True, settings=sett)
| 21.307692 | 67 | 0.779783 |
4a2165f141335972539daba6c4714d2481c4c7b6 | 1,030 | py | Python | python27/win32/Lib/site-packages/oss2/__init__.py | aliyun/oss-ftp | 1670b67b6ce726314ca0081841567934435128d4 | ["MIT"] | 69 | 2015-12-28T07:02:51.000Z | 2022-03-31T13:36:42.000Z | python36/unix/lib/oss2/__init__.py | aliyun/oss-ftp | 1670b67b6ce726314ca0081841567934435128d4 | ["MIT"] | 23 | 2016-03-04T10:43:24.000Z | 2021-03-17T09:58:19.000Z | python27/win32/Lib/site-packages/oss2/__init__.py | aliyun/oss-ftp | 1670b67b6ce726314ca0081841567934435128d4 | ["MIT"] | 24 | 2016-02-29T11:45:47.000Z | 2021-12-24T08:41:37.000Z |
__version__ = '2.1.1'
from . import models, exceptions
from .api import Service, Bucket
from .auth import Auth, AnonymousAuth, StsAuth
from .http import Session, CaseInsensitiveDict
from .iterators import (BucketIterator, ObjectIterator,
MultipartUploadIterator, ObjectUploadIterator, PartIterator)
from .resumable import resumable_upload, resumable_download, ResumableStore, ResumableDownloadStore, determine_part_size
from .resumable import make_upload_store, make_download_store
from .compat import to_bytes, to_string, to_unicode, urlparse, urlquote, urlunquote
from .utils import SizedFileAdapter, make_progress_adapter
from .utils import content_type_by_name, is_valid_bucket_name
from .utils import http_date, http_to_unixtime, iso8601_to_unixtime, date_to_iso8601, iso8601_to_date
from .models import BUCKET_ACL_PRIVATE, BUCKET_ACL_PUBLIC_READ, BUCKET_ACL_PUBLIC_READ_WRITE
from .models import OBJECT_ACL_DEFAULT, OBJECT_ACL_PRIVATE, OBJECT_ACL_PUBLIC_READ, OBJECT_ACL_PUBLIC_READ_WRITE
| 38.148148 | 120 | 0.834951 |