content (string, 7–928k) | avg_line_length (float64, 3.5–33.8k) | max_line_length (int64, 6–139k) | alphanum_fraction (float64, 0.08–0.96) | licenses (sequence) | repository_name (string, 7–104) | path (string, 4–230) | size (int64, 7–928k) | lang (1 class: Python) |
---|---|---|---|---|---|---|---|---|
import QueueLinkedList as queue
"""
n1
/\
n2 n3
/\ /\
n4 n5 n6 n7
"""
class BinaryTree:
def __init__(self, size) -> None:
self.customList = size * [None]
self.lastUsedIndex = 0
self.maxSize = size
    def insertNode(self, value):
if self.lastUsedIndex + 1 == self.maxSize:
return "Full"
self.customList[self.lastUsedIndex + 1] = value
self.lastUsedIndex += 1
return "Inserted"
def searchNode(self, value):
if value in self.customList:
return "Success"
return "Not found"
def preOrderTraversal(self, index):
# root -> left -> right
if index > self.lastUsedIndex:
return
print(self.customList[index])
self.preOrderTraversal(index * 2)
self.preOrderTraversal(index * 2 + 1)
def inOrderTraversal(self, index):
# left -> root -> right
if index > self.lastUsedIndex:
return
self.inOrderTraversal(index * 2)
print(self.customList[index])
self.inOrderTraversal(index * 2 + 1)
def postOrderTraversal(self, index):
# left -> right -> root
if index > self.lastUsedIndex:
return
self.postOrderTraversal(index * 2)
self.postOrderTraversal(index * 2 + 1)
print(self.customList[index])
def levelOrderTraversal(self, index):
for i in range(index, self.lastUsedIndex + 1):
print(self.customList[i])
    def deleteNode(self, value):
        if self.lastUsedIndex == 0:
            return "Nothing to delete"
        for i in range(1, self.lastUsedIndex + 1):
            if self.customList[i] == value:
                # Replace the deleted node with the deepest (last) node, then drop the last slot.
                self.customList[i] = self.customList[self.lastUsedIndex]
                self.customList[self.lastUsedIndex] = None
                self.lastUsedIndex -= 1
                return "Deleted"
        return "Node not found"
def deleteTree(self):
self.customList = None
return "Deleted"
newBT = BinaryTree(8)
print(newBT.inserNode("N1"))
print(newBT.inserNode("N2"))
print(newBT.inserNode("N3"))
print(newBT.inserNode("N4"))
print(newBT.inserNode("N5"))
print(newBT.inserNode("N6"))
print(newBT.inserNode("N7"))
print(newBT.inserNode("N8"))
print(newBT.searchNode("N1"))
print(newBT.searchNode("N8"))
print("preOrderTraversal")
newBT.preOrderTraversal(1)
print("inOrderTraversal")
newBT.inOrderTraversal(1)
print("postOrderTraversal")
newBT.postOrderTraversal(1)
print("levelOrderTraversal")
newBT.levelOrderTraversal(1)
print(newBT.deleteNode("N4"))
newBT.levelOrderTraversal(1)
print(newBT.deleteTree())
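# Illustrative sketch (not part of the original file): customList stores the tree in
# 1-based level order, so a node at index i has its left child at 2*i, its right child
# at 2*i + 1, and its parent at i // 2 -- exactly what the recursive traversals above rely on.
demoBT = BinaryTree(8)
for label in ["N1", "N2", "N3", "N4", "N5", "N6", "N7"]:
    demoBT.insertNode(label)
for i in range(1, 4):
    print(demoBT.customList[i], "->", demoBT.customList[2 * i], demoBT.customList[2 * i + 1])
# Expected: N1 -> N2 N3, then N2 -> N4 N5, then N3 -> N6 N7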
| 25.705882 | 72 | 0.621663 | ["MIT"] | eferroni/Data-Structure-and-Algorithms | Trees/BinaryTreePL.py | 2,622 | Python |
"""hackernews URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| 35.136364 | 78 | 0.690815 | ["MIT"] | Saltiest-Hacker-News-Trolls-2/DS | hackernews/hackernews/urls.py | 773 | Python |
import numpy as np
import cv2 as cv
import math
from server.cv_utils import *
def filterGaussian(img,size=(5,5),stdv=0):
"""Summary of filterGaussian
This will apply a noise reduction filter, we will use s 5x5 Gaussian filter to smooth
the image to lower the sensitivity to noise. (The smaller the size the less visible the blur)
To populate the Gaussian matrix we will use a kernel of normally distributed[stdv=1] numbers which will
set each pixel value equal to the weighted average of its neighboor pixels
The Gaussian distribution:
Gd = (1/2pi*stdv^2)exp(-((i-(k+1)^2) + (j - (k+1)^2))/(2*stdv^2))
i,j E [1,2k+1] for the kernel of size: (2k+1)x(2k+1)
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if not isinstance(size,tuple):
raise ValueError('filterGaussian: Size for Gaussian filter not tuple')
return cv.GaussianBlur(img,size,stdv)
def filterCanny(img,min_val=50,max_val=150,size=(5,5),stdv=0):
"""
    The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection,
    which reduces the complexity of the image much further.
    The algorithm detects sharp changes in luminosity and defines them as edges.
    It has the following stages:
        - Noise reduction
        - Intensity gradient - a Sobel filter is applied along the x and y axes to detect
          whether edges are horizontal, vertical, or diagonal
        - Non-maximum suppression - thins the edges by suppressing pixels that are not local
          maxima of the gradient, sharpening the result
        - Hysteresis thresholding
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if min_val >= max_val:
raise ValueError('filterCanny: Value order incorrect')
gray_scale = toGrayScale(img)
#cv.imshow('Gray Scale image',gray_scale)
gaussian = filterGaussian(gray_scale,size=size,stdv=stdv)
#cv.imshow('Gaussian filter',gaussian)
return cv.Canny(gaussian,min_val,max_val)
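# Usage sketch (illustrative, not part of the original module): assuming `frame` is a BGR
# image already loaded with cv.imread, the edge-detection step typically looks like:
#
#     edges = filterCanny(frame, min_val=50, max_val=150, size=(5, 5), stdv=0)
#     cv.imshow('Edges', edges)
#
# Lower thresholds keep weaker edges; a larger Gaussian kernel blurs more aggressively
# before the Canny pass.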
def segmentRegionOfInterest(img):
height = img.shape[0]
polygons = np.array([
[(200, height), (1100, height), (550, 250)]
])
mask = np.zeros_like(img)
# Fill poly-function deals with multiple polygon
cv.fillPoly(mask, polygons, 255)
# Bitwise operation between canny image and mask image
masked_image = cv.bitwise_and(img, mask)
return masked_image
def houghFilter(frame,distance_resolution=2,angle_resolution=np.pi/180,min_n_intersections=50,min_line_size=30,max_line_gap=5):
"""
Params:
frame
distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision
angle_resolution: angle of accumulator in radians, larger ==> less precision
min_n_intersections: minimum number of intersections
min_line_size: minimum length of line in pixels
max_line_gap: maximum distance in pixels between disconnected lines
"""
placeholder = np.array([])
hough = cv.HoughLinesP(frame,distance_resolution,angle_resolution,min_n_intersections,placeholder,min_line_size,max_line_gap)
return hough
def calculateLines(img,lines):
"""
Combines line segments into one or two lanes
    Note: By looking at the slope of a line we can see if it is on the left side (m < 0) or the right (m > 0)
"""
def calculateCoordinates(img,line_params):
"""
Calculates the coordinates for a road lane
"""
#y = m*x +b, m= slope, b=intercept
height, width, _ = img.shape
m, b = line_params
y1 = height
y2 = int(y1 * (1/2)) # make points from middle of the frame down
# bound the coordinates within the frame
x1 = max(-width, min(2 * width, int((y1 - b) / m)))
x2 = max(-width, min(2 * width, int((y2 - b) / m)))
return np.array([x1,y1, x2,y2])
lane_lines = []
if lines is None:
return np.array(lane_lines)
height, width, _ = img.shape
left_lines, right_lines = [], []
boundary = 1/3
left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen
    right_region_boundary = width * boundary # right lane line segment should be on the right 2/3 of the screen
for line in lines:
x1,y1, x2,y2 = line.reshape(4)
if x1 == x2:
#Vertical line
continue
#Fit a polynomial to the points to get the slope and intercept
line_params = np.polyfit((x1,x2), (y1,y2), 1)
slope,intercept = line_params[0], line_params[1]
if slope < 0: #left side
if x1 < left_region_boundary and x2 < left_region_boundary:
left_lines.append((slope,intercept))
else: #right
if x1 > right_region_boundary and x2 > right_region_boundary:
right_lines.append((slope,intercept))
left_lines_avg = np.average(left_lines,axis=0)
right_lines_avg = np.average(right_lines,axis=0)
if len(left_lines) > 0:
left_line = calculateCoordinates(img,left_lines_avg)
lane_lines.append(left_line)
if len(right_lines) > 0:
right_line = calculateCoordinates(img,right_lines_avg)
lane_lines.append(right_line)
return np.array(lane_lines)
def showMidLine(img,steering_angle,color=(0, 255, 0),thickness=5):
line_image = np.zeros_like(img)
height, width, _ = img.shape
# Note: the steering angle of:
# 0-89 degree: turn left
# 90 degree: going straight
# 91-180 degree: turn right
steering_angle_radian = steering_angle / 180.0 * math.pi
x1 = int(width / 2)
y1 = height
x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))
y2 = int(height / 2)
cv.line(line_image, (x1, y1), (x2, y2), color, thickness)
return line_image
def showLines(img,lines,color=(255,0,0),thickness=5):
line_img = np.zeros(img.shape, dtype=np.uint8)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv.line(line_img, (x1,y1), (x2,y2), color, thickness)
return line_img
def calculateSteeringAngle(img,lines):
if len(lines) == 0:
return -90
height, width, _ = img.shape
if len(lines) == 1:
x1, _, x2, _ = lines[0]
x_offset = x2 - x1
else: #2 lines
_, _, left_x2, _ = lines[0]
_, _, right_x2, _ = lines[1]
camera_mid_offset_percent = 0.0 # 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right
mid = int(width / 2 * (1 + camera_mid_offset_percent))
x_offset = (left_x2 + right_x2) / 2 - mid
# find the steering angle, which is angle between navigation direction to end of center line
y_offset = int(height / 2)
angle_to_mid_radian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line
angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi) # angle (in degrees) to center vertical line
steering_angle = angle_to_mid_deg + 90 # this is the steering angle needed by picar front wheel
return steering_angle
def stabilizeSteeringAngle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=2, max_angle_deviation_one_lane=1):
"""
Using last steering angle to stabilize the steering angle
This can be improved to use last N angles, etc
if new angle is too different from current angle, only turn by max_angle_deviation degrees
"""
if num_of_lane_lines == 1:
# if only one lane detected, don't deviate too much
max_angle_deviation = max_angle_deviation_one_lane
else:
# if both lane lines detected, then we can deviate more
max_angle_deviation = max_angle_deviation_two_lines
angle_deviation = new_steering_angle - curr_steering_angle
if abs(angle_deviation) > max_angle_deviation:
stabilized_steering_angle = int(curr_steering_angle
+ max_angle_deviation * angle_deviation / abs(angle_deviation))
else:
stabilized_steering_angle = new_steering_angle
return stabilized_steering_angle
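# End-to-end sketch (illustrative, not part of the original module): how the helpers above
# are typically chained for a single frame. `frame` is assumed to be a BGR image from
# cv.imread or a video capture; note that segmentRegionOfInterest hard-codes a polygon
# sized for a roughly 1280x720 dash-cam view.
def processFrameSketch(frame, prev_angle=90):
    edges = filterCanny(frame)                        # gray -> Gaussian blur -> Canny edges
    roi = segmentRegionOfInterest(edges)              # keep only the road region
    segments = houghFilter(roi)                       # raw Hough line segments
    lanes = calculateLines(frame, segments)           # average into one or two lane lines
    angle = calculateSteeringAngle(frame, lanes)      # 90 degrees means straight ahead
    return stabilizeSteeringAngle(prev_angle, angle, len(lanes)), lanes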
| 36.149123 | 152 | 0.665372 | ["BSD-3-Clause"] | eamorgado/Car-Self-driving-Simulator | CarlaDriving/server/lane_detection/utils.py | 8,242 | Python |
"""
A small utility aiming to create sound programmatically.
"""
from __future__ import annotations
from importlib import metadata
__version__ = metadata.version("sarada")
| 19.222222 | 55 | 0.791908 | ["MIT"] | wikii122/sarada | sarada/__init__.py | 173 | Python |
import imp
from tkinter import *
from sys import exit
from teste.testeCores.corFunc import formatar
conta2x2 = 'x2y=5\n3x-5y=4'
root = Tk()
text = Text(root, width=20, height=10)
text.config(font='arial 20 bold')
text.insert(END, conta2x2)
text.pack()
def q_evento(event):
exit()
root.bind('q', q_evento)
cs = conta2x2.split('\n')
print('cs', cs)
posicao = cs[0].find('y')
print('posicao:', posicao)
p1 = p2 = '1.'
p1 += str(posicao)
p2 += str(posicao+1)
print('p1:', p1, 'p2:', p2)
conta = conta2x2.split('\n')
formatado = list()
text.config(background='black', foreground='white')
for i, c in enumerate(conta):
formatado.append(formatar(i, c))
fs = formatado[0][0]
print(fs)
print(fs['p1'])
for f1 in formatado:
for f in f1:
text.tag_add(f['nome'], f['p1'], f['p2'])
text.tag_config(f['nome'], foreground=f['fg'])
# text.tag_add("y1", p1, p2)
# text.tag_config("y1", background="black", foreground="green")
text.tag_config('1', foreground="green")
root.mainloop()
| 21.76087 | 63 | 0.656344 | ["MIT"] | jonasht/python | 06-sistemaLinear/sistemaLinear_v11/teste/testeCores/cores2.py | 1,001 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import random
import signal
import sys
import threading
from cotyledon import _utils
LOG = logging.getLogger(__name__)
class Service(object):
"""Base class for a service
This class will be executed in a new child process/worker
:py:class:`ServiceWorker` of a :py:class:`ServiceManager`. It registers
    signals to manage the reloading and the ending of the process.
Methods :py:meth:`run`, :py:meth:`terminate` and :py:meth:`reload` are
optional.
"""
name = None
"""Service name used in the process title and the log messages in additionnal
of the worker_id."""
graceful_shutdown_timeout = None
"""Timeout after which a gracefully shutdown service will exit. zero means
endless wait. None means same as ServiceManager that launch the service"""
def __init__(self, worker_id):
"""Create a new Service
:param worker_id: the identifier of this service instance
:type worker_id: int
The identifier of the worker can be used for workload repartition
because it's consistent and always the same.
For example, if the number of workers for this service is 3,
        one will get 0, the second will get 1 and the last will get 2.
        If worker 1 dies, the newly spawned process will get 1 again.
"""
super(Service, self).__init__()
self._initialize(worker_id)
def _initialize(self, worker_id):
if getattr(self, '_initialized', False):
return
self._initialized = True
if self.name is None:
self.name = self.__class__.__name__
self.worker_id = worker_id
self.pid = os.getpid()
self._signal_lock = threading.Lock()
# Only used by oslo_config_glue for now, so we don't need
# to have a list of hook
self._on_reload_internal_hook = self._noop_hook
def _noop_hook(self, service):
pass
def terminate(self):
"""Gracefully shutdown the service
This method will be executed when the Service has to shutdown cleanly.
If not implemented the process will just end with status 0.
To customize the exit code, the :py:class:`SystemExit` exception can be
used.
Any exceptions raised by this method will be logged and the worker will
exit with status 1.
"""
def reload(self):
"""Reloading of the service
This method will be executed when the Service receives a SIGHUP.
If not implemented the process will just end with status 0 and
:py:class:`ServiceRunner` will start a new fresh process for this
service with the same worker_id.
Any exceptions raised by this method will be logged and the worker will
exit with status 1.
"""
os.kill(os.getpid(), signal.SIGTERM)
def run(self):
"""Method representing the service activity
If not implemented the process will just wait to receive an ending
signal.
        This method is run in a thread and can block or return as needed.
Any exceptions raised by this method will be logged and the worker will
exit with status 1.
"""
# Helper to run application methods in a safety way when signal are
# received
def _reload(self):
with _utils.exit_on_exception():
if self._signal_lock.acquire(False):
try:
self._on_reload_internal_hook(self)
self.reload()
finally:
self._signal_lock.release()
def _terminate(self):
with _utils.exit_on_exception(), self._signal_lock:
self.terminate()
sys.exit(0)
def _run(self):
with _utils.exit_on_exception():
self.run()
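# Illustrative sketch (not part of the original module): a minimal concrete service.
# The worker process runs run() in a thread and calls terminate() on SIGTERM; reload()
# (SIGHUP) is optional and defaults to restarting the worker. A manager would register it
# with something like ServiceManager().add(PrinterService, workers=2).
#
#     class PrinterService(Service):
#         name = "printer"
#
#         def __init__(self, worker_id, interval=1):
#             super(PrinterService, self).__init__(worker_id)
#             self.interval = interval
#             self._shutdown = threading.Event()
#
#         def run(self):
#             while not self._shutdown.wait(self.interval):
#                 LOG.info("worker %d is alive", self.worker_id)
#
#         def terminate(self):
#             self._shutdown.set()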
class ServiceConfig(object):
def __init__(self, service_id, service, workers, args, kwargs):
self.service = service
self.workers = workers
self.args = args
self.kwargs = kwargs
self.service_id = service_id
class ServiceWorker(_utils.SignalManager):
"""Service Worker Wrapper
This represents the child process spawned by ServiceManager
All methods implemented here, must run in the main threads
"""
@classmethod
def create_and_wait(cls, *args, **kwargs):
sw = cls(*args, **kwargs)
sw.wait_forever()
def __init__(self, config, service_id, worker_id, parent_pipe,
started_hooks, graceful_shutdown_timeout):
super(ServiceWorker, self).__init__()
self._ready = threading.Event()
_utils.spawn(self._watch_parent_process, parent_pipe)
# Reseed random number generator
random.seed()
args = tuple() if config.args is None else config.args
kwargs = dict() if config.kwargs is None else config.kwargs
self.service = config.service(worker_id, *args, **kwargs)
self.service._initialize(worker_id)
if self.service.graceful_shutdown_timeout is None:
self.service.graceful_shutdown_timeout = graceful_shutdown_timeout
self.title = "%(name)s(%(worker_id)d) [%(pid)d]" % dict(
name=self.service.name, worker_id=worker_id, pid=os.getpid())
# Set process title
_utils.setproctitle(
"%(pname)s: %(name)s worker(%(worker_id)d)" % dict(
pname=_utils.get_process_name(), name=self.service.name,
worker_id=worker_id))
# We are ready tell them
self._ready.set()
_utils.run_hooks('new_worker', started_hooks, service_id, worker_id,
self.service)
def _watch_parent_process(self, parent_pipe):
# This will block until the write end is closed when the parent
# dies unexpectedly
parent_pipe[1].close()
try:
parent_pipe[0].recv()
except EOFError:
pass
if self._ready.is_set():
LOG.info('Parent process has died unexpectedly, %s exiting'
% self.title)
if os.name == "posix":
os.kill(os.getpid(), signal.SIGTERM)
else:
# Fallback to process signal later
self._signals_received.appendleft(signal.SIGTERM)
else:
os._exit(0)
def _alarm(self):
LOG.info('Graceful shutdown timeout (%d) exceeded, '
'exiting %s now.' %
(self.service.graceful_shutdown_timeout,
self.title))
os._exit(1)
def _on_signal_received(self, sig):
# Code below must not block to return to select.select() and catch
# next signals
if sig == _utils.SIGALRM:
self._alarm()
elif sig == signal.SIGTERM:
LOG.info('Caught SIGTERM signal, '
'graceful exiting of service %s' % self.title)
if self.service.graceful_shutdown_timeout > 0:
if os.name == "posix":
signal.alarm(self.service.graceful_shutdown_timeout)
else:
threading.Timer(self.service.graceful_shutdown_timeout,
self._alarm).start()
_utils.spawn(self.service._terminate)
elif sig == _utils.SIGHUP:
_utils.spawn(self.service._reload)
def wait_forever(self):
LOG.debug("Run service %s" % self.title)
_utils.spawn(self.service._run)
super(ServiceWorker, self)._wait_forever()
| 33.303279 | 81 | 0.629461 | ["Apache-2.0"] | 1upon0/cotyledon | cotyledon/_service.py | 8,126 | Python |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
def GetPrettyPrintErrors(input_api, output_api, cwd, rel_path, results):
"""Runs pretty-print command for specified file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'pretty_print.py', rel_path, '--presubmit',
'--non-interactive'], cwd=cwd)
if exit_code != 0:
error_msg = (
'%s is not formatted correctly; run git cl format to fix.' % rel_path)
results.append(output_api.PresubmitError(error_msg))
def GetPrefixErrors(input_api, output_api, cwd, rel_path, results):
"""Validates histogram prefixes in specified file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_prefix.py', rel_path], cwd=cwd)
if exit_code != 0:
error_msg = ('%s contains histogram(s) with disallowed prefix, please run '
'validate_prefix.py %s to fix.' % (rel_path, rel_path))
results.append(output_api.PresubmitError(error_msg))
def GetObsoleteXmlErrors(input_api, output_api, cwd, results):
"""Validates all histograms in the file are obsolete."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_obsolete_histograms.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'histograms_xml/obsolete_histograms.xml contains non-obsolete '
'histograms, please run validate_obsolete_histograms.py to fix.')
results.append(output_api.PresubmitError(error_msg))
def GetValidateHistogramsError(input_api, output_api, cwd, results):
"""Validates histograms format and index file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_format.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms are not well-formatted; please run %s/validate_format.py '
'and fix the reported errors.' % cwd)
results.append(output_api.PresubmitError(error_msg))
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_histograms_index.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms index file is not up-to-date. Please run '
'%s/histogram_paths.py to update it' % cwd)
results.append(output_api.PresubmitError(error_msg))
def ValidateSingleFile(input_api, output_api, file_obj, cwd, results):
"""Does corresponding validations if histograms.xml or enums.xml is changed.
Args:
input_api: An input_api instance that contains information about changes.
output_api: An output_api instance to create results of the PRESUBMIT check.
file_obj: A file object of one of the changed files.
cwd: Path to current working directory.
results: The returned variable which is a list of output_api results.
Returns:
A boolean that True if a histograms.xml or enums.xml file is changed.
"""
p = file_obj.AbsoluteLocalPath()
# Only do PRESUBMIT checks when |p| is under |cwd|.
if input_api.os_path.commonprefix([p, cwd]) != cwd:
return False
filepath = input_api.os_path.relpath(p, cwd)
if 'test_data' in filepath:
return False
# If the changed file is obsolete_histograms.xml, validate all histograms
# inside are obsolete.
if 'obsolete_histograms.xml' in filepath:
GetObsoleteXmlErrors(input_api, output_api, cwd, results)
# Return false here because we don't need to validate format if users only
# change obsolete_histograms.xml.
return False
# If the changed file is histograms.xml or histogram_suffixes_list.xml,
# pretty-print and validate prefix it.
elif ('histograms.xml' in filepath
or 'histogram_suffixes_list.xml' in filepath):
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
# TODO(crbug/1120229): Re-enable validate prefix check once all histograms
# are split.
# GetPrefixErrors(input_api, output_api, cwd, filepath, results)
return True
# If the changed file is enums.xml, pretty-print it.
elif 'enums.xml' in filepath:
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
return True
return False
def CheckChange(input_api, output_api):
"""Checks that histograms.xml is pretty-printed and well-formatted."""
results = []
cwd = input_api.PresubmitLocalPath()
xml_changed = False
# Only for changed files, do corresponding checks if the file is
# histograms.xml, enums.xml or obsolete_histograms.xml.
for file_obj in input_api.AffectedTextFiles():
is_changed = ValidateSingleFile(
input_api, output_api, file_obj, cwd, results)
xml_changed = xml_changed or is_changed
# Run validate_format.py and validate_histograms_index.py, if changed files
# contain histograms.xml or enums.xml.
if xml_changed:
GetValidateHistogramsError(input_api, output_api, cwd, results)
return results
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
| 37.15493 | 80 | 0.736164 | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | Ron423c/chromium | tools/metrics/histograms/PRESUBMIT.py | 5,276 | Python |
import config
from twitter import *
def main():
t = Twitter(
auth=OAuth(config.TW_TOKEN, config.TW_TOKEN_SECRET, config.TW_CONSUMER_KEY, config.TW_CONSUMER_SECRET))
# Post a message
    msg = 'This is a test post m(_ _)m'
t.statuses.update(status=msg)
if __name__ == '__main__':
main()
| 19.375 | 111 | 0.66129 | ["MIT"] | maroemon58/twitter-bot | main.py | 324 | Python |
import cv2
import pose_detection as pose_d
pose_model = pose_d.load_pose_model(r'pre_trained\AFLW2000.pkl')  # raw string so the backslash is not treated as an escape
def detect_face(img_PATH, model_PATH):
# Load the cascade
face_cascade = cv2.CascadeClassifier(model_PATH)
# Read the input image
img = cv2.imread(img_PATH)
# Convert into grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
if len(faces) > 1:
print('Multiple faces detected')
return False
elif len(faces) < 1:
print('No faces detected')
return False
# Draw rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
# Display the output
#cv2_imshow(img)
cv2.waitKey()
return True # TO DO may want to return face at some point as well
def detect_face_video(pose_model):
# Load the cascade
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# To capture video from webcam.
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# To use a video file as input
# cap = cv2.VideoCapture('filename.mp4')
while True:
# Read the frame
_, img = cap.read()
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect the faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
# Get pose estimate
yaw, pitch, roll = pose_d.run_pose_detection(pose_model, pose_d.load_img(img))
# Draw the rectangle around each face
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
#draw pose label
img = pose_d.draw_labels(yaw, pitch, roll, img)
# Display
cv2.imshow('img', img)
# Stop if escape key is pressed
k = cv2.waitKey(30) & 0xff
if k==27:
break
# Release the VideoCapture object
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
    detect_face_video(pose_model)
| 31.875 | 103 | 0.645588 | ["MIT"] | HDWilliams/User_Verification_HPE | face_detection_cv2.py | 2,040 | Python |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the isBalanced function below.
def isBalanced(s):
left_symbol = [ '{', '[', '(']
right_symbol = [ '}', ']', ')']
# fast checking of symbol counting equality
for i in range(3):
left_count = s.count( left_symbol[i] )
right_count = s.count( right_symbol[i] )
if left_count != right_count:
return "NO"
_stack = []
for i in range( len(s) ):
char = s[i]
if char in { '{', '[', '(' } :
# push into stack
_stack.append( char )
if char in { '}', ']', ')' } :
# pop from stack and compare with left symbol
index_of_right = right_symbol.index( char )
index_of_left = left_symbol.index( _stack.pop(-1) )
if index_of_left == index_of_right:
# match of {}, [], or ()
pass
else:
return "NO"
if len(_stack) == 0:
return "YES"
else:
return "NO"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
s = input()
result = isBalanced(s)
fptr.write(result + '\n')
fptr.close()
| 17.702703 | 63 | 0.499237 | ["MIT"] | brianchiang-tw/HackerRank | Data Structures/Stack/Balanced Bracket/balanced_bracket.py | 1,310 | Python |
import unittest
import attr
import numpy as np
from robogym.randomization.env import (
EnvActionRandomizer,
EnvObservationRandomizer,
EnvParameterRandomizer,
EnvRandomization,
EnvSimulationRandomizer,
build_randomizable_param,
)
from robogym.randomization.observation import ObservationRandomizer
from robogym.randomization.parameters import FloatRandomizerParameter
class DummyRandomizerParameter(FloatRandomizerParameter):
def __init__(self, name, val):
super().__init__(
name, val, value_range=(-1.0, 1.0), delta=1.0,
)
@attr.s(auto_attribs=True)
class DummyNestedEnvParameter:
c: int = build_randomizable_param(1, low=-3, high=3)
@attr.s(auto_attribs=True)
class DummyEnvParameter:
a: int = build_randomizable_param(0, low=-5, high=5)
b: float = build_randomizable_param(0.0, low=-1.0, high=1.0)
x: int = 0 # Non randomizable parameter.
nested: DummyNestedEnvParameter = DummyNestedEnvParameter()
class DummyObservationRandomizer(ObservationRandomizer):
def __init__(self, name, val):
super().__init__(name)
self.val = self.register_parameter(val)
def _randomize(self, target, random_state):
target[self.val.name] = self.val.get_value()
return target
class TestRandomization(unittest.TestCase):
def setUp(self):
super().setUp()
self.random_state = np.random.RandomState()
def test_randomizer_parameters(self):
parameter = DummyRandomizerParameter("foo", 0.0)
assert parameter.get_value() == 0.0
assert parameter.get_range() == (-1.0, 1.0)
assert parameter.get_delta() == 1.0
parameter.set_value(1.0)
assert parameter.get_value() == 1.0
def test_randomizer_basic(self):
"""
Test functionality of basic randomizer.
"""
randomizer = EnvParameterRandomizer(DummyEnvParameter())
assert len(randomizer.get_parameters()) == 3
# Make sure register duplicate parameter is not allowed.
with self.assertRaises(AssertionError):
randomizer.register_parameter(DummyRandomizerParameter("a", 1))
randomizer.register_parameter(DummyRandomizerParameter("d", 1))
assert len(randomizer.get_parameters()) == 4
randomizer.get_parameter("a").set_value(1)
randomizer.get_parameter("b").set_value(0.5)
randomizer.get_parameter("c").set_value(2)
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
assert parameters.a == 1
assert parameters.b == 0.5
assert parameters.nested.c == 2
randomizer.disable()
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
randomizer.get_parameter("a").set_value(1)
assert parameters.a == 0
def test_observation_randomizer(self):
randomizer = EnvObservationRandomizer(
[
DummyObservationRandomizer("r1", DummyRandomizerParameter("foo", 0.0)),
DummyObservationRandomizer("r2", DummyRandomizerParameter("bar", 1.0)),
]
)
assert len(randomizer.get_randomizers()) == 2
assert len(randomizer.get_parameters()) == 2
obs = randomizer.randomize({}, self.random_state)
assert obs["foo"] == 0.0
assert obs["bar"] == 1.0
def test_env_randomization(self):
randomization = EnvRandomization(
parameter_randomizer=EnvParameterRandomizer(DummyEnvParameter()),
observation_randomizer=EnvObservationRandomizer(
[
DummyObservationRandomizer(
"r1", DummyRandomizerParameter("foo", 0.0)
),
]
),
action_randomizer=EnvActionRandomizer([]),
simulation_randomizer=EnvSimulationRandomizer([]),
)
randomization.update_parameter("observation.r1:foo", 0.5)
parameter = randomization.get_parameter("observation.r1:foo")
assert parameter.get_value() == 0.5
| 31.976563 | 87 | 0.658441 | ["MIT"] | 0xflotus/robogym | robogym/randomization/tests/test_randomization.py | 4,093 | Python |
# COMBINATION:
# combination is all the different ways that we can group something where the order does not matter.
# PERMUTATION:
# permutation is all the different ways that we can group something where the order does matter.
import itertools
my_list=[1,2,3]
my_combinations=itertools.combinations(my_list,2) # the first argument is an iterable and the second is how many items we want in a group; it is the r of nCr.
for c in my_combinations:
print(c)
my_permutations=itertools.permutations(my_list,2)
for c in my_permutations:
print(c)
# When should we use combinations and when permutations?
# If the order doesn't matter we should use combinations.
import itertools
my_list=[1,2,3,4,5,6]
my_combinations=itertools.combinations(my_list,3)
answer=[results for results in my_combinations if sum(results)==10]
for i in answer:
print(i)
# if the order does matter we should use permutations.
# word matching game.
import itertools
word="sample"
my_letters="pslame"
my_permutations=itertools.permutations(my_letters,len(my_letters))
for p in my_permutations:
if "".join(p) == word:
print("Match!")
break
else:
print("No match")
| 29.35 | 156 | 0.748722 | ["MIT"] | ahammadshawki8/Proggraming-Terms | term06 (permutation and combination).py | 1,174 | Python |
import logging
from flask import request, flash, abort, Response
from flask_admin import expose
from flask_admin.babel import gettext, ngettext, lazy_gettext
from flask_admin.model import BaseModelView
from flask_admin.model.form import wrap_fields_in_fieldlist
from flask_admin.model.fields import ListEditableFieldList
from flask_admin._compat import iteritems, string_types
import mongoengine
import gridfs
from mongoengine.connection import get_db
from bson.objectid import ObjectId
from flask_admin.actions import action
from .filters import FilterConverter, BaseMongoEngineFilter
from .form import get_form, CustomModelConverter
from .typefmt import DEFAULT_FORMATTERS
from .tools import parse_like_term
from .helpers import format_error
from .ajax import process_ajax_references, create_ajax_loader
from .subdoc import convert_subdocuments
# Set up logger
log = logging.getLogger("flask-admin.mongo")
SORTABLE_FIELDS = set((
mongoengine.StringField,
mongoengine.IntField,
mongoengine.FloatField,
mongoengine.BooleanField,
mongoengine.DateTimeField,
mongoengine.ComplexDateTimeField,
mongoengine.ObjectIdField,
mongoengine.DecimalField,
mongoengine.ReferenceField,
mongoengine.EmailField,
mongoengine.UUIDField,
mongoengine.URLField
))
class ModelView(BaseModelView):
"""
MongoEngine model scaffolding.
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of
:class:`flask_admin.contrib.mongoengine.filters.BaseFilter`
classes.
For example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
or::
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(User.name, 'Name'))
"""
model_form_converter = CustomModelConverter
"""
Model form conversion class. Use this to implement custom
field conversion logic.
Custom class should be derived from the
`flask_admin.contrib.mongoengine.form.CustomModelConverter`.
For example::
class MyModelConverter(AdminModelConverter):
pass
class MyAdminView(ModelView):
model_form_converter = MyModelConverter
"""
object_id_converter = ObjectId
"""
Mongodb ``_id`` value conversion function. Default is `bson.ObjectId`.
Use this if you are using String, Binary and etc.
For example::
class MyModelView(BaseModelView):
object_id_converter = int
or::
class MyModelView(BaseModelView):
object_id_converter = str
"""
filter_converter = FilterConverter()
"""
Field to filter converter.
Override this attribute to use a non-default converter.
"""
column_type_formatters = DEFAULT_FORMATTERS
"""
Customized type formatters for MongoEngine backend
"""
allowed_search_types = (mongoengine.StringField,
mongoengine.URLField,
mongoengine.EmailField)
"""
List of allowed search field types.
"""
form_subdocuments = None
"""
Subdocument configuration options.
    This field accepts a dictionary, where the key is a field name and the value is either a
    dictionary or an instance of the `flask_admin.contrib.EmbeddedForm`.
Consider following example::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.EmbeddedDocumentField(Comment)
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_columns': ('name',)
}
}
    In this example, the `Post` model has a child `Comment` subdocument. When generating the form
    for the `Comment` embedded document, Flask-Admin will only create the `name` field.
It is also possible to use class-based embedded document configuration::
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
Arbitrary depth nesting is supported::
class SomeEmbed(EmbeddedForm):
form_excluded_columns = ('test',)
class CommentEmbed(EmbeddedForm):
form_columns = ('name',)
form_subdocuments = {
'inner': SomeEmbed()
}
class MyAdmin(ModelView):
form_subdocuments = {
'data': CommentEmbed()
}
    There's also support for forms embedded into a `ListField`. All you have
    to do is create a nested rule with `None` as the name. It is slightly
    confusing, but that's how Flask-MongoEngine creates form fields embedded
    into a ListField::
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Post(db.Document):
text = db.StringField(max_length=30)
data = db.ListField(db.EmbeddedDocumentField(Comment))
class MyAdmin(ModelView):
form_subdocuments = {
'data': {
'form_subdocuments': {
None: {
'form_columns': ('name',)
}
}
}
}
"""
def __init__(self, model, name=None,
category=None, endpoint=None, url=None, static_folder=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor
:param model:
Model class
:param name:
Display name
:param category:
Display category
:param endpoint:
Endpoint
:param url:
Custom URL
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self._search_fields = []
super(ModelView, self).__init__(model, name, category, endpoint, url, static_folder,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
self._primary_key = self.scaffold_pk()
def _refresh_cache(self):
"""
Refresh cache.
"""
# Process subdocuments
if self.form_subdocuments is None:
self.form_subdocuments = {}
self._form_subdocuments = convert_subdocuments(self.form_subdocuments)
# Cache other properties
super(ModelView, self)._refresh_cache()
def _process_ajax_references(self):
"""
AJAX endpoint is exposed by top-level admin view class, but
subdocuments might have AJAX references too.
This method will recursively go over subdocument configuration
and will precompute AJAX references for them ensuring that
subdocuments can also use AJAX to populate their ReferenceFields.
"""
references = super(ModelView, self)._process_ajax_references()
return process_ajax_references(references, self)
def _get_model_fields(self, model=None):
"""
Inspect model and return list of model fields
:param model:
Model to inspect
"""
if model is None:
model = self.model
return sorted(iteritems(model._fields), key=lambda n: n[1].creation_counter)
def scaffold_pk(self):
# MongoEngine models have predefined 'id' as a key
return 'id'
def get_pk_value(self, model):
"""
Return the primary key value from the model instance
:param model:
Model instance
"""
return model.pk
def scaffold_list_columns(self):
"""
Scaffold list columns
"""
columns = []
for n, f in self._get_model_fields():
# Verify type
field_class = type(f)
if (field_class == mongoengine.ListField and
isinstance(f.field, mongoengine.EmbeddedDocumentField)):
continue
if field_class == mongoengine.EmbeddedDocumentField:
continue
if self.column_display_pk or field_class != mongoengine.ObjectIdField:
columns.append(n)
return columns
def scaffold_sortable_columns(self):
"""
Return a dictionary of sortable columns (name, field)
"""
columns = {}
for n, f in self._get_model_fields():
if type(f) in SORTABLE_FIELDS:
if self.column_display_pk or type(f) != mongoengine.ObjectIdField:
columns[n] = f
return columns
def init_search(self):
"""
Init search
"""
if self.column_searchable_list:
for p in self.column_searchable_list:
if isinstance(p, string_types):
p = self.model._fields.get(p)
if p is None:
raise Exception('Invalid search field')
field_type = type(p)
# Check type
if (field_type not in self.allowed_search_types):
raise Exception('Can only search on text columns. ' +
'Failed to setup search for "%s"' % p)
self._search_fields.append(p)
return bool(self._search_fields)
def scaffold_filters(self, name):
"""
Return filter object(s) for the field
:param name:
Either field name or field instance
"""
if isinstance(name, string_types):
attr = self.model._fields.get(name)
else:
attr = name
if attr is None:
raise Exception('Failed to find field for filter: %s' % name)
# Find name
visible_name = None
if not isinstance(name, string_types):
visible_name = self.get_column_name(attr.name)
if not visible_name:
visible_name = self.get_column_name(name)
# Convert filter
type_name = type(attr).__name__
flt = self.filter_converter.convert(type_name,
attr,
visible_name)
return flt
def is_valid_filter(self, filter):
"""
Validate if the provided filter is a valid MongoEngine filter
:param filter:
Filter object
"""
return isinstance(filter, BaseMongoEngineFilter)
def scaffold_form(self):
"""
Create form from the model.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.form_columns,
exclude=self.form_excluded_columns,
field_args=self.form_args,
extra_fields=self.form_extra_fields)
return form_class
def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList,
validators=None):
"""
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [required()]}}
:param custom_fieldlist:
A WTForm FieldList class. By default, `ListEditableFieldList`.
"""
form_class = get_form(self.model,
self.model_form_converter(self),
base_class=self.form_base_class,
only=self.column_editable_list,
field_args=validators)
return wrap_fields_in_fieldlist(self.form_base_class,
form_class,
custom_fieldlist)
# AJAX foreignkey support
def _create_ajax_loader(self, name, opts):
return create_ajax_loader(self.model, name, name, opts)
def get_query(self):
"""
Returns the QuerySet for this view. By default, it returns all the
objects for the current model.
"""
return self.model.objects
def _search(self, query, search_term):
# TODO: Unfortunately, MongoEngine contains bug which
# prevents running complex Q queries and, as a result,
# Flask-Admin does not support per-word searching like
# in other backends
op, term = parse_like_term(search_term)
criteria = None
for field in self._search_fields:
flt = {'%s__%s' % (field.name, op): term}
q = mongoengine.Q(**flt)
if criteria is None:
criteria = q
else:
criteria |= q
return query.filter(criteria)
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True):
"""
Get list of objects from MongoEngine
:param page:
Page number
:param sort_column:
Sort column
:param sort_desc:
Sort descending
:param search:
Search criteria
:param filters:
List of applied filters
:param execute:
Run query immediately or not
"""
query = self.get_query()
# Filters
if self._filters:
for flt, flt_name, value in filters:
f = self._filters[flt]
query = f.apply(query, f.clean(value))
# Search
if self._search_supported and search:
query = self._search(query, search)
# Get count
count = query.count() if not self.simple_list_pager else None
# Sorting
if sort_column:
query = query.order_by('%s%s' % ('-' if sort_desc else '', sort_column))
else:
order = self._get_default_order()
if order:
query = query.order_by('%s%s' % ('-' if order[1] else '', order[0]))
# Pagination
if page is not None:
query = query.skip(page * self.page_size)
query = query.limit(self.page_size)
if execute:
query = query.all()
return count, query
def get_one(self, id):
"""
Return a single model instance by its ID
:param id:
Model ID
"""
try:
return self.get_query().filter(pk=id).first()
except mongoengine.ValidationError as ex:
flash(gettext('Failed to get model. %(error)s',
error=format_error(ex)),
'error')
return None
def create_model(self, form):
"""
Create model helper
:param form:
Form instance
"""
try:
model = self.model()
form.populate_obj(model)
self._on_model_change(form, model, True)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to create record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to create record.')
return False
else:
self.after_model_change(form, model, True)
return model
def update_model(self, form, model):
"""
Update model helper
:param form:
Form instance
:param model:
Model instance to update
"""
try:
form.populate_obj(model)
self._on_model_change(form, model, False)
model.save()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to update record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to update record.')
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model helper
:param model:
Model instance
"""
try:
self.on_model_delete(model)
model.delete()
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete record. %(error)s',
error=format_error(ex)),
'error')
log.exception('Failed to delete record.')
return False
else:
self.after_model_delete(model)
return True
# FileField access API
@expose('/api/file/')
def api_file_view(self):
pk = request.args.get('id')
coll = request.args.get('coll')
db = request.args.get('db', 'default')
if not pk or not coll or not db:
abort(404)
fs = gridfs.GridFS(get_db(db), coll)
data = fs.get(self.object_id_converter(pk))
if not data:
abort(404)
return Response(data.read(),
content_type=data.content_type,
headers={
'Content-Length': data.length
})
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
try:
count = 0
all_ids = [self.object_id_converter(pk) for pk in ids]
for obj in self.get_query().in_bulk(all_ids).values():
count += self.delete_model(obj)
flash(ngettext('Record was successfully deleted.',
'%(count)s records were successfully deleted.',
count,
count=count))
except Exception as ex:
if not self.handle_view_exception(ex):
flash(gettext('Failed to delete records. %(error)s', error=str(ex)),
'error')
| 31.095679 | 114 | 0.551663 | ["Apache-2.0"] | hexlism/css_platform | sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py | 20,150 | Python |
import requests, json, os, time
from PIL import Image
from io import BytesIO
import img2pdf
class jumpplus_downloader:
def __init__(self):
self.file=0
self.h=1200
self.w=760
def auto_list_download(self, url, next=False, sleeptime=20,pdfConversion=True):
self.json_download(url)
self.file=0
if os.path.isdir(self.list["readableProduct"]["title"])!=True:
os.mkdir(self.list["readableProduct"]["title"])
for page in self.list["readableProduct"]["pageStructure"]["pages"]:
time.sleep(sleeptime)
if page["type"]=="main":
self.h=page["height"]
self.w=page["width"]
self.download(page["src"],False)
self.processing()
self.output("./"+self.list["readableProduct"]["title"]+"/")
if pdfConversion:
self.convertToPdf()
if self.list["readableProduct"]["nextReadableProductUri"]!=None and next==True:
self.auto_list_download(self.list["readableProduct"]["nextReadableProductUri"],True)
def json_download(self,url):
        # Spoof the User-Agent header so the server accepts the request.
session=requests.session()
headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"}
json_data=session.get(url+".json",headers=headers).text
self.list=json.loads(json_data)
def json_localread(self, filepath):
with open(filepath) as json_file:
json_data=json.load(json_file)
self.list=json_data
def download(self,url,fakeque=False):
if fakeque:
print("Emulating Download : " + url)
self.img=url
else:
session=requests.session()
headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"}
self.img=requests.get(url)
    def processing(self):
        # Pages are delivered scrambled as a 4x4 grid of tiles; crop each tile and paste
        # it back into a new canvas to rebuild the readable page.
readImage=Image.open(BytesIO(self.img.content))
imageSize=readImage.size
width=imageSize[0]-24
height=imageSize[1]-16
buff=[]
counterX=0
counterY=0
for wx in range(4):
inbuff=[]
for lx in range(4):
cropped=readImage.crop(box=(width/4*counterX,height/4*counterY, width/4*(counterX+1),height/4*(counterY+1)))
inbuff.append(cropped)
counterY+=1
buff.append(inbuff)
counterX+=1
counterY=0
self.converted_img=Image.new("RGB",(int(width),int(height)))
counterX=0
counterY=0
for wdx in buff:
for ldx in wdx:
print(str(counterY))
self.converted_img.paste(ldx, (int(width/4*counterX) , int(height/4*counterY)))
counterX+=1
counterX=0
print("Current Y Counter:"+str(counterY))
counterY+=1
def output(self, file="./"):
self.converted_img.save(file+str(self.file)+".png")
self.file+=1
def convertToPdf(self):
directory="./"+self.list["readableProduct"]["title"]+"/"
sourceDir=os.listdir(directory)
imgcount=0
img=[]
filextend=sourceDir[0].split(".")
filextend=(str(".")+str(filextend[1]))
for images in sourceDir:
img.append(directory + str(imgcount) + filextend )
imgcount=imgcount+1
with open("./"+self.list["readableProduct"]["title"]+".pdf","wb") as f:
f.write(img2pdf.convert(img))
#A simple Json Dumper for debugging.
def dumpSimplifiedJson(self,jsObject):
f=open("JSON.json","w")
json.dump(jsObject, f, ensure_ascii=False, indent=4, sort_keys=True, separators=(',',': '))
| 35.414414 | 159 | 0.583821 | ["MIT"] | Lutwidse/jumpplus-downloader | py/lib/jumpplus_downloader.py | 3,931 | Python |
import os
import sys
import numpy as np
from PIL import Image
import torch
#TODO - add save function, these functions can be used to check movement
def crop_image(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
return image_array[y_min:y_max, x_min:x_max]
#Keep image size, set pixel value outside of bounding box as 0
def crop_pad_image(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
mask_array = np.zeros(image_array.shape, dtype=int)
mask_array[y_min:y_max, x_min:x_max] = 1
zero_array = np.where(mask_array==0)
image_array_copy[zero_array[0],zero_array[1]] = 0
return image_array_copy
def set_bb_to_black(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
mask_array = np.zeros(image_array.shape, dtype=int)
mask_array[y_min:y_max, x_min:x_max] = 1
zero_array = np.where(mask_array==1)
image_array_copy[zero_array[0],zero_array[1]] = 0
return image_array_copy
def transform_img_for_model(image_array, transforms=None):
image_array_copy = np.copy(image_array)
#image_array_copy.unsqueeze_(0)
image_array_copy = np.expand_dims(image_array_copy, axis=2)
if(transforms is None):
image_array_copy = torch.from_numpy(image_array_copy).repeat(3, 1, 1)
else:
image_array_copy = transforms(image_array_copy).repeat(3, 1, 1)
return image_array_copy
def save_image_from_tensor(image_array, path):
og = Image.fromarray(image_array.numpy())
og = og.convert('RGB')
og.save(path)
def resize_image(image_array, width, height):
og = Image.fromarray(image_array.numpy())
og = og.convert('RGB')
og = og.resize((width, height))
og = og.convert('L')
return np.array(og)
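# Usage sketch (illustrative, not part of the original module): `image_array` is expected to
# be a 2-D torch tensor (an H x W grayscale image) and `bb` a (y_min, x_min, height, width)
# bounding box, e.g.
#
#     patch  = crop_image(img_tensor, bb)        # just the contents of the box
#     masked = crop_pad_image(img_tensor, bb)    # full-size image, zeroed outside the box
#     hidden = set_bb_to_black(img_tensor, bb)   # full-size image, zeroed inside the box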
| 31.764706 | 77 | 0.683333 | ["MIT"] | richielo/Medical_Localization_RL | pneumoRL/image_util.py | 2,160 | Python |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from VectorTestSequence import VectorTestSequence
from base.ChoicesModifier import ChoicesModifier
## This test verifies that vector register operands with different layouts don't overlap.
class MainSequence(VectorTestSequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mInstrList = (
'VNSRA.WI##RISCV',
'VNSRA.WV##RISCV',
'VNSRA.WX##RISCV',
'VNSRL.WI##RISCV',
'VNSRL.WV##RISCV',
'VNSRL.WX##RISCV',
'VWADD.VV##RISCV',
'VWADD.VX##RISCV',
'VWADD.WV##RISCV',
'VWADD.WX##RISCV',
'VWADDU.VV##RISCV',
'VWADDU.VX##RISCV',
'VWADDU.WV##RISCV',
'VWADDU.WX##RISCV',
'VWMACC.VV##RISCV',
'VWMACC.VX##RISCV',
'VWMACCSU.VV##RISCV',
'VWMACCSU.VX##RISCV',
'VWMACCU.VV##RISCV',
'VWMACCU.VX##RISCV',
'VWMACCUS.VX##RISCV',
'VWMUL.VV##RISCV',
'VWMUL.VX##RISCV',
'VWMULSU.VV##RISCV',
'VWMULSU.VX##RISCV',
'VWMULU.VV##RISCV',
'VWMULU.VX##RISCV',
'VWSUB.VV##RISCV',
'VWSUB.VX##RISCV',
'VWSUB.WV##RISCV',
'VWSUB.WX##RISCV',
'VWSUBU.VV##RISCV',
'VWSUBU.VX##RISCV',
'VWSUBU.WV##RISCV',
'VWSUBU.WX##RISCV',
)
## Set up the environment prior to generating the test instructions.
def _setUpTest(self):
choices_mod = ChoicesModifier(self.genThread)
# TODO(Noah): Remove the restriction on SEW when a mechanism to skip instructions with
# illegal vector layouts is implemented. For now, ensure vector element width is set to no
# more than 32 bits.
choice_weights = {'0x0': 10, '0x1': 10, '0x2': 10, '0x3': 0, '0x4': 0, '0x5': 0, '0x6': 0, '0x7': 0}
choices_mod.modifyRegisterFieldValueChoices('vtype.VSEW', choice_weights)
# Ensure vector register group size is no more than 4, as larger values are not legal for
# widening and narrowing instructions
vlmul_choice_weights = {'0x0': 10, '0x1': 10, '0x2': 10, '0x3': 0, '0x4': 0, '0x5': 10, '0x6': 10, '0x7': 10}
choices_mod.modifyRegisterFieldValueChoices('vtype.VLMUL', vlmul_choice_weights)
choices_mod.commitSet()
## Return the maximum number of test instructions to generate.
def _getMaxInstructionCount(self):
return 1000
## Return a list of test instructions to randomly choose from.
def _getInstructionList(self):
return self._mInstrList
## Verify additional aspects of the instruction generation and execution.
#
# @param aInstr The name of the instruction.
# @param aInstrRecord A record of the generated instruction.
def _performAdditionalVerification(self, aInstr, aInstrRecord):
vd_val = aInstrRecord['Dests']['vd']
vs1_val = aInstrRecord['Srcs'].get('vs1')
vs2_val = aInstrRecord['Srcs']['vs2']
if aInstr.startswith('VW'):
if vs1_val and (vd_val == (vs1_val & 0x1F)):
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
if ('.W' not in aInstr) and (vd_val == (vs2_val & 0x1F)):
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
elif aInstr.startswith('VN'):
if (vd_val & 0x1F) == vs2_val:
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
else:
self.error('Unexpected instruction %s' % aInstr)
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| 40.175439 | 124 | 0.624672 | ["Apache-2.0"] | Imperas/force-riscv | tests/riscv/vector/vector_wide_operand_conflict_force.py | 4,580 | Python |
import numpy as np
import matplotlib.pyplot as plt
def estimate(particles, weights):
"""returns mean and variance of the weighted particles"""
pos = particles
mean = np.average(pos, weights=weights, axis=0)
var = np.average((pos - mean)**2, weights=weights, axis=0)
return mean, var
def simple_resample(particles, weights):
N = len(particles)
cumulative_sum = np.cumsum(weights)
cumulative_sum[-1] = 1. # avoid round-off error
indexes = np.searchsorted(cumulative_sum, np.random.rand(N))
# resample according to indexes
particles[:] = particles[indexes]
weights.fill(1.0 / N)
return particles,weights
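# Illustrative example (not part of the original script): multinomial resampling via the
# cumulative sum above.
#
#     p = np.array([0., 1., 2., 3.])
#     w = np.array([0.1, 0.1, 0.7, 0.1])
#     p2, w2 = simple_resample(p, w)   # p2 is dominated by the value 2.0
#
# cumsum(w) = [0.1, 0.2, 0.9, 1.0], so a uniform draw in [0, 1) picks particle 2 about 70%
# of the time; high-weight particles get duplicated, low-weight ones die out, and the
# weights are reset to 1/N afterwards.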
x=0.1    # initial true state
x_N=1    # process noise covariance (1-D, so this is just the variance)
x_R=1    # measurement noise covariance
T=75     # number of time steps
N=100    # number of particles; more particles work better but cost more computation
V=2      # variance of the initial particle distribution
x_P=x+np.random.randn(N)*np.sqrt(V)
#plt.hist(x_P,N, normed=True)
z_out=[x**2/20+np.random.randn(1)*np.sqrt(x_R)]  # actual measurements
x_out=[x]          # history of the true state
x_est=x            # current estimate
x_est_out=[x_est]  # history of the estimates
#print(x_out)
for t in range(1,T):
x=0.5*x+25*x/(1+x**2)+8*np.cos(1.2*(t-1))+np.random.randn()*np.sqrt(x_N)
z=x**2/20+np.random.randn()*np.sqrt(x_R)
    # propagate the particles through the process model
x_P_update=0.5*x_P+25*x_P/(1+x_P**2)+8*np.cos(1.2*(t-1))+np.random.randn(N)*np.sqrt(x_N)
z_update=x_P_update**2/20+np.random.randn(N)*np.sqrt(x_R)
#print(z_update)
    # compute importance weights from the measurement likelihood
P_w=(1/np.sqrt(2*np.pi*x_R))*np.exp(-(z-z_update)**2/(2*x_R))
    # estimate the state as the weighted mean
x_est,var=estimate(z_update,P_w)
    # resample the particles
x_P,P_w=simple_resample(x_P,P_w)
#保存数据
x_out.append(x)
z_out.append(z)
x_est_out.append(x_est)
#print(x_out)
t=np.arange(0,T)
plt.plot(t,x_out,color='blue',label='true value')
plt.plot(t,x_est_out,color='red',label='estimate value')
plt.legend()
plt.show() | 29.05 | 93 | 0.650602 | [
"MIT"
] | zhyongquan/Automotive-Software-Blog | 002_Particle_Filter/Particle_Filter.py | 1,939 | Python |
#
# PySNMP MIB module ADIC-INTELLIGENT-STORAGE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ADIC-INTELLIGENT-STORAGE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:13:36 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, NotificationType, iso, Counter64, ObjectIdentity, Counter32, Integer32, Unsigned32, enterprises, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, ModuleIdentity, MibIdentifier, Gauge32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "NotificationType", "iso", "Counter64", "ObjectIdentity", "Counter32", "Integer32", "Unsigned32", "enterprises", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "ModuleIdentity", "MibIdentifier", "Gauge32", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
adic = MibIdentifier((1, 3, 6, 1, 4, 1, 3764))
storage = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1))
intelligent = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1))
productAgentInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10))
globalData = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20))
components = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30))
software = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 100))
hardware = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200))
powerAndCooling = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200))
sml = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 300))
network = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 400))
notification = MibIdentifier((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500))
class Boolean(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("true", 1), ("false", 2))
class AdicMibVersion(DisplayString):
pass
class AdicREDIdentifier(Counter32):
pass
class AdicEnable(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("enabled", 1), ("disabled", 2))
class AdicAgentStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("other", 1), ("unknown", 2), ("ok", 3), ("non-critical", 4), ("critical", 5), ("non-recoverable", 6))
class AdicOnlineStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("online", 1), ("offline", 2), ("shutdown", 3))
class AdicGlobalId(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
fixedLength = 8
class AdicComponentType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))
namedValues = NamedValues(("mcb", 1), ("cmb", 2), ("ioBlade", 3), ("rcu", 4), ("networkChasis", 5), ("controlModule", 6), ("expansionModule", 7), ("powerSupply", 8))
class AdicInterfaceType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("scsi", 1), ("fibreChannel", 2))
class AdicSensorStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("nominal", 1), ("warningLow", 2), ("warningHigh", 3), ("alarmLow", 4), ("alarmHigh", 5), ("notInstalled", 6), ("noData", 7))
class AdicVoltageType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("dc", 1), ("ac", 2))
class AdicDateAndTime(OctetString):
subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(8, 8), ValueSizeConstraint(11, 11), )
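# Editor's note: the 8- and 11-octet sizes above presumably mirror the standard
# SNMPv2-TC DateAndTime layout (year, month, day, hour, minutes, seconds,
# deci-seconds, optionally followed by the UTC offset direction, hours and
# minutes); the generated code itself does not spell out the field layout.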
class AdicTrapSeverity(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("emergency", 1), ("alarm", 2), ("warning", 3), ("notice", 4), ("informational", 5))
class AdicDoorStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("open", 1), ("closed", 2), ("closedAndLocked", 3), ("closedAndUnlocked", 4), ("contollerFailed", 5), ("notInstalled", 6), ("noData", 7))
class AdicDriveStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("idle", 1), ("loading", 2), ("ejecting", 3), ("inserted", 4), ("removed", 5), ("notInstalled", 6), ("noData", 7))
class RowStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("active", 1), ("notInService", 2), ("notReady", 3), ("createAndGo", 4), ("createAndWait", 5), ("destroy", 6))
productMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 1), AdicMibVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productMibVersion.setDescription('MIB version identifier.')
productSnmpAgentVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productSnmpAgentVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productSnmpAgentVersion.setDescription('SNMP agent version identifier.')
productName = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productName.setStatus('mandatory')
if mibBuilder.loadTexts: productName.setDescription('Name of ADIC branded product. Uniquely identifies the product, independent of OEM.')
productDisplayName = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDisplayName.setStatus('mandatory')
if mibBuilder.loadTexts: productDisplayName.setDescription('Name of this agent for display purposes. May be customized for OEM.')
productDescription = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDescription.setStatus('mandatory')
if mibBuilder.loadTexts: productDescription.setDescription('A short description of this SNMP agent.')
productVendor = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productVendor.setStatus('mandatory')
if mibBuilder.loadTexts: productVendor.setDescription('Name of the product vendor or OEM.')
productVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productVersion.setDescription('String Format: MNNO.TVBBBPP Examples 1. 091a.TR054 Version 0.91, build 54 of the RCS test code for ADIC 2. 100A.GM052 Version 1.00, build 52 of the MCB GA candidate code for ADIC M Major version number NN Minor version number O OEM (Uppercase when release candidate, otherwise lowercase) A/a - ADIC Others - Reserved) T Target G - GA Candidate Release (labeled build that is a release candidate) T - Test build (labeled build used for formal testing) D - Dev build (labeled build used for unit testing) (lower case) - specifies developer of a local build V Variant S - System R - RCS M - MCB BBB Build number (3 digit sequential number specifying exact build) PP Patch Number (Optional alphanumeric characters denoting patch level of this build if necessary)')
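# Worked decoding of the second example above (added for illustration): in
# "100A.GM052", M=1 and NN=00 give version 1.00, O=A marks an ADIC release
# candidate, T=G a GA candidate build, V=M the MCB variant, BBB=052 the build
# number, and no optional patch suffix is present.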
productDisplayVersion = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productDisplayVersion.setStatus('mandatory')
if mibBuilder.loadTexts: productDisplayVersion.setDescription('The version identifier according to the vendor or OEM.')
productLibraryClass = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 10))).clone(namedValues=NamedValues(("basic", 1), ("intelligent", 2), ("virtual", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: productLibraryClass.setStatus('mandatory')
if mibBuilder.loadTexts: productLibraryClass.setDescription('Basic library includes minimal connectivity hardware. Intelligent library includes SAN appliances and value-added features.')
productSerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 10, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: productSerialNumber.setStatus('mandatory')
if mibBuilder.loadTexts: productSerialNumber.setDescription('The serial number of the entire library.')
agentGlobalStatus = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 1), AdicAgentStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentGlobalStatus.setStatus('mandatory')
if mibBuilder.loadTexts: agentGlobalStatus.setDescription('Current overall status of the agent.')
agentLastGlobalStatus = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 2), AdicAgentStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentLastGlobalStatus.setStatus('mandatory')
if mibBuilder.loadTexts: agentLastGlobalStatus.setDescription('The status before the current status which induced an initiative to issue a global status change trap.')
agentTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: agentTimeStamp.setDescription('The last time that the agent values have been updated. Universal time in seconds since UTC 1/1/70.')
agentGetTimeOut = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentGetTimeOut.setStatus('mandatory')
if mibBuilder.loadTexts: agentGetTimeOut.setDescription('Suggested time out in milliseconds for how long an SNMP management application should wait while attempting to poll the SNMP agent.')
agentModifiers = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentModifiers.setStatus('mandatory')
if mibBuilder.loadTexts: agentModifiers.setDescription('Agent functional modifiers, when set the modifier is active. ----------------------------------------------------- Bit 3 => Agent in debug mode. ----------------------------------------------------- All other bits are product specific.')
agentRefreshRate = MibScalar((1, 3, 6, 1, 4, 1, 3764, 1, 1, 20, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentRefreshRate.setStatus('mandatory')
if mibBuilder.loadTexts: agentRefreshRate.setDescription('Rate in seconds at which the agent cached data is being updated.')
componentTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10), )
if mibBuilder.loadTexts: componentTable.setStatus('mandatory')
if mibBuilder.loadTexts: componentTable.setDescription("General information about the system's components, including the unique identifiers. The structure of this table is based on the Fibre Alliance MIB connUnitEntry.")
componentEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"))
if mibBuilder.loadTexts: componentEntry.setStatus('mandatory')
if mibBuilder.loadTexts: componentEntry.setDescription('A component entry containing objects for a particular component.')
componentId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 1), AdicGlobalId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentId.setStatus('mandatory')
if mibBuilder.loadTexts: componentId.setDescription('The unique identification for this component among those within this proxy domain.')
componentType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 2), AdicComponentType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentType.setStatus('mandatory')
if mibBuilder.loadTexts: componentType.setDescription('The type of this component.')
componentDisplayName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentDisplayName.setStatus('mandatory')
if mibBuilder.loadTexts: componentDisplayName.setDescription('Name of this component for display purposes. Different OEMs may have different display names for the same ADIC product.')
componentInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: componentInfo.setStatus('mandatory')
if mibBuilder.loadTexts: componentInfo.setDescription('A display string containing information about this component.')
componentLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentLocation.setStatus('mandatory')
if mibBuilder.loadTexts: componentLocation.setDescription('Location information for this component.')
componentVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentVendor.setStatus('mandatory')
if mibBuilder.loadTexts: componentVendor.setDescription('Name vendor of this component.')
componentSn = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 79))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentSn.setStatus('mandatory')
if mibBuilder.loadTexts: componentSn.setDescription('The serial number for this component.')
componentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("unused", 2), ("ok", 3), ("warning", 4), ("failed", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: componentStatus.setDescription('Overall status of the component.')
componentControl = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("resetColdStart", 1), ("resetWarmStart", 2), ("offline", 3), ("online", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: componentControl.setStatus('mandatory')
if mibBuilder.loadTexts: componentControl.setDescription("This object is used to control the addressed connUnit. NOTE: 'Cold Start' and 'Warm Start' are as defined in MIB II and are not meant to be a factory reset. resetColdStart: the addressed unit performs a 'Cold Start' reset. resetWarmStart: the addressed unit performs a 'Warm Start' reset. offline: the addressed unit puts itself into an implementation dependant 'offline' state. online: the addressed unit puts itself into an implementation dependant 'online' state.")
componentREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentREDId.setStatus('mandatory')
if mibBuilder.loadTexts: componentREDId.setDescription('Runtime Error Detection identifier for this power supply.')
componentFirmwareVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentFirmwareVersion.setStatus('mandatory')
if mibBuilder.loadTexts: componentFirmwareVersion.setDescription('Firmware version (or level) for this component.')
componentGeoAddrAisle = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrAisle.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrAisle.setDescription('The aisle number where this component is located. A negative value indicates that an aisle number is not applicable to this component.')
componentGeoAddrFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrFrame.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrFrame.setDescription('The frame number where this component is located. A negative value indicates that a frame number is not applicable to this component.')
componentGeoAddrRack = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrRack.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrRack.setDescription('The rack number where this component is located. A negative value indicates that a rack number is not applicable to this component.')
componentGeoAddrChassis = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrChassis.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrChassis.setDescription('The chassis number where this component is located. A negative value indicates that a chassis number is not applicable to this component.')
componentGeoAddrBlade = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentGeoAddrBlade.setStatus('mandatory')
if mibBuilder.loadTexts: componentGeoAddrBlade.setDescription('The blade number within the network chassis where this component is located. A negative value indicates that a blade number is not applicable to this component.')
componentIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 30, 10, 1, 17), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: componentIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: componentIpAddress.setDescription('IP address of this component. If the component has no IP address, this object returns 0.0.0.0. The address may refer to an internal network not accessible to an external management application.')
powerSupplyTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10), )
if mibBuilder.loadTexts: powerSupplyTable.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyTable.setDescription('** This table is optional ** Table of the power supplies.')
powerSupplyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "powerSupplyIndex"))
if mibBuilder.loadTexts: powerSupplyEntry.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific power supply.')
powerSupplyIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyIndex.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyIndex.setDescription('** This object is optional ** Index of this power supply within the component specified by componentId.')
powerSupplyName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyName.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyName.setDescription('** This object is optional ** Display name of this power supply.')
powerSupplyWattage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyWattage.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyWattage.setDescription('** This object is optional ** What is maximum power output of this power supply. Units are Watts.')
powerSupplyType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 4), AdicVoltageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyType.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyType.setDescription('** This object is optional ** DC or AC power supply?')
powerSupplyREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 5), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyREDId.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this power supply.')
powerSupplyRatedVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyRatedVoltage.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyRatedVoltage.setDescription('** This object is optional ** Rated output voltage in millivolts of this power supply.')
powerSupplyLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 10, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyLocation.setStatus('optional')
if mibBuilder.loadTexts: powerSupplyLocation.setDescription('** This object is optional ** Physical location of this power supply.')
voltageSensorTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20), )
if mibBuilder.loadTexts: voltageSensorTable.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorTable.setDescription('** This table is optional ** Table of the voltage sensors.')
voltageSensorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "powerSupplyIndex"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "voltageSensorIndex"))
if mibBuilder.loadTexts: voltageSensorEntry.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific voltage sensor.')
voltageSensorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorIndex.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorIndex.setDescription('** This object is optional ** Index of this voltage sensor within the component specified by componentId.')
voltageSensorName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorName.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorName.setDescription('** This object is optional ** Display name of this voltage sensor.')
voltageSensorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorStatus.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorStatus.setDescription('** This object is optional ** What is the state of this voltage sensor? Is the voltage in the nominal, warning or alarm region?')
voltageSensorMillivolts = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorMillivolts.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorMillivolts.setDescription('** This object is optional ** What is the voltage in millivolts of this voltage sensor?')
voltageSensorType = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 5), AdicVoltageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorType.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorType.setDescription('** This object is optional ** DC or AC voltage sensor?')
voltageSensorNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorNominalLo.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorNominalLo.setDescription('** This object is optional ** Lower voltage limit of the nominal state for this voltage sensor. Units are millivolts.')
voltageSensorNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorNominalHi.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorNominalHi.setDescription('** This object is optional ** Upper voltage limit of the nominal state for this voltage sensor. Units are millivolts.')
voltageSensorWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorWarningLo.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorWarningLo.setDescription('** This object is optional ** Lower voltage limit of the warning state for this voltage sensor. Units are millivolts. If the voltage falls below this limit, the sensor enters the alarm state.')
voltageSensorWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorWarningHi.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorWarningHi.setDescription('** This object is optional ** Upper voltage limit of the warning state for this voltage sensor. Units are millivolts. If the voltage rises above this limit, the sensor enters the alarm state.')
voltageSensorLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorLocation.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorLocation.setDescription('** This object is optional ** Physical location of the voltage sensor.')
voltageSensorREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 20, 1, 11), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: voltageSensorREDId.setStatus('optional')
if mibBuilder.loadTexts: voltageSensorREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this voltage sensor.')
temperatureSensorTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30), )
if mibBuilder.loadTexts: temperatureSensorTable.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorTable.setDescription('** This table is optional ** Table of the temperature sensors in each component.')
temperatureSensorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "temperatureSensorIndex"))
if mibBuilder.loadTexts: temperatureSensorEntry.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific sensor.')
temperatureSensorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorIndex.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorIndex.setDescription('** This object is optional ** Index of this temperatureSensor within the component specified by componentId.')
temperatureSensorName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorName.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorName.setDescription('** This object is optional ** Display name of this temperatureSensor.')
temperatureSensorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorStatus.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorStatus.setDescription('** This object is optional ** What is the state of this temperatureSensor? Is the temperature in the nominal, warning or alarm region?')
temperatureSensorDegreesCelsius = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorDegreesCelsius.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorDegreesCelsius.setDescription('** This object is optional ** The temperature in degrees Celsius for this temperature sensor.')
temperatureSensorNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorNominalLo.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorNominalLo.setDescription('** This object is optional ** Lower temperature limit of the nominal state for this temperature sensor. Units are degrees Celsius.')
temperatureSensorNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorNominalHi.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorNominalHi.setDescription('** This object is optional ** Upper temperature limit of the nominal state for this temperature sensor. Units are degrees Celsius.')
temperatureSensorWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorWarningLo.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorWarningLo.setDescription('** This object is optional ** Lower temperature limit of the warning state for this temperature sensor. Units are degrees Celsius. If the temperature falls below this limit, the sensor enters the alarm state.')
temperatureSensorWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorWarningHi.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorWarningHi.setDescription('** This object is optional ** Upper temperature limit of the warning state for this temperature sensor. Units are degrees Celsius. If the temperature rises above this limit, the sensor enters the alarm state.')
temperatureSensorLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorLocation.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorLocation.setDescription('** This object is optional ** Physical location of this temperature sensor.')
temperatureSensorREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 30, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureSensorREDId.setStatus('optional')
if mibBuilder.loadTexts: temperatureSensorREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this temperature sensor.')
coolingFanTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40), )
if mibBuilder.loadTexts: coolingFanTable.setStatus('optional')
if mibBuilder.loadTexts: coolingFanTable.setDescription('** This table is optional ** Table of cooling fans in the library.')
coolingFanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), (0, "ADIC-INTELLIGENT-STORAGE-MIB", "coolingFanIndex"))
if mibBuilder.loadTexts: coolingFanEntry.setStatus('optional')
if mibBuilder.loadTexts: coolingFanEntry.setDescription('** This entry object is optional ** Each entry contains the information for a specific cooling fan.')
coolingFanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanIndex.setStatus('optional')
if mibBuilder.loadTexts: coolingFanIndex.setDescription('** This object is optional ** Index of this cooling fan within the component specified by componentId.')
coolingFanName = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanName.setStatus('optional')
if mibBuilder.loadTexts: coolingFanName.setDescription('** This object is optional ** Display name of this coolingFan.')
coolingFanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 3), AdicSensorStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanStatus.setStatus('optional')
if mibBuilder.loadTexts: coolingFanStatus.setDescription('** This object is optional ** Is the fan speed in the nominal, warning or alarm region?')
coolingFanRPM = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanRPM.setStatus('optional')
if mibBuilder.loadTexts: coolingFanRPM.setDescription('** This object is optional ** The fan speed in revolutions per minute.')
coolingFanNominalLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanNominalLo.setStatus('optional')
if mibBuilder.loadTexts: coolingFanNominalLo.setDescription('** This object is optional ** Lower fan speed limit of the nominal state for this fan. Units are RPM.')
coolingFanNominalHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanNominalHi.setStatus('optional')
if mibBuilder.loadTexts: coolingFanNominalHi.setDescription('** This object is optional ** Upper fan speed limit of the nominal state for this fan. Units are RPM.')
coolingFanWarningLo = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanWarningLo.setStatus('optional')
if mibBuilder.loadTexts: coolingFanWarningLo.setDescription('** This object is optional ** Lower fan speed limit of the warning state for this fan. Units are RPM. If the speed falls below this limit, the fan enters the alarmLow state.')
coolingFanWarningHi = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanWarningHi.setStatus('optional')
if mibBuilder.loadTexts: coolingFanWarningHi.setDescription('** This object is optional ** Upper fan speed limit of the warning state for this fan. Units are RPM. If the speed rises above this limit, the fan enters the alarmHigh state.')
coolingFanLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanLocation.setStatus('optional')
if mibBuilder.loadTexts: coolingFanLocation.setDescription('** This object is optional ** Physical location of this fan.')
coolingFanREDId = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 200, 200, 40, 1, 10), AdicREDIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: coolingFanREDId.setStatus('optional')
if mibBuilder.loadTexts: coolingFanREDId.setDescription('** This object is optional ** Runtime Error Detection identifier for this fan.')
trapPayloadTable = MibTable((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10), )
if mibBuilder.loadTexts: trapPayloadTable.setStatus('mandatory')
if mibBuilder.loadTexts: trapPayloadTable.setDescription('Defines objects common to all trap payloads.')
trapPayloadEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1), ).setIndexNames((0, "ADIC-INTELLIGENT-STORAGE-MIB", "trapSequenceNumber"))
if mibBuilder.loadTexts: trapPayloadEntry.setStatus('mandatory')
if mibBuilder.loadTexts: trapPayloadEntry.setDescription('Each entry contains the information for a specific cooling fan.')
trapSequenceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSequenceNumber.setStatus('mandatory')
if mibBuilder.loadTexts: trapSequenceNumber.setDescription('')
trapSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSeverity.setStatus('mandatory')
if mibBuilder.loadTexts: trapSeverity.setDescription('')
trapSummaryText = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapSummaryText.setStatus('mandatory')
if mibBuilder.loadTexts: trapSummaryText.setDescription('')
trapIntendedUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 3764, 1, 1, 500, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("public", 1), ("triggerRefresh", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapIntendedUsage.setStatus('mandatory')
if mibBuilder.loadTexts: trapIntendedUsage.setDescription("The value of this qualifier aids the management application in determining how to respond to the trap. If the value is public(1), the information is intended to be propagated to external observers, such as sending email. If the value is triggerRefresh(2), the information is intended to update the management application's data model, but not necessarily propagated to external observers.")
startupSequenceComplete = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,500)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "trapSummaryText"))
if mibBuilder.loadTexts: startupSequenceComplete.setDescription('The component indicated by the value of componentId has successfully completed its startup sequence.')
shutdownSequenceInitiated = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,501)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "trapSummaryText"))
if mibBuilder.loadTexts: shutdownSequenceInitiated.setDescription('The component indicated by the value of componentId has initiated its shutdown sequence.')
componentAdded = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,502)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "componentType"))
if mibBuilder.loadTexts: componentAdded.setDescription('The component indicated by the value of componentId has been added to the library.')
componentRemoved = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,503)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "componentId"), ("ADIC-INTELLIGENT-STORAGE-MIB", "componentType"))
if mibBuilder.loadTexts: componentRemoved.setDescription('The component indicated by the value of componentId has been removed from the library.')
productLibraryClassChange = NotificationType((1, 3, 6, 1, 4, 1, 3764, 1, 1) + (0,504)).setObjects(("ADIC-INTELLIGENT-STORAGE-MIB", "productLibraryClass"), ("ADIC-INTELLIGENT-STORAGE-MIB", "productLibraryClass"))
if mibBuilder.loadTexts: productLibraryClassChange.setDescription('The product library class has changed. This occurs when connectivity hardware is added or removed. The payload contains the productLibraryClass before and after the change.')
mibBuilder.exportSymbols("ADIC-INTELLIGENT-STORAGE-MIB", powerSupplyTable=powerSupplyTable, powerSupplyEntry=powerSupplyEntry, sml=sml, powerSupplyREDId=powerSupplyREDId, temperatureSensorEntry=temperatureSensorEntry, componentLocation=componentLocation, voltageSensorNominalLo=voltageSensorNominalLo, temperatureSensorWarningHi=temperatureSensorWarningHi, intelligent=intelligent, RowStatus=RowStatus, AdicVoltageType=AdicVoltageType, software=software, agentModifiers=agentModifiers, shutdownSequenceInitiated=shutdownSequenceInitiated, coolingFanName=coolingFanName, voltageSensorTable=voltageSensorTable, trapSequenceNumber=trapSequenceNumber, trapIntendedUsage=trapIntendedUsage, componentIpAddress=componentIpAddress, globalData=globalData, temperatureSensorNominalHi=temperatureSensorNominalHi, productName=productName, powerSupplyRatedVoltage=powerSupplyRatedVoltage, AdicAgentStatus=AdicAgentStatus, voltageSensorWarningLo=voltageSensorWarningLo, agentGetTimeOut=agentGetTimeOut, coolingFanLocation=coolingFanLocation, AdicGlobalId=AdicGlobalId, voltageSensorStatus=voltageSensorStatus, AdicMibVersion=AdicMibVersion, powerSupplyLocation=powerSupplyLocation, productLibraryClassChange=productLibraryClassChange, AdicTrapSeverity=AdicTrapSeverity, storage=storage, componentEntry=componentEntry, coolingFanIndex=coolingFanIndex, temperatureSensorDegreesCelsius=temperatureSensorDegreesCelsius, voltageSensorLocation=voltageSensorLocation, agentRefreshRate=agentRefreshRate, coolingFanNominalHi=coolingFanNominalHi, AdicInterfaceType=AdicInterfaceType, componentId=componentId, temperatureSensorIndex=temperatureSensorIndex, coolingFanStatus=coolingFanStatus, AdicDriveStatus=AdicDriveStatus, coolingFanREDId=coolingFanREDId, trapPayloadEntry=trapPayloadEntry, agentTimeStamp=agentTimeStamp, componentREDId=componentREDId, powerAndCooling=powerAndCooling, voltageSensorEntry=voltageSensorEntry, coolingFanWarningHi=coolingFanWarningHi, AdicDateAndTime=AdicDateAndTime, componentGeoAddrBlade=componentGeoAddrBlade, notification=notification, productDisplayVersion=productDisplayVersion, componentControl=componentControl, AdicDoorStatus=AdicDoorStatus, componentGeoAddrChassis=componentGeoAddrChassis, productSnmpAgentVersion=productSnmpAgentVersion, components=components, agentLastGlobalStatus=agentLastGlobalStatus, temperatureSensorNominalLo=temperatureSensorNominalLo, voltageSensorType=voltageSensorType, componentGeoAddrAisle=componentGeoAddrAisle, network=network, componentDisplayName=componentDisplayName, temperatureSensorTable=temperatureSensorTable, powerSupplyType=powerSupplyType, temperatureSensorStatus=temperatureSensorStatus, AdicREDIdentifier=AdicREDIdentifier, voltageSensorIndex=voltageSensorIndex, componentTable=componentTable, componentStatus=componentStatus, powerSupplyIndex=powerSupplyIndex, AdicSensorStatus=AdicSensorStatus, agentGlobalStatus=agentGlobalStatus, componentVendor=componentVendor, AdicComponentType=AdicComponentType, componentFirmwareVersion=componentFirmwareVersion, coolingFanNominalLo=coolingFanNominalLo, coolingFanTable=coolingFanTable, temperatureSensorREDId=temperatureSensorREDId, coolingFanWarningLo=coolingFanWarningLo, powerSupplyName=powerSupplyName, hardware=hardware, voltageSensorName=voltageSensorName, productAgentInfo=productAgentInfo, Boolean=Boolean, voltageSensorNominalHi=voltageSensorNominalHi, temperatureSensorName=temperatureSensorName, componentSn=componentSn, powerSupplyWattage=powerSupplyWattage, voltageSensorMillivolts=voltageSensorMillivolts, 
voltageSensorWarningHi=voltageSensorWarningHi, startupSequenceComplete=startupSequenceComplete, productDisplayName=productDisplayName, productLibraryClass=productLibraryClass, componentGeoAddrRack=componentGeoAddrRack, productSerialNumber=productSerialNumber, adic=adic, coolingFanEntry=coolingFanEntry, AdicEnable=AdicEnable, temperatureSensorWarningLo=temperatureSensorWarningLo, componentType=componentType, componentAdded=componentAdded, productVendor=productVendor, componentRemoved=componentRemoved, productVersion=productVersion, voltageSensorREDId=voltageSensorREDId, productMibVersion=productMibVersion, componentGeoAddrFrame=componentGeoAddrFrame, temperatureSensorLocation=temperatureSensorLocation, trapPayloadTable=trapPayloadTable, trapSummaryText=trapSummaryText, AdicOnlineStatus=AdicOnlineStatus, trapSeverity=trapSeverity, componentInfo=componentInfo, coolingFanRPM=coolingFanRPM, productDescription=productDescription)
| 123.405028 | 4,464 | 0.774735 | [
"Apache-2.0"
] | agustinhenze/mibs.snmplabs.com | pysnmp-with-texts/ADIC-INTELLIGENT-STORAGE-MIB.py | 44,179 | Python |
"""Tests to ensure that the html.parser tree builder generates good
trees."""
from pdb import set_trace
import pickle
from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest
from bs4.builder import HTMLParserTreeBuilder
from bs4.builder._htmlparser import BeautifulSoupHTMLParser
class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
default_builder = HTMLParserTreeBuilder
def test_namespaced_system_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_namespaced_public_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_builder_is_pickled(self):
"""Unlike most tree builders, HTMLParserTreeBuilder and will
be restored after pickling.
"""
tree = self.soup("<a><b>foo</a>")
dumped = pickle.dumps(tree, 2)
loaded = pickle.loads(dumped)
self.assertTrue(isinstance(loaded.builder, type(tree.builder)))
def test_redundant_empty_element_closing_tags(self):
self.assertSoupEquals('<br></br><br></br><br></br>', "<br/><br/><br/>")
self.assertSoupEquals('</br></br></br>', "")
def test_empty_element(self):
# This verifies that any buffered data present when the parser
# finishes working is handled.
self.assertSoupEquals("foo &# bar", "foo &# bar")
class TestHTMLParserSubclass(SoupTest):
def test_error(self):
"""Verify that our HTMLParser subclass implements error() in a way
that doesn't cause a crash.
"""
parser = BeautifulSoupHTMLParser()
parser.error("don't crash")
| 35.166667 | 79 | 0.690758 | [
"Unlicense"
] | AG371/bus-reservation-system | virtual/lib/python3.6/site-packages/bs4/tests/test_htmlparser.py | 1,688 | Python |
from flask import render_template,redirect,url_for,request,flash
from . import auth
from ..models import Group
from .forms import RegistrationForm,LoginForm
from .. import db
from flask_login import login_user,logout_user,login_required
@auth.route('/login', methods=["GET", "POST"])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
group = Group.query.filter_by( name=login_form.name.data).first()
if group is not None and group.verify_password(login_form.password.data):
login_user(group, login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.group', id=group.id))
flash('Invalid group name or password')
title="Login"
return render_template('auth/login.html', login_form=login_form, title=title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
@auth.route('/register', methods=["GET", "POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
group = Group( name=form.name.data, password=form.password.data)
db.session.add(group)
db.session.commit()
return redirect(url_for('auth.login'))
title="New Account"
return render_template('auth/register.html', registration_form=form, title=title)
| 24.517857 | 91 | 0.697742 | [
"MIT"
] | mwerumuchai/jukebox | app/auth/views.py | 1,373 | Python |
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
import settings.base
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'profiles.views.index', name='index'),
url(r'^accounts/', include('allauth.urls')),
# Examples:
# url(r'^$', 'explorind_project.views.home', name='home'),
# url(r'^explorind_project/', include('explorind_project.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^locations/', include('locations.urls')),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.base.STATIC_ROOT,
}),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.base.MEDIA_ROOT}),
url(r'^login$', 'profiles.views.login_view'), # login
url(r'^logout$', 'profiles.views.logout_view'), # logout
url(r'^signup$', 'profiles.views.signup'), # signup
url(r'^submit$', 'profiles.views.submit'),
url(r'^reviews$', 'profiles.views.public'),
url(r'^users/$', 'profiles.views.users'),
url(r'^users/(?P<username>.{0,30})/$', 'profiles.views.users'),
url(r'^follow$', 'profiles.views.follow'),
)
| 40.305556 | 73 | 0.669194 | [
"MIT"
] | Reinaldowijaya/explorind | explorind_project/explorind_project/urls.py | 1,451 | Python |
import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="colorbar", parent_name="volume", **kwargs):
super(ColorbarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "ColorBar"),
data_docs=kwargs.pop(
"data_docs",
"""
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in *pixels. Use `len` to set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format We add one item to d3's
date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f"
would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.volume.
colorbar.Tickformatstop` instances or dicts
with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.volume.colorbar.tickformatstopdefaults), sets
the default property values to use for elements
of volume.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of
the axis. The default value for inside tick
labels is *hide past domain*. In other cases
the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.volume.colorbar.Ti
tle` instance or dict with compatible
properties
titlefont
Deprecated: Please use
volume.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's
font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use
volume.colorbar.title.side instead. Determines
the location of color bar's title with respect
to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
""",
),
**kwargs
)
| 47.359833 | 79 | 0.526372 | [
"MIT"
] | 1abner1/plotly.py | packages/python/plotly/plotly/validators/volume/_colorbar.py | 11,319 | Python |
from django.urls import path, include
from . import views
app_name = 'users'
urlpatterns = [
path('login/', views.LoginView.as_view(), name = 'login'),
path('logout/', views.LogoutView.as_view(), name = 'logout'),
path('register/', views.RegisterView.as_view(), name = 'register'),
path('', include('django.contrib.auth.urls')),
]
| 26.923077 | 71 | 0.66 | [
"Apache-2.0"
] | CMPUT404-F21T0/CMPUT404-Project-BetterSocial | socialdistribution/users/urls.py | 350 | Python |
"""GoodWe PV inverter numeric settings entities."""
from __future__ import annotations
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
import logging
from goodwe import Inverter, InverterError
from homeassistant.components.number import NumberEntity, NumberEntityDescription
from homeassistant.const import ENTITY_CATEGORY_CONFIG, PERCENTAGE, POWER_WATT
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN, KEY_DEVICE_INFO, KEY_INVERTER
_LOGGER = logging.getLogger(__name__)
@dataclass
class GoodweNumberEntityDescriptionBase:
"""Required values when describing Goodwe number entities."""
getter: Callable[[Inverter], Awaitable[int]]
setter: Callable[[Inverter, int], Awaitable[None]]
@dataclass
class GoodweNumberEntityDescription(
NumberEntityDescription, GoodweNumberEntityDescriptionBase
):
"""Class describing Goodwe number entities."""
NUMBERS = (
GoodweNumberEntityDescription(
key="grid_export_limit",
name="Grid export limit",
icon="mdi:transmission-tower",
entity_category=ENTITY_CATEGORY_CONFIG,
unit_of_measurement=POWER_WATT,
getter=lambda inv: inv.get_grid_export_limit(),
setter=lambda inv, val: inv.set_grid_export_limit(val),
step=100,
min_value=0,
max_value=10000,
),
GoodweNumberEntityDescription(
key="battery_discharge_depth",
name="Depth of discharge (on-grid)",
icon="mdi:battery-arrow-down",
entity_category=ENTITY_CATEGORY_CONFIG,
unit_of_measurement=PERCENTAGE,
getter=lambda inv: inv.get_ongrid_battery_dod(),
setter=lambda inv, val: inv.set_ongrid_battery_dod(val),
step=1,
min_value=0,
max_value=99,
),
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the inverter select entities from a config entry."""
inverter = hass.data[DOMAIN][config_entry.entry_id][KEY_INVERTER]
device_info = hass.data[DOMAIN][config_entry.entry_id][KEY_DEVICE_INFO]
entities = []
for description in NUMBERS:
try:
current_value = await description.getter(inverter)
except InverterError:
# Inverter model does not support this setting
_LOGGER.debug("Could not read inverter setting %s", description.key)
continue
entities.append(
InverterNumberEntity(device_info, description, inverter, current_value),
)
async_add_entities(entities)
class InverterNumberEntity(NumberEntity):
"""Inverter numeric setting entity."""
_attr_should_poll = False
entity_description: GoodweNumberEntityDescription
def __init__(
self,
device_info: DeviceInfo,
description: GoodweNumberEntityDescription,
inverter: Inverter,
current_value: int,
) -> None:
"""Initialize the number inverter setting entity."""
self.entity_description = description
self._attr_unique_id = f"{DOMAIN}-{description.key}-{inverter.serial_number}"
self._attr_device_info = device_info
self._attr_value = float(current_value)
self._inverter: Inverter = inverter
async def async_set_value(self, value: float) -> None:
"""Set new value."""
if self.entity_description.setter:
await self.entity_description.setter(self._inverter, int(value))
self._attr_value = value
self.async_write_ha_state()
| 32.081818 | 85 | 0.706999 | [
"Apache-2.0"
] | kubawolanin/core | homeassistant/components/goodwe/number.py | 3,529 | Python |
from unittest import TestCase
from doccano_transformer import utils
class TestUtils(TestCase):
def test_get_offsets(self):
text = ' This is Doccano Transformer . '
tokens = text.split()
result = utils.get_offsets(text, tokens)
expected = [1, 6, 9, 17, 29]
self.assertListEqual(result, expected)
def test_create_bio_tags(self):
tokens = ' This is Doccano Transformer . '.split()
offsets = [1, 6, 9, 17, 29]
labels = [[9, 28, 'SOFTWARE']]
result = utils.create_bio_tags(tokens, offsets, labels)
expected = ['O', 'O', 'B-SOFTWARE', 'I-SOFTWARE', 'O']
self.assertListEqual(result, expected)
def test_convert_tokens_and_offsets_to_spacy_tokens(self):
tokens = 'This is Doccano Transformer .'.split()
offsets = [0, 5, 8, 16, 28]
spacy_tokens = utils.convert_tokens_and_offsets_to_spacy_tokens(
tokens, offsets
)
for i, (spacy_token, token, offset) in enumerate(
zip(spacy_tokens, tokens, offsets)
):
self.assertEqual(str(spacy_token), token)
self.assertEqual(len(spacy_token), len(token))
self.assertEqual(spacy_token.i, i)
self.assertEqual(spacy_token.idx, offset)
| 35.722222 | 72 | 0.623639 | [
"MIT"
] | 7brokenmirrors/doccano-transformer | tests/test_utils.py | 1,286 | Python |
# -*- coding: utf-8 -*-
"""Configuration file for sniffer."""
# pylint: disable=superfluous-parens,bad-continuation
import time
import subprocess
from sniffer.api import select_runnable, file_validator, runnable
try:
from pync import Notifier
except ImportError:
notify = None
else:
notify = Notifier.notify
watch_paths = ["flask_api"]
class Options(object):
group = int(time.time()) # unique per run
show_coverage = False
rerun_args = None
targets = [
(('make', 'test'), "Run Tests", True),
(('make', 'check'), "Static Analysis", True),
(('make', 'doc'), None, True),
]
@select_runnable('run_targets')
@file_validator
def python_files(filename):
return filename.endswith('.py')
@select_runnable('run_targets')
@file_validator
def html_files(filename):
return filename.split('.')[-1] in ['html', 'css', 'js']
@runnable
def run_targets(*args):
"""Run targets for Python."""
Options.show_coverage = 'coverage' in args
count = 0
for count, (command, title, retry) in enumerate(Options.targets, start=1):
success = call(command, title, retry)
if not success:
message = "✅ " * (count - 1) + "❌"
show_notification(message, title)
return False
message = "✅ " * count
title = "All Targets"
show_notification(message, title)
show_coverage()
return True
def call(command, title, retry):
"""Run a command-line program and display the result."""
if Options.rerun_args:
command, title, retry = Options.rerun_args
Options.rerun_args = None
success = call(command, title, retry)
if not success:
return False
print("")
print("$ %s" % ' '.join(command))
failure = subprocess.call(command)
if failure and retry:
Options.rerun_args = command, title, retry
return not failure
def show_notification(message, title):
"""Show a user notification."""
if notify and title:
notify(message, title=title, group=Options.group)
def show_coverage():
"""Launch the coverage report."""
if Options.show_coverage:
subprocess.call(['make', 'read-coverage'])
Options.show_coverage = False
| 22.94898 | 78 | 0.636727 | [
"Apache-2.0",
"BSD-2-Clause"
] | EazeAI/AI-WS | scent.py | 2,255 | Python |
"""Provide useful functions for using PTLFlow."""
# =============================================================================
# Copyright 2021 Henrique Morimitsu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
__version__ = '0.2.5'
import logging
from argparse import Namespace
from pathlib import Path
from typing import List, Optional
import requests
import torch
from torch import hub
from ptlflow.models.base_model.base_model import BaseModel
from ptlflow.models.dicl.dicl import DICL
from ptlflow.models.fastflownet.fastflownet import FastFlowNet
from ptlflow.models.flownet.flownet2 import FlowNet2
from ptlflow.models.flownet.flownetc import FlowNetC
from ptlflow.models.flownet.flownetcs import FlowNetCS
from ptlflow.models.flownet.flownetcss import FlowNetCSS
from ptlflow.models.flownet.flownets import FlowNetS
from ptlflow.models.flownet.flownetsd import FlowNetSD
from ptlflow.models.gma.gma import GMA
from ptlflow.models.hd3.hd3 import HD3, HD3Context
from ptlflow.models.irr.pwcnet import IRRPWCNet
from ptlflow.models.irr.pwcnet_irr import IRRPWCNetIRR
from ptlflow.models.irr.irr_pwc import IRRPWC
from ptlflow.models.lcv.lcv_raft import LCV_RAFT, LCV_RAFTSmall
from ptlflow.models.liteflownet.liteflownet import LiteFlowNet
from ptlflow.models.liteflownet.liteflownet3 import (
LiteFlowNet3, LiteFlowNet3PseudoReg, LiteFlowNet3S, LiteFlowNet3SPseudoReg)
from ptlflow.models.liteflownet.liteflownet2 import LiteFlowNet2, LiteFlowNet2PseudoReg
from ptlflow.models.maskflownet.maskflownet import MaskFlownet, MaskFlownet_S
from ptlflow.models.pwcnet.pwcnet import PWCNet, PWCDCNet
from ptlflow.models.raft.raft import RAFT, RAFTSmall
from ptlflow.models.scopeflow.irr_pwc_v2 import ScopeFlow
from ptlflow.models.starflow.starflow import StarFlow
from ptlflow.models.vcn.vcn import VCN, VCNSmall
from ptlflow.utils.utils import config_logging
try:
from ptlflow.models.scv.scv import SCVEighth, SCVQuarter
except ImportError as e:
print(e)
SCVEighth = None
SCVQuarter = None
config_logging()
models_dict = {
'dicl': DICL,
'fastflownet': FastFlowNet,
'flownet2': FlowNet2,
'flownetc': FlowNetC,
'flownetcs': FlowNetCS,
'flownetcss': FlowNetCSS,
'flownets': FlowNetS,
'flownetsd': FlowNetSD,
'gma': GMA,
'hd3': HD3,
'hd3_ctxt': HD3Context,
'irr_pwc': IRRPWC,
'irr_pwcnet': IRRPWCNet,
'irr_pwcnet_irr': IRRPWCNetIRR,
'lcv_raft': LCV_RAFT,
'lcv_raft_small': LCV_RAFTSmall,
'liteflownet': LiteFlowNet,
'liteflownet2': LiteFlowNet2,
'liteflownet2_pseudoreg': LiteFlowNet2PseudoReg,
'liteflownet3': LiteFlowNet3,
'liteflownet3_pseudoreg': LiteFlowNet3PseudoReg,
'liteflownet3s': LiteFlowNet3S,
'liteflownet3s_pseudoreg': LiteFlowNet3SPseudoReg,
'maskflownet': MaskFlownet,
'maskflownet_s': MaskFlownet_S,
'pwcnet': PWCNet,
'pwcdcnet': PWCDCNet,
'raft': RAFT,
'raft_small': RAFTSmall,
'scopeflow': ScopeFlow,
'scv4': SCVQuarter,
'scv8': SCVEighth,
'starflow': StarFlow,
'vcn': VCN,
'vcn_small': VCNSmall,
}
def download_scripts(
destination_dir: Path = Path('ptlflow_scripts')
) -> None:
"""Download the main scripts and configs to start working with PTLFlow."""
github_url = 'https://raw.githubusercontent.com/hmorimitsu/ptlflow/main/'
script_names = [
'datasets.yml',
'infer.py',
'test.py',
'train.py',
'validate.py'
]
destination_dir.mkdir(parents=True, exist_ok=True)
for sname in script_names:
script_url = github_url + sname
data = requests.get(script_url)
if data.status_code == 200:
with open(destination_dir / sname, 'wb') as f:
f.write(data.content)
else:
logging.warning('Script %s was not found.', script_url)
logging.info('Downloaded scripts to %s.', str(destination_dir))
def get_model(
model_name: str,
pretrained_ckpt: Optional[str] = None,
args: Optional[Namespace] = None
) -> BaseModel:
"""Return an instance of a chosen model.
The instance can have configured by he arguments, and load some existing pretrained weights.
Note that this is different from get_model_reference(), which returns a reference to the model class. The instance,
returned by this function, is a class already instantiated. Therefore, the return of this function is equivalent to
"return get_model_reference()()", which looks confusing. This can be rewritten as
"model_ref = get_model_reference(); return model_ref()".
Parameters
----------
model_name : str
Name of the model to get an instance of.
pretrained_ckpt : Optional[str], optional
Name of the pretrained weight to load or a path to a local checkpoint file.
args : Optional[Namespace], optional
        Some arguments that will be provided to the model.
Returns
-------
BaseModel
The instance of the chosen model.
Raises
------
ValueError
If the given checkpoint name is not a valid choice.
ValueError
If a checkpoint name is given, but the model does not have any pretrained weights available.
See Also
--------
get_model_reference : To get a reference to the class of a model.
"""
model_ref = get_model_reference(model_name)
if args is None:
parser = model_ref.add_model_specific_args()
args = parser.parse_args([])
model = model_ref(args)
if pretrained_ckpt is None and args is not None and args.pretrained_ckpt is not None:
pretrained_ckpt = args.pretrained_ckpt
if pretrained_ckpt is not None:
if Path(pretrained_ckpt).exists():
ckpt_path = pretrained_ckpt
elif hasattr(model_ref, 'pretrained_checkpoints'):
ckpt_path = model_ref.pretrained_checkpoints.get(pretrained_ckpt)
if ckpt_path is None:
raise ValueError(
f'Invalid checkpoint name {pretrained_ckpt}. '
f'Choose one from {{{",".join(model.pretrained_checkpoints.keys())}}}')
else:
raise ValueError(f'Cannot find checkpoint {pretrained_ckpt} for model {model_name}')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if Path(ckpt_path).exists():
ckpt = torch.load(ckpt_path, map_location=torch.device(device))
else:
model_dir = Path(hub.get_dir()) / 'ptlflow' / 'checkpoints'
ckpt = hub.load_state_dict_from_url(
ckpt_path, model_dir=model_dir, map_location=torch.device(device), check_hash=True)
state_dict = ckpt['state_dict']
model.load_state_dict(state_dict)
return model
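# Minimal usage sketch for get_model (the checkpoint name 'things' is an
# assumption -- check the chosen model's `pretrained_checkpoints` dict for the
# names actually available):
#
#   import ptlflow
#   model = ptlflow.get_model('raft', pretrained_ckpt='things')
#   model.eval()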
def get_model_reference(
model_name: str
) -> BaseModel:
"""Return a reference to the class of a chosen model.
Note that this is different from get_model(), which returns an instance of a model. The reference, returned by this
function, is a class before instantiation. Therefore, the return of this function can be used to instantiate a model as
"model_ref = get_model_reference(); model_instance = model_ref()".
Parameters
----------
model_name : str
Name of the model to get a reference of.
Returns
-------
BaseModel
A reference to the chosen model.
Raises
------
ValueError
If the given name is not a valid choice.
See Also
--------
get_model : To get an instance of a model.
"""
try:
return models_dict[model_name]
except KeyError:
raise ValueError(f'Unknown model name: {model_name}. Choose from [{", ".join(models_dict.keys())}]')
def get_trainable_model_names() -> List[str]:
"""Return a list of model names that are able to be trained.
    This function returns the names of the models that have a loss function defined.
Returns
=======
List[str]
The list of the model names that can be trained.
"""
return [mname for mname in models_dict.keys() if get_model(mname).loss_fn is not None]
| 34.440476 | 123 | 0.689019 | [
"Apache-2.0"
] | hmorimitsu/ptlflow | ptlflow/__init__.py | 8,679 | Python |
# _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2018/5/12.
"""
from app import create_app
__author__ = 'Allen7D'
from app.models.base import db
from app.models.user import User
app = create_app()
with app.app_context():
with db.auto_commit():
        # Create a super administrator
user = User()
user.openid = '999'
user.email = '999@qq.com'
user.nickname = 'Super'
user.auth = 2
user.password = '123456'
db.session.add(user)
with db.auto_commit():
        # Create a regular administrator
user = User()
user.openid = '777'
user.email = '777@qq.com'
user.nickname = 'Admin'
user.auth = 1
user.password = '123456'
db.session.add(user)
| 22.53125 | 34 | 0.568655 | [
"MIT"
] | HuaiGuang10/mini-shop-server | fake.py | 757 | Python |
import requests
import pprint
from config import API_KEY
base_url = f'https://api.telegram.org/bot{API_KEY}/'
api_response = requests.get(base_url + 'getUpdates').json()
for update in api_response['result']:
message = update['message']
chat_id = message['chat']['id']
text = message['text']
reply_message = {
'chat_id': chat_id,
'text': text
}
requests.post(base_url + 'sendMessage', json=reply_message)
# pprint.pprint(api_response['result'][0]) | 24.7 | 63 | 0.668016 | [
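# For reference, a single update returned by getUpdates looks roughly like this
# (simplified -- see the Telegram Bot API docs for the full schema):
#
# {
#     "update_id": 123456789,
#     "message": {
#         "message_id": 1,
#         "chat": {"id": 987654321, "type": "private"},
#         "text": "hello"
#     }
# }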
"Apache-2.0"
] | keys4words/bots | telegramsLibs/api_telega_intro.py | 494 | Python |
# Metafier V2: writes directly to output.mc
# Uses numpy and memoization to speed up a crap ton & compress data a bit
# ===REQUIRES metatemplate11.mc===
import golly as g
import numpy as np
from shutil import copyfile
#Get the selection
selection = g.getselrect()
if not selection: g.exit("No selection.")
#Get the cells in the selection
cells = g.getcells(selection)
if not cells: g.exit("No pattern in selection")
if len(cells) % 3: cells = cells[:-1]
selw = selection[2]
selh = selection[3]
patternsize = 1 << int(np.ceil(np.log2(selh | selw)))
metapattern = np.zeros((patternsize, patternsize))
#Pseudo-convolution, to detect diagonal neighbors
# +1 +0 +2
# +0 *16 +0
# +4 +0 +8
for cell in np.reshape(cells, (-1, 3)):
selx = cell[0] - selection[0]
sely = cell[1] - selection[1]
metapattern[sely][selx] += 16 * cell[2]
if sely:
if selx:
metapattern[sely - 1][selx - 1] += 8
if selx + 1 < selw:
metapattern[sely - 1][selx + 1] += 4
if sely + 1 < selh:
if selx:
metapattern[sely + 1][selx - 1] += 2
if selx + 1 < selw:
metapattern[sely + 1][selx + 1] += 1
#Remove all B/S cells
metapattern[metapattern < 32] = np.nan
metapattern += 5630 - 32 #5632 is starting point of 11s in template
metapattern[np.isnan(metapattern)] = 0
metapattern = metapattern.astype(int)
#Using metatemplate11, memoization, and some recursion
def createLine(pattern, outfile, linenum = [5726], memo = {}): #linenum and memo are mutable function arguments, which are only initialized during function definition
if tuple(pattern.ravel().tolist()) not in memo: #If we haven't seen this type of pattern before, let's remember it
if pattern.shape[0] == 2: #Pattern is a leaf, write leaf line
outfile.write('{} {} {} {} {}\n'.format(pattern.shape[0].bit_length() + 10,
pattern[0, 0],
pattern[0, 1],
pattern[1, 0],
pattern[1, 1]))
else: #Pattern is a branch, keep going down quadtree
subpatterns = pattern.reshape(2, pattern.shape[0] >> 1, 2, pattern.shape[0] >> 1).swapaxes(1,2)
outfile.write('{} {} {} {} {}\n'.format(pattern.shape[0].bit_length() + 10,
createLine(subpatterns[0, 0], outfile),
createLine(subpatterns[0, 1], outfile),
createLine(subpatterns[1, 0], outfile),
createLine(subpatterns[1, 1], outfile)))
memo[tuple(pattern.ravel().tolist())] = linenum[0]
linenum[0] += 1
return memo[tuple(pattern.ravel().tolist())]
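# A minimal sketch of the mutable-default-argument memoization idiom used by
# createLine above (`fib` is a made-up example, not part of this script):
#
#   def fib(n, memo={}):              # `memo` is created once, at definition time
#       if n not in memo:
#           memo[n] = n if n < 2 else fib(n - 1) + fib(n - 2)
#       return memo[n]
#
# Every call shares the same dict, just as createLine shares `linenum` and `memo`
# across recursive calls and across repeated top-level invocations.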
copyfile('metatemplate11.mc', 'output.mc')
with open('output.mc', 'a') as outputfile:
createLine(metapattern, outputfile)
#Display output.mc
g.addlayer()
g.open('output.mc')
#TODO: Use metatemplate10?
| 40.379747 | 167 | 0.551097 | [
"MIT"
] | IkeoluwaStat/QFT | MetafierV2.py | 3,190 | Python |
import os
import pickle
import math
import pandas as pd
from const import *
def middle_save(obj, inf):
pickle.dump(obj, open(inf, "wb"), True)
def middle_load(inf):
return pickle.load(open(inf, "rb"))
def word2idx(sents, word2idx):
return [[word2idx[w] if w in word2idx else UNK for w in s] for s in sents]
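# Minimal sketch of the helper above (the ids below are assumptions, not a real
# vocabulary):
#
#   vocab = {WORD[PAD]: PAD, WORD[UNK]: UNK, "hello": 4}
#   word2idx([["hello", "unseen"]], vocab)   # -> [[4, UNK]]; unknown words map to UNK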
class Dictionary(object):
def __init__(self):
self.word2idx = {
WORD[PAD]: PAD,
WORD[UNK]: UNK,
WORD[BOS]: BOS,
WORD[EOS]: EOS
}
self.idx = len(self.word2idx)
def add(self, word):
if self.word2idx.get(word) is None:
self.word2idx[word] = self.idx
self.idx += 1
def __call__(self, sents, min_count):
words = [word for sent in sents for word in sent]
word_count = {w: 0 for w in set(words)}
for w in words:
word_count[w] += 1
ignored_word_count = 0
for word, count in word_count.items():
if count <= min_count:
ignored_word_count += 1
continue
self.add(word)
return ignored_word_count
def __len__(self):
return self.idx
def __str__(self):
        return "%s(size = %d)" % (self.__class__.__name__, self.idx)
class Corpus(object):
def __init__(self, max_ori_len=128, max_sum_len=15, min_word_count=1):
self.dict = Dictionary()
self.max_ori_len = max_ori_len
self.max_sum_len = max_sum_len
self._min_word_count = min_word_count
self.parse_data("data/test.csv", False)
self.parse_data("data/train.csv")
self.save()
def parse_data(self, _file, is_train=True):
def cut(x, list, ignore, max_len, is_summ):
if isinstance(x, float) and math.isnan(x):
if is_summ:
list.append(WORD[EOS])
else:
list.append("")
else:
x = x.split()
if len(x) > max_len:
x = x[:max_len]
ignore[0] += 1
if is_summ:
x += [WORD[EOS]]
list.append(x)
origins, summurys = [], []
ignore_ori_nums = [0]
ignore_sum_nums = [0]
df = pd.read_csv(_file)
df["original"].apply(cut, args=(
origins, ignore_ori_nums, self.max_ori_len, False))
df["summary"].apply(cut, args=(
summurys, ignore_sum_nums, self.max_sum_len, True))
if is_train:
ori_ignore = self.dict(origins + summurys, self._min_word_count)
self.train_origins = origins
self.train_summurys = summurys
self.train_labels = df["score"].values - 1
print("Ignored origin counts - [{}]".format(ori_ignore))
print(
'Train data - ignore original lines - [{}]'.format(ignore_ori_nums[0]))
print(
'Train data - ignore summary lines - [{}]'.format(ignore_sum_nums[0]))
else:
self.test_origins = origins
self.test_summurys = summurys
self.test_labels = df["score"].values - 1
print(
'Test data - ignore original lines - [{}]'.format(ignore_ori_nums[0]))
print(
'Test data - ignore summary lines - [{}]'.format(ignore_sum_nums[0]))
def save(self):
data = {
'max_ori_len': self.max_ori_len,
'max_sum_len': self.max_sum_len + 1,
'dict': {
'dict': self.dict.word2idx,
'dict_size': len(self.dict),
},
'train': {
'original': word2idx(self.train_origins, self.dict.word2idx),
'summary': word2idx(self.train_summurys, self.dict.word2idx),
'label': self.train_labels
},
'test': {
'original': word2idx(self.test_origins, self.dict.word2idx),
'summary': word2idx(self.test_summurys, self.dict.word2idx),
'label': self.test_labels
}
}
middle_save(data, "data/corpus")
print('dict length - [{}]'.format(len(self.dict)))
if __name__ == "__main__":
Corpus()
| 29.689655 | 87 | 0.532172 | [
"MIT"
] | ne7ermore/deeping-flow | hierarchical-sc/corpus.py | 4,305 | Python |
from .Essential_Functions import URL_Maker, Negative_Detector
from .Scrape_Index import Scrape_Index
from .Scrape_StockInfo import StockInfo
from .Scrape_StockData_Realtime import Realtime_StockData
| 40.6 | 62 | 0.871921 | [
"MIT"
] | Farhad-Shabani/TSETMC_Dashboard | src/__init__.py | 203 | Python |
from boa3.builtin import public
@public
def test_raise(arg: int):
x = Exception
if arg < 0:
raise x
| 13.111111 | 31 | 0.627119 | [
"Apache-2.0"
] | CityOfZion/neo3-boa | boa3_test/test_sc/exception_test/RaiseVariableException.py | 118 | Python |
#!/usr/bin/env python
"""
Help new users configure the database for use with social networks.
"""
import os
from datetime import datetime
# Fix Python 2.x.
try:
input = raw_input
except NameError:
pass
import django
from django.conf import settings
from django.core.management.utils import get_random_secret_key
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
settings.configure(
DEBUG=True,
TEMPLATES=[dict(
# DEBUG = True,
BACKEND='django.template.backends.django.DjangoTemplates',
APP_DIRS=True,
DIRS=[
os.path.join(BASE_DIR, 'allauthdemo'),
],
)],
)
try:
django.setup() # for Django >= 1.7
except AttributeError:
pass # must be < Django 1.7
from django.template.loader import get_template
from django.template import engines
commands_template = engines['django'].from_string("""
Run these commands:
python manage.py makemigrations allauthdemo_auth
python manage.py migrate
python manage.py createsuperuser
{% if facebook %}# Facebook
python manage.py set_auth_provider facebook {{facebook.client_id}} {{facebook.secret}}{% endif %}
{% if google %}# Google
python manage.py set_auth_provider google {{google.client_id}} {{google.secret}}{% endif %}
{% if github %}# GitHub
python manage.py set_auth_provider github {{github.client_id}} {{github.secret}}{% endif %}
{% if vk %}# VK
python manage.py set_auth_provider vk {{vk.client_id}} {{vk.secret}}{% endif %}
If you have other providers you can add them in that way.
""")
settings_template = get_template("settings.template.py")
def heading(text):
text = text.strip()
line = '-' * len(text)
print("\n%s\n%s\n%s\n" % (line, text, line))
def print_list(ls):
max_len = max([len(i) for i in ls])
num = len(str(len(ls))) #TODO: full list providers
line = '-' * (2+num+3+max_len+2)
for i in range(len(ls)):
print(line)
        print("| %d | %s " % (i + 1, ls[i]))
def ask_text(need, default=None):
need = need.strip()
if default:
msg = "\n%s? Default: [%s] > " % (need, default)
else:
msg = "\n%s? > " % need
while True:
response = input(msg)
if response:
return response
elif default is not None:
return default
else:
pass # raw_input('Please enter a value.')
providers = ['facebook', 'google', 'github', 'vk']
if __name__ == "__main__":
context = {
'now': str(datetime.now()),
'secret_key': get_random_secret_key(),
}
print_list(providers)
print("Please list comma-separated providers. Example: 1,2,3,4")
    correct_providers = [int(i) - 1 for i in input("Please enter: ").split(',')]
    for i in correct_providers:
p = providers[i]
heading(p)
        secret = ask_text("{} Secret".format(p))
        client_id = ask_text("{} Client ID".format(p))
context[p] = dict(secret=secret, client_id=client_id)
heading("Rendering settings...")
with open('allauthdemo/settings.py', 'w') as out:
out.write(settings_template.render(context, request=None))
print("OK")
heading("Next steps")
print(commands_template.render(context, request=None))
heading("Done")
| 26.699187 | 101 | 0.624848 | [
"MIT"
] | dodo325/demo-allauth-bootstrap-ru | configure_new.py | 3,284 | Python |
import os
import openprocurement.agreement.cfaua
from logging import getLogger
from pyramid.interfaces import IRequest
from openprocurement.api.interfaces import IContentConfigurator
from openprocurement.agreement.cfaua.interfaces import IClosedFrameworkAgreementUA
from openprocurement.agreement.cfaua.models.agreement import Agreement
from openprocurement.agreement.cfaua.adapters.configurator import CFAgreementUAConfigurator
from zope.configuration.xmlconfig import file as ZcmlFile
LOGGER = getLogger("openprocurement.agreement.cfaua")
def includeme(config):
LOGGER.info("Init agreement.cfaua plugin.")
config.add_agreement_type(Agreement)
config.registry.registerAdapter(
CFAgreementUAConfigurator, (IClosedFrameworkAgreementUA, IRequest), IContentConfigurator
)
config.scan("openprocurement.agreement.cfaua.views")
ZcmlFile(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "configure.zcml"),
package=openprocurement.agreement.cfaua,
)
| 32.645161 | 96 | 0.8083 | [
"Apache-2.0"
] | BohdanBorkivskyi/openprocurement.api | src/openprocurement/agreement/cfaua/includeme.py | 1,012 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script to demonstrate vaspy.incar functionality.
"""
import argparse
import vaspy
import vaspy.incar
from logging import DEBUG, INFO, Formatter, StreamHandler, getLogger
LOGLEVEL = DEBUG
logger = getLogger(__name__)
fmt = "%(asctime)s %(levelname)s %(name)s :%(message)s"
formatter = Formatter(fmt)
handler = StreamHandler()
handler.setLevel(LOGLEVEL)
logger.setLevel(LOGLEVEL)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.propagate = False
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"-r",
    help="""Show reformatted INCAR (use -i to edit in place)""",
action="store_true",
)
parser.add_argument("-i", help="""Edit the INCAR file in place""", action="store_true")
parser.add_argument(
"--lint",
    help="""Tiny and private version of a code checker for VASP""",
action="store_true",
)
parser.add_argument("incar_file", metavar="INCAR_file", nargs=1)
args = parser.parse_args()
assert not (
args.lint and (args.i or args.r)
), "Lint option and re-format option (-i, -r) are mutually exclusive."
logger.debug("args: {}".format(args))
incar: vaspy.incar.Incar = vaspy.load(args.incar_file[0])
if args.i:
with open(args.incar_file[0], mode="wt") as incar_file:
incar_file.write(incar.__str__())
if args.r:
print(incar)
if args.lint:
lint_msg = incar.lint_all()
    if lint_msg:  # with Python 3.8+, "lint_msg := incar.lint_all()" could be used here
print(lint_msg)
else:
print("ALL OK. Submit the job!!")
| 27.310345 | 87 | 0.703283 | [
"BSD-3-Clause"
] | arafune/vaspy | scripts/vaspy-incar.py | 1,584 | Python |
'''
blackbody.py - Color of thermal blackbodies.
Description:
Calculate the spectrum of a thermal blackbody at an arbitrary temperature.
Constants:
PLANCK_CONSTANT - Planck's constant, in J-sec
SPEED_OF_LIGHT - Speed of light, in m/sec
BOLTZMAN_CONSTANT - Boltzman's constant, in J/K
SUN_TEMPERATURE - Surface temperature of the Sun, in K
Functions:
blackbody_specific_intensity (wl_nm, T_K) -
Get the monochromatic specific intensity for a blackbody -
wl_nm = wavelength [nm]
T_K = temperature [K]
This is the energy radiated per second per unit wavelength per unit solid angle.
Reference - Shu, eq. 4.6, p. 78.
blackbody_spectrum (T_K) -
Get the spectrum of a blackbody, as a numpy array.
blackbody_color (T_K) -
Given a temperature (K), return the xyz color of a thermal blackbody.
Plots:
blackbody_patch_plot (T_list, title, filename) -
Draw a patch plot of blackbody colors for the given temperature range.
blackbody_color_vs_temperature_plot (T_list, title, filename) -
Draw a color vs temperature plot for the given temperature range.
blackbody_spectrum_plot (T_K) -
Draw the spectrum of a blackbody at the given temperature.
References:
Frank H. Shu, The Physical Universe. An Introduction to Astronomy,
University Science Books, Mill Valley, California. 1982. ISBN 0-935702-05-9.
Charles Kittel and Herbert Kroemer, Thermal Physics, 2nd edition,
W. H. Freeman, New York, 1980. ISBN 0-7167-1088-9.
License:
Copyright (C) 2008 Mark Kness
Author - Mark Kness - mkness@alumni.utexas.net
This file is part of ColorPy.
ColorPy is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ColorPy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with ColorPy. If not, see <http://www.gnu.org/licenses/>.
'''
import math, numpy, pylab
import colormodels
import ciexyz
import plots
# Physical constants in mks units
PLANCK_CONSTANT = 6.6237e-34 # J-sec
SPEED_OF_LIGHT = 2.997925e+08 # m/sec
BOLTZMAN_CONSTANT = 1.3802e-23 # J/K
SUN_TEMPERATURE = 5778.0 # K
def blackbody_specific_intensity (wl_nm, T_K):
'''Get the monochromatic specific intensity for a blackbody -
wl_nm = wavelength [nm]
T_K = temperature [K]
This is the energy radiated per second per unit wavelength per unit solid angle.
Reference - Shu, eq. 4.6, p. 78.'''
# precalculations that could be made global
a = (PLANCK_CONSTANT * SPEED_OF_LIGHT) / (BOLTZMAN_CONSTANT)
b = (2.0 * PLANCK_CONSTANT * SPEED_OF_LIGHT * SPEED_OF_LIGHT)
wl_m = wl_nm * 1.0e-9
try:
exponent = a / (wl_m * T_K)
except ZeroDivisionError:
# treat same as large exponent
return 0.0
if exponent > 500.0:
# so large that the final result is nearly zero - avoid the giant intermediate
return 0.0
specific_intensity = b / (math.pow (wl_m, 5) * (math.exp (exponent) - 1.0))
return specific_intensity
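# Quick sanity check (not part of the original module): Wien's displacement law
# puts the peak of this spectrum near 2.898e-3 / T_K meters, i.e. about 501 nm
# for the Sun, so the specific intensity there should exceed the value at twice
# that wavelength:
#
#   peak_nm = 2.898e-3 / SUN_TEMPERATURE * 1.0e9    # ~501.6 nm
#   assert blackbody_specific_intensity (peak_nm, SUN_TEMPERATURE) > \
#       blackbody_specific_intensity (2.0 * peak_nm, SUN_TEMPERATURE)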
def blackbody_spectrum (T_K):
'''Get the spectrum of a blackbody, as a numpy array.'''
spectrum = ciexyz.empty_spectrum()
(num_rows, num_cols) = spectrum.shape
for i in xrange (0, num_rows):
specific_intensity = blackbody_specific_intensity (spectrum [i][0], T_K)
# scale by size of wavelength interval
spectrum [i][1] = specific_intensity * ciexyz.delta_wl_nm * 1.0e-9
return spectrum
def blackbody_color (T_K):
'''Given a temperature (K), return the xyz color of a thermal blackbody.'''
spectrum = blackbody_spectrum (T_K)
xyz = ciexyz.xyz_from_spectrum (spectrum)
return xyz
#
# Figures
#
def blackbody_patch_plot (T_list, title, filename):
'''Draw a patch plot of blackbody colors for the given temperature range.'''
xyz_colors = []
color_names = []
for Ti in T_list:
xyz = blackbody_color (Ti)
xyz_colors.append (xyz)
name = '%g K' % (Ti)
color_names.append (name)
plots.xyz_patch_plot (xyz_colors, color_names, title, filename)
def blackbody_color_vs_temperature_plot (T_list, title, filename):
'''Draw a color vs temperature plot for the given temperature range.'''
num_T = len (T_list)
rgb_list = numpy.empty ((num_T, 3))
for i in xrange (0, num_T):
T_i = T_list [i]
xyz = blackbody_color (T_i)
rgb_list [i] = colormodels.rgb_from_xyz (xyz)
# note that b and g become negative for low T - MatPlotLib skips those on the semilog plot.
plots.color_vs_param_plot (
T_list,
rgb_list,
title,
filename,
plotfunc = pylab.semilogy,
tight = True,
xlabel = r'Temperature (K)',
ylabel = r'RGB Color')
def blackbody_spectrum_plot (T_K):
'''Draw the spectrum of a blackbody at the given temperature.'''
spectrum = blackbody_spectrum (T_K)
title = 'Blackbody Spectrum - T %d K' % (int (T_K))
filename = 'BlackbodySpectrum-%dK' % (int (T_K))
plots.spectrum_plot (
spectrum,
title,
filename,
xlabel = 'Wavelength (nm)',
ylabel = 'Specific Intensity')
#ylabel = 'Intensity ($W/m^2$)') # with LaTex symbols, the axis text gets too big...
# Create sample figures
def figures ():
'''Create some blackbody plots.'''
# patch plots
T_list_0 = plots.log_interpolate ( 1200.0, 20000.0, 48)
T_list_hot = plots.log_interpolate (10000.0, 40000.0, 24)
T_list_cool = plots.log_interpolate ( 950.0, 1200.0, 24)
blackbody_patch_plot (T_list_0, 'Blackbody Colors', 'Blackbody-Patch')
blackbody_patch_plot (T_list_hot, 'Hot Blackbody Colors', 'Blackbody-HotPatch')
blackbody_patch_plot (T_list_cool, 'Cool Blackbody Colors', 'Blackbody-CoolPatch')
# color vs temperature
blackbody_color_vs_temperature_plot (range (1200, 16000, 50), 'Blackbody Colors', 'Blackbody-Colors')
blackbody_color_vs_temperature_plot (range (10000, 40000, 100), 'Hot Blackbody Colors', 'Blackbody-HotColors')
blackbody_color_vs_temperature_plot (range (950, 1200, 1), 'Cool Blackbody Colors', 'Blackbody-CoolColors')
# spectrum of specific temperatures
blackbody_spectrum_plot (2000.0)
blackbody_spectrum_plot (3000.0) # Proxima Centauri
blackbody_spectrum_plot (SUN_TEMPERATURE) # Sun
blackbody_spectrum_plot (11000.0) # Rigel
blackbody_spectrum_plot (15000.0)
| 35.852632 | 116 | 0.695831 | [
"MIT"
] | gmweir/QuasiOptics | colorpy/colorpy-0.1.0/blackbody.py | 6,812 | Python |
import mugReader
from flask import Flask, request
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
class readMugRFID(Resource):
def get(self):
return {'mugId': mugReader.readMug()}
api.add_resource(brewSettings, '/mugReader/')
if __name__ == "__main__":
#remove host for production
app.run(host= '192.168.1.183')
| 17.65 | 45 | 0.733711 | [
"MPL-2.0"
] | joelhaasnoot/MugsyDev | endpoints/mugReader.py | 353 | Python |
#!/usr/bin/env python
#from .core import *
import numpy as np
import pandas as pd
import shutil
import urllib
import urlparse
from os.path import splitext, basename
import os
from os import sys, path
from pprint import pprint
import StringIO
import db
from gp import *
from core import *
from IPython.core.debugger import Tracer
class Annotation(UploadCsvConvert):
def __init__(self, xe):
xe.attrib['newCols'] = 'gid,annotation_type_id,content,annotation_field1,ds,tax_id'
UploadCsvConvert.__init__(self,xe=xe,dest='annotation')
self.type_col = 'annotation_type_id'
def get_type_col_value_sql(self):
return 'SELECT annotation_type_id FROM %s.annotation_type WHERE annotation_type_name = ?' % SyncDB.DATABASE
| 25.965517 | 115 | 0.759628 | [
"Apache-2.0"
] | cozy9/Metascape | database/task_class/annotation.py | 753 | Python |
# -*- coding: utf-8 -*-
import io
import json
import os
import sys
import shutil
from os import path
import django
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
from django.conf import settings
import blueapps
PY_VER = sys.version
class Command(TemplateCommand):
    help = u"Initialize a development sample based on the BlueKing development framework"
def add_arguments(self, parser):
parser.add_argument('directory', nargs='?', default='./',
help='Optional destination directory')
def handle(self, **options):
target = options.pop('directory')
        # Read the original content first
if not path.exists('config/default.py'):
raise CommandError("config/default.py does not exist,"
" please init a django project first.")
if PY_VER[0] == '2':
old_file = open('config/default.py')
else:
old_file = open('config/default.py', encoding='utf-8')
# if some directory is given, make sure it's nicely expanded
top_dir = path.abspath(path.expanduser(target))
if not path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please init first." % top_dir)
if not path.exists(path.join(top_dir, 'manage.py')):
raise CommandError("Current directory '%s' is not "
"a django project dir, please init first. "
"(bk-admin init ${app_code})" %
top_dir)
base_subdir = 'weixin_template'
append_file_tuple = (('', 'requirements.txt'),)
# Setup a stub settings environment for template rendering
if not settings.configured:
settings.configure()
django.setup()
template_dir = path.join(blueapps.__path__[0], 'conf', base_subdir)
run_ver = None
if PY_VER[0] == '2':
conf_file = open(path.join(os.getcwd(), 'config', '__init__.py'))
else:
conf_file = open(path.join(os.getcwd(), 'config', '__init__.py'), encoding='utf-8')
for line in conf_file.readlines():
if line.startswith('RUN_VER'):
run_ver = line[11:-2]
conf_file.close()
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
relative_dir = root[prefix_length:]
target_dir = path.join(top_dir, relative_dir)
if not path.exists(target_dir):
os.mkdir(target_dir)
flag = root.endswith('sites')
for dirname in dirs[:]:
if (
dirname.startswith('.') or
dirname == '__pycache__' or
(flag and dirname != run_ver)
):
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class', '.json')):
# Ignore some files as they cause various breakages.
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir, filename)
for old_suffix, new_suffix in self.rewrite_template_suffixes:
if new_path.endswith(old_suffix):
new_path = new_path[:-len(old_suffix)] + new_suffix
break # Only rewrite once
with io.open(old_path, 'rb') as template_file:
content = template_file.read()
w_mode = 'wb'
for _root, _filename in append_file_tuple:
if _root == relative_dir and _filename == filename:
w_mode = 'ab'
with io.open(new_path, w_mode) as new_file:
new_file.write(content)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
        # Modify the file
modify_default_file(old_file)
# Read the original default file, then append to and overwrite its contents
def modify_default_file(old_file):
    # Open the file to be overwritten and the replacement json file
if PY_VER[0] == '2':
with open("%s/conf/weixin_template/config/default.json" % blueapps.__path__[0],
'r') as json_file:
get_default_content(old_file, json_file)
else:
with open("%s/conf/weixin_template/config/default.json" % blueapps.__path__[0],
'r', encoding='utf-8') as json_file:
get_default_content(old_file, json_file)
def get_default_content(old_file, json_file):
with old_file as old_file:
        # Load the json data
result_content = old_file.read()
json_dict = json.load(json_file)
        # Replace or append content according to each key
for replace_property in json_dict:
            # Get the value for this key
propertys = json_dict.get(replace_property)
            # Find where this key appears in the file
start_index = result_content.find(str(replace_property))
            # Get the content for this key
content = propertys.get('content')
            # mode 'add': append content
if propertys.get('mode') == 'add':
end_index = result_content.find(')', start_index) - 1
temp_content = result_content[start_index:end_index]
                # Check whether the last character is a trailing comma
if temp_content[-1] == ',' or temp_content[-1] == '(':
temp_content += '\n'
else:
temp_content += ',\n'
                # Insert the replacement content; it must be converted with str()
result_content = ''.join(
[result_content[:start_index], temp_content,
str(content),
result_content[end_index:]])
            # mode 'cover': overwrite the content
elif propertys.get('mode') == 'cover':
end_index = result_content.find('\n', start_index)
                # i.e. it is already True, so no overwrite is needed
if result_content[start_index: end_index].strip() == 'IS_USE_CELERY = False':
continue
                # Shift start_index so the variable name itself is not overwritten
start_index += len(replace_property)
                # Overwrite the content
result_content = ''.join(
[result_content[:start_index],
'%s' % str(content),
result_content[end_index:]])
else:
                # Other cases
break
if PY_VER[0] == '2':
with open('config/default.py', 'w') as default_file:
default_file.write(result_content)
else:
with open('config/default.py', 'w',
encoding='utf-8') as default_file:
default_file.write(result_content) | 39.093923 | 95 | 0.535048 | [
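# For illustration only -- a config/default.json consumed by get_default_content
# might look like the sketch below. The keys and contents are assumptions, except
# IS_USE_CELERY, which the code above checks explicitly:
#
#   {
#       "INSTALLED_APPS": {"mode": "add", "content": "    'weixin',"},
#       "IS_USE_CELERY": {"mode": "cover", "content": " = True"}
#   }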
"Apache-2.0"
] | xianmao/bk-sops | blueapps/contrib/bk_commands/management/commands/startweixin.py | 7,390 | Python |
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.coins_forwarding_success_data_item import CoinsForwardingSuccessDataItem
globals()['CoinsForwardingSuccessDataItem'] = CoinsForwardingSuccessDataItem
class CoinsForwardingSuccessData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'product': (str,), # noqa: E501
'event': (str,), # noqa: E501
'item': (CoinsForwardingSuccessDataItem,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'product': 'product', # noqa: E501
'event': 'event', # noqa: E501
'item': 'item', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, product, event, item, *args, **kwargs): # noqa: E501
"""CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.product = product
self.event = event
self.item = item
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, product, event, item, *args, **kwargs): # noqa: E501
"""CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.product = product
self.event = event
self.item = item
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
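# Minimal construction sketch (the string values are placeholders, and
# CoinsForwardingSuccessDataItem has its own required fields -- see that model):
#
#   data = CoinsForwardingSuccessData(
#       product="<product name>",
#       event="<event name>",
#       item=CoinsForwardingSuccessDataItem(...),
#   )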
| 44.587189 | 484 | 0.58624 | [
"MIT"
] | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | cryptoapis/model/coins_forwarding_success_data.py | 12,529 | Python |
#!/usr/bin/env python3.7
#-*-coding:utf-8-*-
import json
import discord
PATH = "config.json"
def singleton(class_):
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
@singleton
class Config:
def __init__(self):
with open(PATH,"r") as configfile:
self.config = json.load(configfile)
self.token = self.config["token"]
self.owners = self.config["owner"]
self.guildID = None
if self.config["self-guild"].get("mode","load") == "load":
self.guildID = self.config["self-guild"]["ID"]
self.guildRegion = self.parseRegion(self.config["self-guild"]["region"])
self.guild = None
self.adminrole = None
def __getitem__(self,item):
return self.config[item]
def initGuild(self, guild):
self.guild = guild
self.adminrole = discord.utils.get(self.guild.roles, name="Masakaki")
@classmethod
def parseRegion(cl, regionString):
key = regionString.lower()
if (key == "amsterdam"): return discord.VoiceRegion.amsterdam
elif (key == "brazil"): return discord.VoiceRegion.brazil
elif (key == "eu_central"): return discord.VoiceRegion.eu_central
elif (key == "eu_west"): return discord.VoiceRegion.eu_west
elif (key == "frankfurt"): return discord.VoiceRegion.frankfurt
elif (key == "hongkong"): return discord.VoiceRegion.hongkong
elif (key == "india"): return discord.VoiceRegion.india
elif (key == "japan"): return discord.VoiceRegion.japan
elif (key == "london"): return discord.VoiceRegion.london
elif (key == "russia"): return discord.VoiceRegion.russia
elif (key == "singapore"): return discord.VoiceRegion.singapore
elif (key == "southafrica"): return discord.VoiceRegion.southafrica
elif (key == "sydney"): return discord.VoiceRegion.sydney
elif (key == "us_central"): return discord.VoiceRegion.us_central
elif (key == "us_east"): return discord.VoiceRegion.us_east
elif (key == "us_south"): return discord.VoiceRegion.us_south
elif (key == "us_west"): return discord.VoiceRegion.us_west
return None
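# Usage sketch (illustrative): because of the @singleton decorator every call to
# Config() returns the same instance, so config.json is parsed only once.
#
#   cfg = Config()
#   assert cfg is Config()
#   token = cfg["token"]          # __getitem__ gives raw access to config.json entries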
| 38.85 | 80 | 0.640927 | [
"MIT"
] | ttgc/zigotoland | src/utils/config.py | 2,331 | Python |
from __future__ import unicode_literals
import pytest
import itertools
import boto
import boto3
from botocore.exceptions import ClientError
from boto.exception import EC2ResponseError
from boto.ec2.instance import Reservation
import sure # noqa
from moto import mock_ec2_deprecated, mock_ec2
import pytest
from tests import EXAMPLE_AMI_ID
@mock_ec2_deprecated
def test_add_tag():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as ex:
instance.add_tag("a key", "some value", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
)
instance.add_tag("a key", "some value")
chain = itertools.chain.from_iterable
existing_instances = list(
chain([res.instances for res in conn.get_all_reservations()])
)
existing_instances.should.have.length_of(1)
existing_instance = existing_instances[0]
existing_instance.tags["a key"].should.equal("some value")
@mock_ec2_deprecated
def test_remove_tag():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
with pytest.raises(EC2ResponseError) as ex:
instance.remove_tag("a key", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set"
)
instance.remove_tag("a key")
conn.get_all_tags().should.have.length_of(0)
instance.add_tag("a key", "some value")
conn.get_all_tags().should.have.length_of(1)
instance.remove_tag("a key", "some value")
@mock_ec2_deprecated
def test_get_all_tags():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_with_special_characters():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("a key", "some<> value")
tags = conn.get_all_tags()
tag = tags[0]
tag.name.should.equal("a key")
tag.value.should.equal("some<> value")
@mock_ec2_deprecated
def test_create_tags():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
tag_dict = {
"a key": "some value",
"another key": "some other value",
"blank key": "",
}
with pytest.raises(EC2ResponseError) as ex:
conn.create_tags(instance.id, tag_dict, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
)
conn.create_tags(instance.id, tag_dict)
tags = conn.get_all_tags()
set([key for key in tag_dict]).should.equal(set([tag.name for tag in tags]))
set([tag_dict[key] for key in tag_dict]).should.equal(
set([tag.value for tag in tags])
)
@mock_ec2_deprecated
def test_tag_limit_exceeded():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
tag_dict = {}
for i in range(51):
tag_dict["{0:02d}".format(i + 1)] = ""
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags(instance.id, tag_dict)
cm.value.code.should.equal("TagLimitExceeded")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
instance.add_tag("a key", "a value")
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags(instance.id, tag_dict)
cm.value.code.should.equal("TagLimitExceeded")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
tags = conn.get_all_tags()
tag = tags[0]
tags.should.have.length_of(1)
tag.name.should.equal("a key")
tag.value.should.equal("a value")
@mock_ec2_deprecated
def test_invalid_parameter_tag_null():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as cm:
instance.add_tag("a key", None)
cm.value.code.should.equal("InvalidParameterValue")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
@mock_ec2_deprecated
def test_invalid_id():
conn = boto.connect_ec2("the_key", "the_secret")
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags("ami-blah", {"key": "tag"})
cm.value.code.should.equal("InvalidID")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
with pytest.raises(EC2ResponseError) as cm:
conn.create_tags("blah-blah", {"key": "tag"})
cm.value.code.should.equal("InvalidID")
cm.value.status.should.equal(400)
# cm.value.request_id.should_not.be.none
@mock_ec2_deprecated
def test_get_all_tags_resource_id_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"resource-id": instance.id})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
tags = conn.get_all_tags(filters={"resource-id": image_id})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(image_id)
tag.res_type.should.equal("image")
tag.name.should.equal("an image key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_resource_type_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"resource-type": "instance"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
tags = conn.get_all_tags(filters={"resource-type": "image"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(image_id)
tag.res_type.should.equal("image")
tag.name.should.equal("an image key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_key_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"key": "an instance key"})
tag = tags[0]
tags.should.have.length_of(1)
tag.res_id.should.equal(instance.id)
tag.res_type.should.equal("instance")
tag.name.should.equal("an instance key")
tag.value.should.equal("some value")
@mock_ec2_deprecated
def test_get_all_tags_value_filter():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.add_tag("an instance key", "some value")
reservation_b = conn.run_instances(EXAMPLE_AMI_ID)
instance_b = reservation_b.instances[0]
instance_b.add_tag("an instance key", "some other value")
reservation_c = conn.run_instances(EXAMPLE_AMI_ID)
instance_c = reservation_c.instances[0]
instance_c.add_tag("an instance key", "other value*")
reservation_d = conn.run_instances(EXAMPLE_AMI_ID)
instance_d = reservation_d.instances[0]
instance_d.add_tag("an instance key", "other value**")
reservation_e = conn.run_instances(EXAMPLE_AMI_ID)
instance_e = reservation_e.instances[0]
instance_e.add_tag("an instance key", "other value*?")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.add_tag("an image key", "some value")
tags = conn.get_all_tags(filters={"value": "some value"})
tags.should.have.length_of(2)
tags = conn.get_all_tags(filters={"value": "some*value"})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={"value": "*some*value"})
tags.should.have.length_of(3)
tags = conn.get_all_tags(filters={"value": "*some*value*"})
tags.should.have.length_of(3)
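    # a backslash-escaped '*' or '?' in the value filter is matched literally
    # instead of acting as a wildcard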
tags = conn.get_all_tags(filters={"value": r"*value\*"})
tags.should.have.length_of(1)
tags = conn.get_all_tags(filters={"value": r"*value\*\*"})
tags.should.have.length_of(1)
tags = conn.get_all_tags(filters={"value": r"*value\*\?"})
tags.should.have.length_of(1)
@mock_ec2_deprecated
def test_retrieved_instances_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
reservation.should.be.a(Reservation)
reservation.instances.should.have.length_of(1)
instance = reservation.instances[0]
reservations = conn.get_all_reservations()
reservations.should.have.length_of(1)
reservations[0].id.should.equal(reservation.id)
instances = reservations[0].instances
instances.should.have.length_of(1)
instances[0].id.should.equal(instance.id)
conn.create_tags([instance.id], tags_to_be_set)
reservations = conn.get_all_reservations()
instance = reservations[0].instances[0]
retrieved_tags = instance.tags
# Cleanup of instance
conn.terminate_instances([instances[0].id])
# Check whether tag is present with correct value
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_retrieved_volumes_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2("the_key", "the_secret")
volume = conn.create_volume(80, "us-east-1a")
all_volumes = conn.get_all_volumes()
volume = all_volumes[0]
conn.create_tags([volume.id], tags_to_be_set)
# Fetch the volume again
all_volumes = conn.get_all_volumes()
volume = all_volumes[0]
retrieved_tags = volume.tags
volume.delete()
# Check whether tag is present with correct value
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_retrieved_snapshots_must_contain_their_tags():
tag_key = "Tag name"
tag_value = "Tag value"
tags_to_be_set = {tag_key: tag_value}
conn = boto.connect_ec2(
aws_access_key_id="the_key", aws_secret_access_key="the_secret"
)
volume = conn.create_volume(80, "eu-west-1a")
snapshot = conn.create_snapshot(volume.id)
conn.create_tags([snapshot.id], tags_to_be_set)
# Fetch the snapshot again
all_snapshots = conn.get_all_snapshots()
snapshot = [item for item in all_snapshots if item.id == snapshot.id][0]
retrieved_tags = snapshot.tags
conn.delete_snapshot(snapshot.id)
volume.delete()
# Check whether tag is present with correct value
retrieved_tags[tag_key].should.equal(tag_value)
@mock_ec2_deprecated
def test_filter_instances_by_wildcard_tags():
conn = boto.connect_ec2(
aws_access_key_id="the_key", aws_secret_access_key="the_secret"
)
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance_a = reservation.instances[0]
instance_a.add_tag("Key1", "Value1")
reservation_b = conn.run_instances(EXAMPLE_AMI_ID)
instance_b = reservation_b.instances[0]
instance_b.add_tag("Key1", "Value2")
reservations = conn.get_all_reservations(filters={"tag:Key1": "Value*"})
reservations.should.have.length_of(2)
reservations = conn.get_all_reservations(filters={"tag-key": "Key*"})
reservations.should.have.length_of(2)
reservations = conn.get_all_reservations(filters={"tag-value": "Value*"})
reservations.should.have.length_of(2)
@mock_ec2
def test_create_volume_with_tags():
client = boto3.client("ec2", "us-west-2")
response = client.create_volume(
AvailabilityZone="us-west-2",
Encrypted=False,
Size=40,
TagSpecifications=[
{
"ResourceType": "volume",
"Tags": [{"Key": "TEST_TAG", "Value": "TEST_VALUE"}],
}
],
)
assert response["Tags"][0]["Key"] == "TEST_TAG"
@mock_ec2
def test_create_snapshot_with_tags():
client = boto3.client("ec2", "us-west-2")
volume_id = client.create_volume(
AvailabilityZone="us-west-2",
Encrypted=False,
Size=40,
TagSpecifications=[
{
"ResourceType": "volume",
"Tags": [{"Key": "TEST_TAG", "Value": "TEST_VALUE"}],
}
],
)["VolumeId"]
snapshot = client.create_snapshot(
VolumeId=volume_id,
TagSpecifications=[
{
"ResourceType": "snapshot",
"Tags": [{"Key": "TEST_SNAPSHOT_TAG", "Value": "TEST_SNAPSHOT_VALUE"}],
}
],
)
expected_tags = [{"Key": "TEST_SNAPSHOT_TAG", "Value": "TEST_SNAPSHOT_VALUE"}]
assert snapshot["Tags"] == expected_tags
@mock_ec2
def test_create_tag_empty_resource():
# create ec2 client in us-west-1
client = boto3.client("ec2", region_name="us-west-1")
# create tag with empty resource
with pytest.raises(ClientError) as ex:
client.create_tags(Resources=[], Tags=[{"Key": "Value"}])
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter resourceIdSet"
)
@mock_ec2
def test_delete_tag_empty_resource():
# create ec2 client in us-west-1
client = boto3.client("ec2", region_name="us-west-1")
# delete tag with empty resource
with pytest.raises(ClientError) as ex:
client.delete_tags(Resources=[], Tags=[{"Key": "Value"}])
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter resourceIdSet"
)
@mock_ec2
def test_retrieve_resource_with_multiple_tags():
ec2 = boto3.resource("ec2", region_name="us-west-1")
blue, green = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=2, MaxCount=2)
ec2.create_tags(
Resources=[blue.instance_id],
Tags=[
{"Key": "environment", "Value": "blue"},
{"Key": "application", "Value": "api"},
],
)
ec2.create_tags(
Resources=[green.instance_id],
Tags=[
{"Key": "environment", "Value": "green"},
{"Key": "application", "Value": "api"},
],
)
green_instances = list(ec2.instances.filter(Filters=(get_filter("green"))))
green_instances.should.equal([green])
blue_instances = list(ec2.instances.filter(Filters=(get_filter("blue"))))
blue_instances.should.equal([blue])
def get_filter(color):
return [
{"Name": "tag-key", "Values": ["application"]},
{"Name": "tag-value", "Values": ["api"]},
{"Name": "tag-key", "Values": ["environment"]},
{"Name": "tag-value", "Values": [color]},
]
| 33.60198 | 137 | 0.687194 | [
"Apache-2.0"
] | monty16597/moto | tests/test_ec2/test_tags.py | 16,969 | Python |
from celery.loaders.base import BaseLoader
class AppLoader(BaseLoader):
def on_worker_init(self):
self.import_default_modules()
def read_configuration(self):
return {}
| 17.818182 | 42 | 0.709184 | [
"BSD-3-Clause"
] | frac/celery | celery/loaders/app.py | 196 | Python |
class AvalaraExceptionResponse(Exception):
pass | 25.5 | 42 | 0.823529 | [
"MIT"
] | SendOutCards/py-avalara | avalara/exceptions.py | 51 | Python |
'''define the config file for ade20k and resnet101os16'''
import os
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'ade20k',
'rootdir': os.path.join(os.getcwd(), 'ADE20k'),
})
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 130
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify segmentor config
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
{
'num_classes': 150,
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'deeplabv3_resnet101os16_ade20k_train',
'logfilepath': 'deeplabv3_resnet101os16_ade20k_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'deeplabv3_resnet101os16_ade20k_test',
'logfilepath': 'deeplabv3_resnet101os16_ade20k_test/test.log',
'resultsavepath': 'deeplabv3_resnet101os16_ade20k_test/deeplabv3_resnet101os16_ade20k_results.pkl'
}
) | 25.913043 | 106 | 0.71896 | [
"MIT"
] | CharlesPikachu/sssegmentation | ssseg/cfgs/deeplabv3/cfgs_ade20k_resnet101os16.py | 1,192 | Python |
# Generated by Django 2.1.2 on 2018-11-01 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("jbank", "0024_auto_20180425_1704"),
]
operations = [
migrations.AddField(
model_name="payout",
name="reference",
field=models.CharField(blank=True, default="", max_length=32, verbose_name="recipient reference"),
),
migrations.AlterField(
model_name="payout",
name="messages",
field=models.TextField(blank=True, default="", verbose_name="recipient messages"),
),
]
| 26.833333 | 110 | 0.604037 | [
"MIT"
] | bachvtuan/django-jbank | jbank/migrations/0025_auto_20181101_1430.py | 644 | Python |
import os
import sys
import numpy as np
import caffe
import argparse
parser = argparse.ArgumentParser(description='Computes 5-fold cross-validation results over Twitter five-agrees dataset')
parser.add_argument('-ov', '--oversampling', help='Enables (1) or disables (0) oversampling')
args = parser.parse_args()
if args.oversampling == 0:
oversampling = False
elif args.oversampling == 1:
oversampling = True
else:
sys.exit("oversampling must be 0 or 1")
subsets = ['test1', 'test2', 'test3', 'test4', 'test5']
mean_file = 'ilsvrc_2012_mean.npy'
accuracies = []
output_string = ""
for subset in subsets:
# Update paths for this subset
deploy_path = 'sentiment_deploy.prototxt'
caffemodel_path = 'twitter_finetuned_' + subset + '_iter_180.caffemodel'
ground_truth = 'ground_truth/' + subset + '/test.txt'
instanceList = []
correctLabels = 0
incorrectLabels = 0
positiveLabels = 0
negativeLabels = 0
positivePredictions = 0
negativePredictions = 0
gt_file = open(ground_truth, "r")
# Store images in a list
while (True):
line = gt_file.readline()
# Check if we have reached the end
if (len(line) == 0):
break
# Add the line to the list
instanceList.append(line)
# Load network
net = caffe.Classifier(deploy_path,
caffemodel_path,
mean=np.load(mean_file).mean(1).mean(1),
image_dims=(256, 256),
channel_swap=(2, 1, 0),
raw_scale=255)
# Loop through the ground truth file, predict each image's label and store the wrong ones
counter = 0
for instance in instanceList:
values = instance.split()
image_path = values[0]
sentiment = int(values[1])
# Load image
im = caffe.io.load_image(image_path)
# Make a forward pass and get the score
prediction = net.predict([im], oversample=oversampling)
# Check if the prediction was correct or not
if prediction[0].argmax() == sentiment:
correctLabels += 1
else:
incorrectLabels += 1
# Update label counter
if sentiment == 0:
negativeLabels += 1
else:
positiveLabels += 1
# Update prediction counter (negative = 0, positive = 1)
if prediction[0].argmax() == 0:
negativePredictions += 1
else:
positivePredictions += 1
counter += 1
if counter % 40 == 0:
print subset + ', ' + str(counter)
sys.stdout.flush()
gt_file.close()
accuracy = 100. * correctLabels / (correctLabels + incorrectLabels)
accuracies.append(accuracy)
# Print accuracy results
print '------------- ' + subset + ' -------------'
print 'Accuracy = ', str(accuracy)
print '---------------------------------'
output_string += 'Subset: {0}: \n Positive images: {1}\n Negative images: {2}\n Positive predictions: {3}\n Negative predictions: {4}\n'.format(
subset, str(positiveLabels), str(negativeLabels), str(positivePredictions), str(negativePredictions))
print '\nRESULTS:'
for i in range(0, 5):
print subsets[i] + ': ' + str(accuracies[i]) + '%'
print '\nMean accuracy = ' + str(1. * sum(accuracies) / len(accuracies))
print "\n-------------------------------------\n"
print output_string
| 31.590909 | 160 | 0.58964 | [
"MIT"
] | imatge-upc/sentiment-2015-asm | compute_cross_validation_accuracy.py | 3,475 | Python |
"""
Make sure that tiddler fields which are not strings
are stringified, otherwise, the text serialization will
assplode.
"""
from tiddlyweb.serializer import Serializer
from tiddlyweb.model.tiddler import Tiddler
def setup_module(module):
pass
def test_float_field():
tiddler = Tiddler('foo', 'bar')
tiddler.fields['float'] = 100.5
serializer = Serializer('text')
serializer.object = tiddler
assert '100.5' in '%s' % serializer
| 20.909091 | 56 | 0.719565 | [
"BSD-3-Clause"
] | funkyeah/tiddlyweb | test/test_tiddler_fields_as_strings.py | 460 | Python |
# a simple script to rename multiple files
import os
import re
path = 'myimages/'
files = os.listdir(path)
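# natural sort: split each name into digit and non-digit chunks so that, e.g.,
# "img10.jpg" comes after "img2.jpg" rather than before it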
files.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
for i, file in enumerate(files):
os.rename(path + file, path + "rename_{}".format(i)+".jpg")
print('done!')
| 27.166667 | 100 | 0.653374 | [
"CC0-1.0"
] | sagorbrur/sagorbrur.github.io | simple_task_python/rename.py | 326 | Python |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# vcdomainprovisioningconfig filter
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
def migrate(self):
VCFilter = self.db.mock_model(model_name="VCFilter", db_table="vc_vcfilter")
self.db.add_column(
"vc_vcdomainprovisioningconfig",
"vc_filter",
models.ForeignKey(
VCFilter, verbose_name="VC Filter", null=True, blank=True, on_delete=models.CASCADE
),
)
| 33.076923 | 99 | 0.495349 | [
"BSD-3-Clause"
] | ewwwcha/noc | vc/migrations/0010_vcdomainprobvisioningconfig_vcfilter.py | 860 | Python |
# Overloading Methods
class Point():
def __init__(self, x=0, y=0):
self.x = x
self.y = y
self.coords = (self.x, self.y)
def move(self, x, y):
self.x += x
self.y += y
# Overload __dunder__
def __add__(self, p):
return Point(self.x + p.x, self.y + p.y)
def __sub__(self, p):
return Point(self.x - p.x, self.y - p.y)
def __mul__(self, p):
return Point(self.x * p.x, self.y * p.y)
def length(self):
import math
return math.sqrt(self.x**2 + self.y**2)
def __gt__(self, p):
return self.length() > p.length()
def __ge__(self, p):
return self.length() >= p.length()
def __lt__(self, p):
return self.length() < p.length()
def __le__(self, p):
return self.length() <= p.length()
def __eq__(self, p):
        # this math does not always work out correctly; remember float comparisons
        # return self.length() == p.length()
return self.x == p.x and self.y == p.y
    # __str__ is needed so the results of the overloaded operators print in a readable form
def __str__(self):
return "(" + str(self.x) + "," + str(self.y) + ")"
p1 = Point(3,4)
p2 = Point(3,2)
p3 = Point(1,3)
p4 = Point(0,1)
p5 = p1 + p2
p6 = p4 - p1
p7 = p2 * p3
print(p5, p6, p7)
print(p1 == p2)
print(p1 > p2)
print(p4 <= p3)
| 21.52381 | 80 | 0.539823 | [
"MIT"
] | JeffreyAsuncion/TWT_TheCompletePythonCourse | ObjectOrientedProgramming/OOPpart4.py | 1,356 | Python |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
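

if __name__ == '__main__':
  # Hedged usage sketch (added for illustration; not part of the original
  # module). It wires together the functions listed in the module docstring
  # for a single training step, assuming TF 1.x and a writable FLAGS.data_dir;
  # see the tutorial's dedicated training script for the full loop.
  maybe_download_and_extract()
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False, name='global_step')
    images, labels = distorted_inputs()
    logits = inference(images)
    total_loss = loss(logits, labels)
    train_op = train(total_loss, global_step)
    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(sess=sess, coord=coord)
      _, loss_value = sess.run([train_op, total_loss])
      print('single-step loss: %.3f' % loss_value)
      coord.request_stop()
      coord.join(threads)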
| 36.64 | 81 | 0.683816 | [
"Apache-2.0"
] | 13927729580/TensorFlowOnSpark | examples/cifar10/cifar10.py | 14,656 | Python |
# Functions help us simplify the program when we have repetitive tasks
def lin():  # to define a function it must have parentheses at the end
print('=-'*30)
lin()
print('Bem Vindo')
lin()
nome = str(input('Qual seu nome? '))
lin()
print(f'Tenha um otimo dia {nome}!')
lin()
def mensagem(msg):
print('-'*30)
    print(msg)  # the message shown here is whatever the caller passes in when invoking the function
print('-'*30)
mensagem('SISTEMA DE ALUNOS')
| 20.416667 | 98 | 0.677551 | [
"MIT"
] | GabrielBrotas/Python | modulo 3/aulas/4.0 - Funcoes.py | 490 | Python |
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
from .generation import (
TRANSFORM_FACTORIES,
colorspace_factory,
group_transform_factory,
look_factory,
named_transform_factory,
produce_transform,
transform_factory,
transform_factory_clf_transform_to_group_transform,
transform_factory_default,
view_transform_factory,
)
from .generation import (
ConfigData,
VersionData,
deserialize_config_data,
generate_config,
serialize_config_data,
validate_config,
)
from .reference import (
build_aces_conversion_graph,
classify_aces_ctl_transforms,
conversion_path,
ctl_transform_to_node,
discover_aces_ctl_transforms,
filter_ctl_transforms,
filter_nodes,
node_to_ctl_transform,
plot_aces_conversion_graph,
print_aces_taxonomy,
unclassify_ctl_transforms,
)
from .reference import (
ColorspaceDescriptionStyle,
generate_config_aces,
)
from .cg import generate_config_cg
__all__ = [
"TRANSFORM_FACTORIES",
"colorspace_factory",
"group_transform_factory",
"look_factory",
"named_transform_factory",
"produce_transform",
"transform_factory",
"transform_factory_clf_transform_to_group_transform",
"transform_factory_default",
"view_transform_factory",
]
__all__ += [
"ConfigData",
"VersionData",
"deserialize_config_data",
"generate_config",
"serialize_config_data",
"validate_config",
]
__all__ += [
"build_aces_conversion_graph",
"classify_aces_ctl_transforms",
"conversion_path",
"ctl_transform_to_node",
"discover_aces_ctl_transforms",
"filter_ctl_transforms",
"filter_nodes",
"node_to_ctl_transform",
"plot_aces_conversion_graph",
"print_aces_taxonomy",
"unclassify_ctl_transforms",
]
__all__ += [
"ColorspaceDescriptionStyle",
"generate_config_aces",
]
__all__ += ["generate_config_cg"]
| 24.185185 | 57 | 0.743236 | [
"BSD-3-Clause"
] | michdolan/OpenColorIO-Config-ACES | opencolorio_config_aces/config/__init__.py | 1,959 | Python |
import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
from flask import Flask, request, current_app
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_babel import Babel, lazy_gettext as _l
from elasticsearch import Elasticsearch
from redis import Redis
import rq
from config import Config
db = SQLAlchemy()
migrate = Migrate()
login = LoginManager()
login.login_view = 'auth.login'
login.login_message = _l('Please log in to access this page.')
mail = Mail()
bootstrap = Bootstrap()
moment = Moment()
babel = Babel()
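
# application-factory pattern: the extensions above are created unbound and are
# attached to a concrete Flask app inside create_app(), so several app
# instances (e.g. for tests) can coexist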
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
db.init_app(app)
migrate.init_app(app, db)
login.init_app(app)
mail.init_app(app)
bootstrap.init_app(app)
moment.init_app(app)
babel.init_app(app)
app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
if app.config['ELASTICSEARCH_URL'] else None
app.redis = Redis.from_url(app.config['REDIS_URL'])
app.task_queue = rq.Queue('microblog-tasks', connection=app.redis)
from app.errors import bp as errors_bp
app.register_blueprint(errors_bp)
from app.auth import bp as auth_bp
app.register_blueprint(auth_bp, url_prefix='/auth')
from app.main import bp as main_bp
app.register_blueprint(main_bp)
from app.api import bp as api_bp
app.register_blueprint(api_bp, url_prefix='/api')
if not app.debug and not app.testing:
if app.config['MAIL_SERVER']:
auth = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
auth = (app.config['MAIL_USERNAME'],
app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'], subject='Microblog Failure',
credentials=auth, secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if app.config['LOG_TO_STDOUT']:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
app.logger.addHandler(stream_handler)
else:
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/microblog.log',
maxBytes=10240, backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Microblog startup')
return app
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(current_app.config['LANGUAGES'])
from app import models | 33.686869 | 79 | 0.664168 | [
"MIT"
] | natalia-rios/flask-mega-tutorial | app/__init__.py | 3,335 | Python |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 15:49:49 2017
@author: tkoller
"""
import numpy as np
import numpy.linalg as nLa
from ..utils import unavailable
try:
import matplotlib.pyplot as plt
_has_matplotlib = True
except:
_has_matplotlib = False
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_3D(p, q, ax, n_points=100):
""" Plot an ellipsoid in 3D
Based on
https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib
TODO: Untested!
Parameters
----------
p: 3x1 array[float]
Center of the ellipsoid
q: 3x3 array[float]
Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
assert np.shape(p) == (3, 1), "p needs to be a 3x1 vector"
assert np.shape(q) == (3, 3), "q needs to be a spd 3x3 matrix"
    assert np.allclose(q, 0.5 * (q + q.T)), "q needs to be spd"
# transform to radius/center parametrization
U, s, rotation = linalg.svd(q)
assert np.all(s > 0), "q needs to be positive definite"
radii = 1.0 / np.sqrt(s)
# get x,y,z of sphere and transform
u = np.linspace(0.0, 2.0 * np.pi, n_points)
v = np.linspace(0.0, np.pi, n_points)
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
for i in range(len(x)):
for j in range(len(x)):
            [x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]],
                                                 rotation) + p.flatten()
# plot the result
ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='b', alpha=0.2)
return ax
@unavailable(not _has_matplotlib, "matplotlib")
def plot_ellipsoid_2D(p, q, ax, n_points=100, color="r"):
""" Plot an ellipsoid in 2D
TODO: Untested!
Parameters
----------
    p: 2x1 array[float]
        Center of the ellipsoid
    q: 2x2 array[float]
        Shape matrix of the ellipsoid
ax: matplotlib.Axes object
Ax on which to plot the ellipsoid
Returns
-------
ax: matplotlib.Axes object
The Ax containing the ellipsoid
"""
plt.sca(ax)
r = nLa.cholesky(q).T; # checks spd inside the function
t = np.linspace(0, 2 * np.pi, n_points);
z = [np.cos(t), np.sin(t)];
ellipse = np.dot(r, z) + p;
handle, = ax.plot(ellipse[0, :], ellipse[1, :], color)
return ax, handle
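

if __name__ == "__main__":
    # Hedged self-test sketch (added for illustration; not part of the original
    # module). Because of the relative import at the top, run it as a module,
    # e.g. `python -m safe_exploration.visualization.utils_visualization`.
    # It draws the ellipse defined by center p and a small SPD shape matrix q.
    p = np.array([[1.0], [2.0]])
    q = np.array([[2.0, 0.3], [0.3, 0.5]])
    fig, ax = plt.subplots()
    plot_ellipsoid_2D(p, q, ax, color="b")
    ax.set_aspect("equal")
    plt.show()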
| 26.791667 | 82 | 0.591757 | [
"MIT"
] | Pathetiue/safe-exploration | safe_exploration/visualization/utils_visualization.py | 2,572 | Python |
import sys
import os
import argparse
import json
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
import inputparser
import clustermaker
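
# Note (added): write_ssms() emits a PhyloWGS-style SSM table, where 'a' is the
# per-sample reference read count, 'd' the total read count, and mu_r / mu_v
# appear to be the probabilities of observing a reference read from the
# reference and variant populations, respectively.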
def write_ssms(variants, outfn):
_stringify = lambda A: ','.join([str(V) for V in A])
mu_r = 0.999
cols = ('id', 'gene', 'a', 'd', 'mu_r', 'mu_v')
with open(outfn, 'w') as outf:
print(*cols, sep='\t', file=outf)
for V in variants.values():
assert len(set(V['omega_v'])) == 1
variant = {
'id': 's%s' % int(V['id'][1:]),
'gene': V['name'],
'a': _stringify(V['ref_reads']),
'd': _stringify(V['total_reads']),
'mu_r': mu_r,
'mu_v': np.mean(1 - V['omega_v']),
}
print(*[variant[K] for K in cols], sep='\t', file=outf)
def write_params(sampnames, outfn):
with open(outfn, 'w') as outf:
json.dump({'samples': sampnames}, outf)
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--use-supervars', dest='use_supervars', action='store_true')
parser.add_argument('ssm_fn')
parser.add_argument('params_fn')
parser.add_argument('pwgs_ssm_fn')
parser.add_argument('pwgs_params_fn')
args = parser.parse_args()
variants = inputparser.load_ssms(args.ssm_fn)
params = inputparser.load_params(args.params_fn)
if args.use_supervars:
variants = clustermaker.make_cluster_supervars(params['clusters'], variants)
write_ssms(variants, args.pwgs_ssm_fn)
write_params(params['samples'], args.pwgs_params_fn)
if __name__ == '__main__':
main()
| 28.824561 | 83 | 0.656726 | [
"MIT"
] | J-Moravec/pairtree | comparison/pwgs/convert_inputs.py | 1,643 | Python |
from pandas import DataFrame
import matplotlib.pyplot as plt
import seaborn as sns
import csv,sys
ExperimentName=sys.argv[1]
with open(ExperimentName+'.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)
if ExperimentName == "pod-delete":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Stress: 3600/1s','Memory/CPU footprint for the stress run','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','With Force','Without Force','Different base image(alpine/nginx/centos)']
Cols = ['Is the test added?']
plt.title("Pod Delete Experiment", fontsize =20)
elif ExperimentName == "container-kill":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Large Duration and Interval']
Cols = ['Is the test added?']
plt.title("Container Kill Experiment", fontsize =20)
elif ExperimentName == "disk-fill":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Application image(nginx/centos/alpine)']
Cols = ['Is the test added?']
plt.title("Disk Fill Experiment", fontsize =20)
elif ExperimentName == "pod-cpu-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)']
Cols = ['Is the test added?']
plt.title("Pod CPU Hog Experiment", fontsize =20)
elif ExperimentName == "pod-memory-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)']
Cols = ['Is the test added?']
plt.title("Pod Memory Hog Experiment", fontsize =20)
elif ExperimentName == "pod-network-corruption":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host','With Target Container']
Cols = ['Is the test added?']
plt.title("Pod Network Corruption Experiment", fontsize =20)
elif ExperimentName == "pod-network-duplication":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host','With Target Container']
Cols = ['Is the test added?']
plt.title("Pod Network Duplication Experiment", fontsize =20)
elif ExperimentName == "pod-network-latency":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host']
Cols = ['Is the test added?']
plt.title("Pod Network Latency Experiment", fontsize =20)
elif ExperimentName == "pod-network-loss":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host']
Cols = ['Is the test added?']
plt.title("Pod Network Loss Experiment", fontsize =20)
elif ExperimentName == "pod-autoscaler":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With less replicas(say 5)','with more replicas(say 20)']
Cols = ['Is the test added?']
plt.title("Pod Autoscaler Experiment", fontsize =20)
elif ExperimentName == "kubelet-service-kill":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Target Node Specified','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different lib image(ubuntu/centos)','Without appinfo']
Cols = ['Is the test added?']
plt.title("Kubelet Service Kill", fontsize =20)
elif ExperimentName == "node-cpu-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node CPU Hog", fontsize =20)
elif ExperimentName == "node-memory-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node Memory Hog", fontsize =20)
elif ExperimentName == "node-drain":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Target node specified','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node Drain Experiment", fontsize =20)
elif ExperimentName == "node-taint":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Target node specified','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node Taint Experiment", fontsize =20)
elif ExperimentName == "node-io-stress":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo','W/ filesystem utilisation bytes specified','w/ filesystem utilisation percentage specified']
Cols = ['Is the test added?']
plt.title("Node IO Stress", fontsize =20)
elif ExperimentName == "pod-io-stress":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodAffectedPercentage is 0','PodAffectedPercentage is 100','PodAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo','W/ filesystem utilisation bytes specified','w/ filesystem utilisation percentage specified','w/ Volume mouth path specified']
Cols = ['Is the test added?']
plt.title("Pod IO Stress", fontsize =20)
else:
print("Experiment %s not supported",ExperimentName)
df = DataFrame(data, index=Index, columns=Cols)
df = df[df.columns].astype(float)
print(df)
svm = sns.heatmap(df, cmap="Reds")
figure = svm.get_figure()
plt.subplots_adjust(left=0.218,bottom=0.095,right=0.9,top=0.88,wspace=0.2,hspace=0.2)
figure.set_figheight(10)
figure.set_figwidth(15)
plt.savefig(ExperimentName+'-heatmap.png', dpi=250)
| 136.659574 | 675 | 0.763973 | [
"Apache-2.0"
] | ved432/test | utils/heatmap-coverage.py | 12,846 | Python |
# -*- coding: utf-8 -*-
"""
Created on 2013-2014
Author : Edouard Cuvelier
Affiliation : Université catholique de Louvain - ICTEAM - UCL Crypto Group
Address : Place du Levant 3, 1348 Louvain-la-Neuve, BELGIUM
email : firstname.lastname@uclouvain.be
"""
from numpy import *
import gmpy
from Crypto.Random.random import randint
import random as rd
import tools.fingexp as fingexp
import tools.utils as utils
class Field(fingexp.FingExp):
'Class for Field'
def __init__(self,p):
'''Defines the modulus p which must be a prime
'''
self.F = self
self.p = gmpy.mpz(p) # prime modulus
self.char = self.p # characteristic
self.q = self.p+1 # order+1 #TODO : correct?
assert gmpy.is_prime(p)
self.rep = None
self.g = None
'''
g is a random quadratic residue used to compute square roots and it is
initialized the first time a square root is computed
'''
self.to_fingerprint = ["p"]
self.to_export = {"fingerprint": [],"value": ["p"]}
super(Field, self).__init__()
def load(self, data, fingerprints):
self.p = utils.b64tompz(data["p"])
def one(self):
'unit element for multiplication'
return FieldElem(1, self)
def zero(self):
'unit element for addition'
return FieldElem(0,self)
def elem(self,x):
''' return an element of value x
'''
if isinstance(x,FieldElem):
assert x.F == self
return x
m = gmpy.mpz(1)
assert isinstance(x,int) or isinstance(x, long) or type(x)==type(m)
return FieldElem(x,self)
def random(self,low=1,high=None):
''' Return a random element of the Field
'''
if high == None :
high = int(self.p-1)
rand = randint(low,high)
return self.elem(rand)
def __eq__(self, other):
'testing if we are working in the same field'
try:
return (self.p == other.p)
except:
return False
def add(self, a, b):
'''
field operation: addition mod p
'''
return FieldElem((a.val + b.val) % self.p, self)
def sub(self, a, b):
'''
field operation: substraction mod p
'''
return FieldElem((a.val - b.val) % self.p, self)
def neg(self, a):
'''
field operation: opposite mod p
'''
return FieldElem((self.p - a.val ) % self.p, self)
def mul(self, a, b):
'''
field operation: multiplication of field elements
'''
"""
if isinstance(a,FieldElem) and isinstance(b, FieldElem) and not a.F == b.F :
raise Exception("multiplication between elements of different fields")
"""
if not isinstance(b,FieldElem) :
# Multiplication by a scalar
if b<0:
return self.smul(-a,-b)
return self.smul(a,b)
else:
return self.pmul(a,b)
def smul(self,a,b):
''' Return a*b where a or b is scalar
'''
if not isinstance(b,FieldElem):
# b is scalar
#return self.dbleAndAdd(a,a,b)
return FieldElem((gmpy.mpz(b)*a.val)%(self.p),self)
#return self.pmul(a,a.F.elem(b))
else :
# a is scalar
#return self.dbleAndAdd(b,b,a)
return self.smul(b,a)
def sm(self,b,a):
''' Quick multiplication between a field element a and a scalar b
'''
return FieldElem((gmpy.mpz(b)*a.val)%(self.p),self)
def pmul(self,a,b):
''' product between two field element in Fp
'''
return FieldElem((a.val * b.val) % self.p, self)
def dbleAndAdd(self,P,Pp,n):
'return n*P using double and add technique'
#print "dblaad"
if n == 0 :
return self.zero();
if n == 1 :
return P
elif n%2 == 1 :
Q = self.dbleAndAdd(P,Pp,(n-1)/2)
return P+Q+Q
elif n%2 == 0 :
Q = self.dbleAndAdd(P,Pp,n/2)
return Q+Q
def powop(self, a, b):
'return a**b'
m = gmpy.mpz(1)
#self.count = 0
'exponentiation by a scalar'
if not isinstance(b, int) and not isinstance(b, long) and not type(b)==type(m):
raise Exception("Exponentation by a non integer, long or mpz")
c = b
if c > self.char-1 or c<0:
c = b%(self.char-1)
#elif :
# return self.powop(a.invert(),(-c))
if c == 0 :
assert not a.val%self.char == 0
return self.one()
elif c == 1 :
return a
else :
return self.sqrtAndMultply(a,a, c)
#return FieldElem(pow(a.val,b,self.char))
def sqrtAndMultply(self,P,Pp,n):
'return P**n using square and multiply technique'
if n == 0 :
return self.one()
elif n == 1 :
return P
elif n%2 == 1 :
Q = self.sqrtAndMultply(P,Pp,(n-1)/2)
return P*self.square(Q)
elif n%2 == 0 :
Q = self.sqrtAndMultply(P,Pp,n/2)
return self.square(Q)
def square(self,a):
'''
This method returns the square of a
'''
return FieldElem(pow(a.val,2, self.p), self)
def invert(self,a):
assert not (a.val%self.p == 0) # Do not invert zero!
return FieldElem(gmpy.invert(a.val, self.p), self)
#def invertible(self,a):
#return not int(a.invert().val) == 0
def div(self,a,b):
assert not (b.val%self.p == 0) # Do not invert zero!
return FieldElem((a.val*self.invert(b).val % self.p),self)
def findnonresidue(self):
'''
find a random non quadratic residue in the Field F,
that is, find g that is not a square in F, this is
needed to compute square roots
'''
g=self.random()
while g.isquadres():
#print g, " is quad res in ", self
g = self.random()
return g
def __str__(self):
return "F_"+str(self.p)
def jsonable(self):
return {'type': 'FqField', 'p': self.p}
class FieldElem():
def __init__(self, val, F):
'''Creating a new field element.
'''
#assert isinstance(F,Field)
self.F = F
self.val = gmpy.mpz(val)
self.poly = polynom(self.F,[self])
#self.to_fingerprint = ["F", "val"]
#self.to_export = {"fingerprint": ["F"],
# "value": ["val"]}
#super(FieldElem, self).__init__()
def __eq__(self, other):
try:
return ((self.val%self.F.char) == (other.val%self.F.char) and self.F == other.F)
except:
return False
def __add__(self, other):
return self.F.add(self, other)
def __neg__(self):
return self.F.neg(self)
def __sub__(self, other):
return self.F.sub(self, other)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, other):
return self.F.mul(self, other)
def __rmul__(self, other):
return self.__mul__(other)
def __pow__(self, e):
return self.F.powop(self, e)
def __div__(self,other):
return self.F.div(self,other)
def __truediv__(self,other):
return self.F.div(self,other)
def __str__(self):
return str(self.val)
def iszero(self):
return self == self.F.zero()
def invert(self):
return self.F.invert(self)
def invertible(self):
return self.F.invertible(self)
def isquadres(self):
        ''' This method returns True if the element is a non-zero quadratic residue mod q;
        it returns False otherwise
'''
if (self+self.F.zero()).iszero() :
# case of element is zero
return False
else :
# If F's order is prime we use Euler's criterium
c = self**((self.F.q-1)/2) #TODO: Optimize this
return c==self.F.one()
def squareroot(self):
''' This method returns the positive square root of
an element of the field
using the Tonelli-Shanks algorithm
        Careful: if the element has no square root, the method does not
check this case and raises an error. Verification has to be done
before calling the method.
'''
g = self.F.g
if g == None :
g = self.F.findnonresidue()
self.F.g = g
q = self.F.q
s=0
t=self.F.q-1
while t%2==0:
s=s+1
t=t/2
# q-1 = (2**s)*t
e = 0
for i in range(2,s+1):
b = 2**(i-1)
b1 = b*2 # b1 = 2**i
c = ((self)*(g**(-e)))**((q-1)/b1)
if not c==self.F.one() :
e = e+b
h = self*(g**(-e))
b = (g**(e/2))*(h**((t+1)/2))
assert b**2 == self # FAILURE to find square root
return b
def fingerprint(self):
return fingexp.fingerprint(self.val)
def jsonable(self):
return {'type': 'FieldElem', 'F': self.F, 'val': self.val}
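# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal example of how Field and FieldElem might be used. The prime 23 and the
# element values below are arbitrary assumptions chosen only for illustration.
def _example_prime_field_usage():
    F = Field(23)
    a = F.elem(5)
    b = F.elem(7)
    c = a*b + F.one()            # arithmetic mod 23
    d = c/a                      # division goes through the modular inverse
    if c.isquadres():            # Euler's criterion
        r = c.squareroot()       # Tonelli-Shanks square root
        assert r*r == c
    return c, d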
class ExtensionField(Field):
'''
This class defines extension fields and inherits field methods.
Depending on the degree of the extension field, we use
different algorithms to optimize the operations
'''
def __init__(self,F,irpoly,g=None,rep=None):
'''Define the base Field or extension Field and the irreducible polynomial
F is the base field on top of which the extension
field is built
irpoly is the irreducible polynomial used to build
the extension field as F/irpoly
g is a non quadratic residue used to compute square
roots, if it is set to None, computing a square root
will initialize g
rep is the representation of the root of irpoly
(note that letter 'A' is reserved for the Complex extension field)
'''
self.F = F
self.irpoly = irpoly
self.deg = len(irpoly.coef) # degree of the irreducible polynomial + 1
assert self.deg > 0
self.q = self.F.q**(self.deg-1) # order of the Field
self.tabular = self.table()
if rep == None :
self.rep = rd.choice(['B','C','D','E','F','G','H','J','K','L'])
#Choose a random representation letter
else :
self.rep = rep
self.char = F.char
self.primefield = gmpy.is_prime(self.char)
self.g = g # g is needed to compute square roots, it is a non quadratic residue
self.to_fingerprint = ["F","irpoly"]
self.to_export = {"fingerprint": [],"value": ["F","irpoly"]}
def one(self):
'unit element for multiplication'
One = [self.F.zero()]*(self.deg-1)
One[self.deg-2]= self.F.one()
return ExtensionFieldElem(self,polynom(self.F,One))
def zero(self):
'unit element for addition'
Zero = [self.F.zero()]*(self.deg-1)
return ExtensionFieldElem(self,polynom(self.F,Zero))
def unit(self):
''' root of the irreducible polynomial
e.g. return element 1*A+0 (or the complex value i) if the irpoly is X**2+1
'''
I = self.zero()
I.poly.coef[-2]=self.F.one()
return I
def elem(self,x):
''' Provided that x belongs to F, return an element of the extension field
of value x
'''
P = self.zero()
P.poly.coef[-1] = x
return P
def random(self):
''' Return a random element of the Extension Field
'''
polycoef = [0]*(self.deg-1)
for i in range(self.deg-1):
polycoef[i] = self.F.random()
poly = polynom(self.F,polycoef)
return ExtensionFieldElem(self,poly)
def __eq__(self, other):
'testing if we are working in the same extension field'
try:
return (self.F == other.F and self.irpoly == other.irpoly)
except:
return False
def add(self, a, b):
'''
        field operation: addition of polynomials > addition of coefficients in the appropriate field
'''
#assert a.F == b.F and a.F.F == self.F
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
polysum = [0]*a.deg
for i in range(a.deg):
polysum[i]=a.poly.coef[i]+b.poly.coef[i]
P = polynom(self.F,polysum)
return ExtensionFieldElem(self,P)
def sub(self, a, b):
'''
        field operation: subtraction of polynomials > subtraction of each coefficient in the appropriate field
'''
#assert a.F == b.F and a.F.F == self.F
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
c = self.neg(b)
return self.add(a,c)
def neg(self, a):
'''
field operation: opposite of a polynomial > opposite of each coefficient in appropriate field
'''
#assert a.F.F == self.F
ap = [0]*a.deg
for i in range(a.deg):
ap[i] = -a.poly.coef[i]
P = polynom(self.F,ap)
return ExtensionFieldElem(self,P)
def smul(self,a,b):
''' Return a*b where a or b is scalar
'''
if not isinstance(b,FieldElem):
# b is scalar
A = a.poly.coef
Pc = [0]*len(A)
for i in range(len(Pc)):
Pc[i] = A[i]*gmpy.mpz(b)
return ExtensionFieldElem(self,polynom(self.F,Pc))
else :
# a is scalar
return self.smul(b,a)
def pmul(self,a,b):
'''Multiplication between polynomials
'''
#assert a.F == b.F and a.F.F == self.F
if not a.deg == b.deg :
a = self.reduc(a)
b = self.reduc(b)
# Simpler notations for reading
A = a.poly.coef
B = b.poly.coef
        k = self.deg-1 # degree of the extension field
if k == 2 and self.F.rep =='A':
# We are in the case that the extension field is Fp2
            # We assume here that the irreducible polynomial is X**2+1 (beta=-1)
# Complex multiplication
a0,a1,b0,b1 = A[0].val,A[1].val,B[0].val,B[1].val
p = self.char
v0 = a0*b0
v1 = a1*b1
c0 = ((a0+a1)*(b0+b1)-v0-v1)%p
c1 = (v1-v0)%p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
cp = polynom(self.F,[c0e,c1e])
C = ExtensionFieldElem(self,cp)
return C
elif k == 2:
# In this case, use Karatsuba multiplication algorithm
# notations
a0 = A[0]
a1 = A[1]
b0 = B[0]
b1 = B[1]
beta = -self.irpoly.coef[-1]
v0 = self.F.pmul(a0,b0)
v1 = self.F.pmul(a1,b1)
c0 = self.F.pmul((a0+a1),(b0+b1))-v0-v1 # coefficient of X
c1 = v1 + self.F.pmul(v0,beta) # independant term
cp = polynom(self.F,[c0,c1])
C = ExtensionFieldElem(self,cp)
return C
elif k == 3:
# In this case, use Karatsuba multiplication algorithm
# notations
a0,a1,a2 = A
b0,b1,b2 = B
beta = -self.irpoly.coef[-1]
v0,v1,v2 = self.F.pmul(a0,b0), self.F.pmul(a1,b1), self.F.pmul(a2,b2)
c0 = self.F.pmul((a0+a2),(b0+b2))-v0+v1-v2 # coefficient of X**2
c1 = self.F.pmul((a2+a1),(b2+b1))-v2-v1+self.F.pmul(beta,v0) # coefficient of X
c2 = v2+self.F.pmul(beta,(self.F.pmul((a1+a0),(b1+b0))-v1-v0)) # independant term
cp = polynom(self.F,[c0,c1,c2])
C = ExtensionFieldElem(self,cp)
return C
else :
prod = convolve(A,B)
return self.reduc2(prod) # return EProd % ired. polynomial
def square(self,a):
        ''' This algorithm returns the square of a in the field
using different methods if the degree of the extension
is 2,3 or more
'''
#print a.F
#print self
assert a.F == self
if not a.deg == self.deg-1 :
a = self.reduc(a)
#notations
A = a.poly.coef
k = self.deg-1 # degree of the extension
if k == 2 and self.F.rep == 'A':
# Using the complex multiplication
# We are in the case that the extension field is Fp2
            # We assume here that the irreducible polynomial is X**2+1 (beta=-1)
a1, a0 = A[0].val,A[1].val
p = self.char
v0 = a0*a1
c0 = ((a0+a1)*(a0-a1))%p
c1 = (v0+v0)%p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
cp = polynom(self.F,[c1e,c0e])
C = ExtensionFieldElem(self,cp)
return C
elif k == 2:
# Using the complex multiplication
a1, a0 = A
beta = -self.irpoly.coef[-1]
v0 = self.F.pmul(a0,a1)
c0 = self.F.pmul((a0+a1),(a0+self.F.pmul(a1,beta)))-v0-self.F.pmul(beta,v0)
c1 = v0+v0
cp = polynom(self.F,[c1,c0])
return ExtensionFieldElem(self,cp)
elif k == 3:
# Using Chung-Hasan Squaring2
a2,a1,a0 = A
#print a0
#print 'a0',a0.F, a0.F.deg-1
#print 'self',self.F, self.F.deg-1
assert a0.F == self.F
beta = -self.irpoly.coef[-1]
s0 = self.F.square(a0)
t1 = self.F.pmul(a0,a1)
s1 = t1+t1
s2 = self.F.square((a0-a1+a2))
t3 = a1*a2
s3 = t3+t3
s4 = self.F.square(a2)
c0 = s0 + self.F.pmul(beta,s3)
c1 = s1 + self.F.pmul(beta,s4)
c2 = s1 + s2 + s3 - s0 -s4
cp = polynom(self.F,[c2,c1,c0])
return ExtensionFieldElem(self,cp)
else :
return self.F.pmul(a,a)
def invert(self,a):
        ''' This method returns the inverse of a in the field
            The inverse is computed by determining the Bezout coefficients using the
            extended Euclid's algorithm or by specialized algorithms depending
            on the degree of the extension (2 or 3)
'''
#assert self.invertible(a) #The element must be invertible
assert a.F == self
k = self.deg-1
if k == 2 and self.F.rep == 'A':
            # inversion in a degree-2 extension of a prime field
            # We are in the case that the extension field is Fp2
            # We assume here that the irreducible polynomial is X**2+1 (mod=-1)
A = a.poly.coef
a1,a0 = A[0].val,A[1].val # a = a0+a1*i
p = self.char
norm = a0*a0+a1*a1
invnorm = gmpy.invert(norm,p)
c0 = (a0*invnorm) % p
c1 = (-a1*invnorm) % p
c0e = FieldElem(c0,self.F)
c1e = FieldElem(c1,self.F)
invap = polynom(self.F,[c1e,c0e])
inva = ExtensionFieldElem(self,invap)
return inva
elif k == 2 :
            # inversion in a degree-2 extension of a prime field
A = a.poly.coef
a1,a0 = A[0],A[1] # a = a0+a1*i
#print 'A',A
#print 'a1',a1
mod = self.irpoly.coef[-1] # i**2 = -mod
#a1b,a0b,modb = self.F.elem(a1), self.F.elem(a0),self.F.elem(mod)
#print 'a1b',a1b
#a1b2 = self.F.square(a1b)
a12 = self.F.square(a1)
#mid = self.F.pmul(a1b2,modb)
mid = self.F.pmul(a12,mod)
#norm = self.F.square(a0b)+mid
norm = self.F.square(a0)+mid
#invnorm = self.F.invert(a0**2+mod*a1**2)
#invnorm = self.F.invert(norm.poly.coef[-1])
invnorm = self.F.invert(norm)
c = self.F.pmul(a0,invnorm) # c = -a1/(a0**2+mod*a1**2)
d = -self.F.pmul(a1,invnorm)
invap = polynom(self.F,[d,c])
inva = ExtensionFieldElem(self,invap)
return inva
elif k == 3 :
            # inversion in a degree-3 extension of the base field
A = a.poly.coef
a2,a1,a0 = A[0],A[1],A[2]
mod = -self.irpoly.coef[-1]
z0 = self.F.zero()
z1 = self.F.one()
if a0 == z0:
#a0 = 0
if a1 == z0:
#a1 = 0
c0,c1,c2 = z0, self.F.invert(self.F.pmul(a2,mod)), z0
elif a2 == z0:
#a2 = 0
c0,c1,c2 = z0,z0,self.F.invert(self.F.pmul(a1,mod))
else :
#a1,a2 != 0
a22 = self.F.square(a2)
a12 = self.F.square(a1)
c2 = self.F.pmul(a12,self.F.invert((self.F.pmul(self.F.pmul(a22,a2),mod)+self.F.pmul(self.F.pmul(a12,a1),mod))))
c1 = self.F.pmul((z1-self.F.pmul(self.F.pmul(a1,c2),mod)),self.F.invert(self.F.pmul(a2,mod)))
c0 = self.F.pmul((-(self.F.pmul(self.F.pmul(a2,mod),c2))),self.F.invert(a1))
else :
#a0 != 0
if a1 == z0 and a2 == z0:
#a1 = 0 , a2 = 0
c0,c1,c2 = self.F.invert(a0),z0,z0
else :
a12 = self.F.pmul(a1,a2)
a12m = self.F.pmul(a12,mod)
a00 = self.F.square(a0)
abis = a00-a12m
if abis == z0:
#a0**2-(a1*a2*mod) = 0
a11 = self.F.square(a1)
a22 = self.F.square(a2)
a02 = self.F.pmul(a0,a2)
a01 = self.F.pmul(a0,a1)
c2 = self.F.pmul(-a,self.F.invert(self.F.pmul((a02-a11),mod)))
c1 = self.F.pmul(-a2,self.F.invert(a01-self.F.pmul(a22,mod)))
a1c2 = self.F.pmul(a1,c2)
a2c1 = self.F.pmul(a2,c1)
c0 = self.F.pmul((z1-self.F.pmul(a1c2+a2c1,mod)),self.F.invert(a0))
else :
#a0**2-(a1*a2*mod) != 0
if a1 == z0:
#a1 = 0
inva0 = self.F.invert(a0)
a02 = self.F.pmul(a0,a2)
a000 = self.F.pmul(a00,a0)
a22 = self.F.square(a2)
a222 = self.F.pmul(a22,a2)
mm = self.F.square(mod)
a222mm = self.F.pmul(a222,mm)
c2 = self.F.pmul(-a02,self.F.invert(a000+a222mm))
a02m = self.F.pmul(a02,mod)
a02mc2 = self.F.pmul(a02m,c2)
inva00 = self.F.square(inva0)
c1 = self.F.pmul(-a02mc2,inva00)
a2m = self.F.pmul(a2,mod)
a2mc1 = self.F.pmul(a2m,c1)
c0 = self.F.pmul(z1-a2mc1,inva0)
elif a2 == z0:
#a2 = 0
a11 = self.F.square(a1)
a111 = self.F.pmul(a11,a1)
a000 = self.F.pmul(a00,a0)
a111m = self.F.pmul(a111,mod)
inva0 = self.F.invert(a0)
c2 = self.F.pmul(a11,self.F.invert(a111m+a000))
a11m = self.F.pmul(a11,mod)
a11mc2 = self.F.pmul(a11m,c2)
inva00 = self.F.square(inva0)
c1 = self.F.pmul(a11mc2-a1,inva00)
a1m = self.F.pmul(a1,mod)
a1mc2 = self.F.pmul(a1m,c2)
c0 = self.F.pmul(z1-a1mc2,inva0)
else :
#a1,a2 != 0
a01 = self.F.pmul(a0,a1)
a22 = self.F.square(a2)
a22m = self.F.pmul(a22,mod)
a02 = self.F.pmul(a0,a2)
a11 = self.F.square(a1)
abus = a01-a22m
abos = self.F.pmul(a02-a11,mod)
invabis = self.F.invert(abis)
abb = self.F.pmul(abus,invabis)
abb1 = self.F.pmul(abb,a1)
abbbos = self.F.pmul(abb,abos)
c2 = self.F.pmul(abb1-a2,self.F.invert(abis-abbbos))
abosc2 = self.F.pmul(abos,c2)
c1 = self.F.pmul(-a1-abosc2,invabis)
a1c2 = self.F.pmul(a1,c2)
a2c1 = self.F.pmul(a2,c1)
c0 = self.F.pmul(z1-self.F.pmul(a1c2+a2c1,mod),self.F.invert(a0))
invap = polynom(self.F,[c2,c1,c0])
inva = ExtensionFieldElem(self,invap)
return inva
else :
            # inversion in an extension of degree other than 2 or 3
            # this inversion takes longer than the specialized methods above;
# it uses extended Euclid's algorithm
P = ExtensionFieldElem(self,self.irpoly)
r,u,v = self.extendedeuclide(P,a)
n,d = r.poly.truedeg()
assert n == self.deg-2
c = r.poly.coef[len(r.poly.coef)-1].invert()
cp = polynom(self.F,[c])
ce = ExtensionFieldElem(self,cp)
return ce*v
def invertible(self,a):
''' Return True if a is invertible
'''
return not self.reduc(a)==self.zero()
def div(self,a,b):
return a*self.invert(b)
def eucldiv(self,a,b):
''' Return a/b and a%b
a and b are of length d-1 where d is the degree of the irreducible polynomial
'''
zero = self.F.zero()
izero = self.zero()
d = self.deg
assert not b.poly.iszero() # Do not divide by zero
if a.poly.iszero() :
return izero, izero # quotient is zero, remain is zero
elif a == b:
return self.one(), izero # quotient is one, remain is zero
#Notations
A = a.poly.coef
B = b.poly.coef
n, da = a.poly.truedeg() # position of first non zero elem of a and degree of a
m, db = b.poly.truedeg() # same for b
if da<db :
# deg(a)<deg(b)
return izero, a # quotient is zero, remain is a
elif da==db:
#deg(a)=deg(b)
deg = max(d-1,da)
rc = [zero]*(deg)
qc = [zero]*(deg)
q = A[n]/B[m]
for i in range(1,deg):
rc[i] = A[n+i]-q*B[m+i]
qc[deg-1] = q
rp = polynom(self.F,rc)
qp = polynom(self.F,qc)
remain = ExtensionFieldElem(self,rp)
quotient = ExtensionFieldElem(self,qp)
return quotient, remain
else :
# deg(a)>deg(b)
deg = max(d-1,da)
p = deg - da
rc = [zero]*(deg)
qc = [zero]*(deg)
rc[deg-da:] = A[n:]
pm=0
while p+pm+db<deg+1:
#k is the position of the index of the quotient
k = deg-(da-db)-1+pm
qc[k] = rc[p+pm]/B[m]
for i in range(db):
rc[i+p+pm] = rc[i+p+pm]- qc[k]*B[m+i]
pm=pm+1
rp = polynom(self.F,rc)
qp = polynom(self.F,qc)
remain = ExtensionFieldElem(self,rp)
quotient = ExtensionFieldElem(self,qp)
return quotient, remain
def reduc(self,a):
''' Return a % self.irpoly
The polynomial a = [a_0,...,a_n-1] is returned modulo the irreducible polynomial
The reduced polynomial has length at most d-1 where d is the length
of the irreducible polynomial
'''
assert a.F.F == self.F
if a.poly.iszero() :
return self.zero()
elif a.poly == self.irpoly :
return self.zero()
elif a.deg < self.deg :
c = [self.F.zero()]*(self.deg-1-a.deg)
newacoef = c+a.poly.coef
newapoly= polynom(self.F, newacoef)
newaelem = ExtensionFieldElem(self, newapoly)
return newaelem
else :
# Case where a is not zero or the irreducible polynomial and deg(a)>=deg(irpoly)
q,r = self.eucldiv(a,ExtensionFieldElem(self,self.irpoly))
r = self.trunc(r)
return self.reduc(r)
def reduc2(self,a):
''' a is a list of length (d-1)*2-1 (polynomial length)
this method returns the equivalent element of length d-1
using the table of equivalences (build from the irreducible polynomial)
in the function self.table()
'''
As = a[:(self.deg-2)]
Ad = a[(self.deg-2):]
b = list(dot(As,self.tabular)+Ad)
newapoly = polynom(self.F,b)
newa = ExtensionFieldElem(self,newapoly)
return newa
def trunc(self,a):
'''Return an ExtensionFieldElem of length d-1 where d = deg(irpoly)
'''
d = self.deg
if a.deg == d-1:
return a
c = a.poly.coef[a.deg-d+1:] # the (d-1) last elements of a
cp = polynom(self.F,c)
return ExtensionFieldElem(self,cp)
def table(self):
''' This method returns a table (usually) stored in self.tabular
which is used to compute reduction after a multiplication
between two elements
'''
d = self.deg
T = zeros((d-2,d-1),dtype=object_)
Pc = self.irpoly.coef[1:]
for i in range(0,d-2):
Qc = [self.F.zero()]*(2*(d-1)-1)
Qc[i+1:i+d] = Pc
Qp = polynom(self.F,Qc)
Qe = ExtensionFieldElem(self,Qp)
Q = self.reduc(-Qe)
T[i] = array(Q.poly.coef)
return T
def extendedeuclide(self,a,b):
        '''Return s,u,v such that s = ua + vb, where s is the gcd of a and b
This method is used to compute the inverse of a mod b (when s=1)
'''
#init
one = self.one()
zero = self.zero()
s = a
u = one
v = zero
sp = b
up = zero
vp = one
#loop : invariants are s = ua+vb and sp = up*a+vp*b
while not sp.poly.iszero() :
q,r = self.eucldiv(s,sp)
s,u,v,sp,up,vp = sp, up, vp, r, u-up*q,v-vp*q
return self.reduc(s),self.reduc(u),self.reduc(v)
def __str__(self):
return str(self.F)+"/"+str(self.irpoly)
def jsonable(self):
return {'type': 'Field Extension', 'F': self.F, 'irpoly': self.irpoly, 'degree':self.deg-1}
class ExtensionFieldElem(FieldElem):
def __init__(self,F,poly):
'''Define the Extension Field and the representative polynomial
'''
self.F = F
self.poly = poly
self.siz = len(poly.coef)
self.deg = self.siz
def __str__(self):
x = self.F.rep
p = self.poly
s = '('
if self.siz == 1 :
s = s+str(p.coef[0])
if self.siz == 2 :
s = s+str(p.coef[0])+'*'+x+' + '+str(p.coef[1])
if self.siz > 2 :
s =s+str(p.coef[0])+'*'+x+'**'+str(self.siz-1)
for i in range(1,self.siz-2):
s = s+' + '+str(p.coef[i])+'*'+x+'**'+str(self.siz-1-i)
s = s+' + '+str(p.coef[self.siz-2])+'*'+x +' + '+str(p.coef[self.siz-1])
return s+')'
def __eq__(self,other):
try:
return self.F == other.F and self.poly == other.poly
except:
return False
def fingerprint(self):
return self.poly.fingerprint()
def jsonable(self):
return {'type': 'ExtensionFieldElem', 'F': self.F, 'poly': self.poly, 'size': self.siz}
class polynom:
''' This class represents a polynomial written P = c_nX**n+...c_1X+c_0
c_0,...,c_n are in the Field F (which can be an ExtensionField) so they are either FieldElem or ExtensionFieldElem
coef is a list : coef = [c_n,...,c_0] of length n+1
'''
def __init__(self,F,coef):
self.F = F # The field in which coeficients belong
if isinstance(coef,list):
            self.coef = coef # A list of coefficients in decreasing order of degree (by convention)
self.deg = len(coef) # The degree+1 of the polynomial
else :
#coef is not a list but a single element
self.coef = [coef]
self.deg = 1
def __eq__(self,other):
try:
return (self.F == other.F and self.coef == other.coef)
except:
return False
def __str__(self):
# Not consistent with representation letter of the fields
x = self.F.rep
if x == None:
x = 'X'
s = '('
if self.deg == 1 :
s = s+str(self.coef[0])
if self.deg == 2 :
s = s+str(self.coef[0])+'*'+x+' + '+str(self.coef[1])
if self.deg > 2 :
s =s+str(self.coef[0])+'*'+x+'**'+str(self.deg-1)
for i in range(1,self.deg-2):
s = s+' + '+str(self.coef[i])+'*'+x+'**'+str(self.deg-1-i)
s = s+' + '+str(self.coef[self.deg-2])+'*'+x +' + '+str(self.coef[self.deg-1])
return s+')'
def fingerprint(self):
L = []
for c in self.coef:
L.append(c.fingerprint())
return fingexp.fingerprint(L)
def iszero(self):
'''Return True if it is a zero polynomial (each coefficient is zero)
This does not return True if the polynomial is the polynomial that generates the extension field
'''
cond = True
for i in self.coef:
pcond = i.iszero()
cond = pcond*cond
return cond
def truedeg(self):
'''Return the position of the first non zero coefficient and the actual degree of the polynomial
'''
if self.iszero():
return 0,0
n = 0
while self.coef[n]==self.F.zero():
n = n+1
# n is the position of the first non zero coeff of the polynomial
return n, self.deg-n # position and actual degree of the polynomial
def jsonable(self):
return {'type': 'polynomial', 'F': self.F, 'coeficients': self.coef, 'degree': self.deg}
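# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A rough sketch of building a quadratic extension F_p[X]/(X**2+1) from the classes
# above. The prime 19 and the irreducible polynomial are assumptions for illustration;
# X**2+1 is only irreducible when -1 is not a square mod p (i.e. p = 3 mod 4), as here.
def _example_extension_field_usage():
    F = Field(19)
    irpoly = polynom(F,[F.one(),F.zero(),F.one()]) # X**2+1, coefficients in decreasing degree
    K = ExtensionField(F,irpoly)
    i = K.unit()                 # a root of the irreducible polynomial
    a = i + K.one()              # the element 1 + i
    b = K.invert(a)
    assert a*b == K.one()        # multiplication is reduced modulo X**2+1
    return a, b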
| 33.584054 | 132 | 0.494208 | [
"Apache-2.0"
] | ecuvelier/PPAT | mathTools/field.py | 34,962 | Python |
# -*- coding: utf-8 -*-
# Copyright (C) 2019 - 2020 by Pedro Mendes, Rector and Visitors of the
# University of Virginia, University of Heidelberg, and University
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2017 - 2018 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University of
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 - 2009 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# Copyright (C) 2006 - 2007 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc. and EML Research, gGmbH.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CFunctionParameter(unittest.TestCase):
def setUp(self):
self.functions=COPASI.CRootContainer.getFunctionList()
self.function=self.functions.findFunction("Iso Uni Uni")
self.assert_(self.function!=None)
self.assert_(self.function.__class__==COPASI.CFunction)
self.parameters=self.function.getVariables()
self.assert_(self.parameters!=None)
self.assert_(self.parameters.__class__==COPASI.CFunctionParameters)
index=self.parameters.findParameterByName("Keq",COPASI.CFunctionParameter.DataType_FLOAT64)
self.parameter=self.parameters.getParameter(index)
self.assert_(self.parameter!=None)
self.assert_(self.parameter.__class__==COPASI.CFunctionParameter)
def test_getKey(self):
key=self.parameter.getKey()
self.assert_(type(key)==str)
def test_getType(self):
b=self.parameter.getType()
self.assert_(type(b)==int)
self.assert_(b==COPASI.CFunctionParameter.DataType_FLOAT64)
def test_setType(self):
t=COPASI.CFunctionParameter.DataType_INT32
self.parameter.setType(t)
self.assert_(self.parameter.getType()==t)
def test_getUsage(self):
b=self.parameter.getUsage()
self.assert_(type(b)==int)
self.assert_(b==COPASI.CFunctionParameter.Role_PARAMETER)
def test_setUsage(self):
t=COPASI.CFunctionParameter.Role_VOLUME
self.parameter.setUsage(t)
self.assert_(self.parameter.getUsage()==t)
def suite():
tests=[
"test_getKey"
,"test_getType"
,"test_setType"
,"test_getUsage"
,"test_setUsage"
]
return unittest.TestSuite(map(Test_CFunctionParameter,tests))
if(__name__ == '__main__'):
unittest.TextTestRunner(verbosity=2).run(suite())
| 32.162791 | 95 | 0.721981 | [
"Artistic-2.0"
] | MedAnisse/COPASI | copasi/bindings/python/unittests/Test_CFunctionParameter.py | 2,766 | Python |
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_apimanagementapiexport_info
version_added: '2.9'
short_description: Get ApiExport info.
description:
- Get info of ApiExport.
options:
resource_group:
description:
- The name of the resource group.
required: true
type: str
service_name:
description:
- The name of the API Management service.
required: true
type: str
api_id:
description:
- >-
API revision identifier. Must be unique in the current API Management
service instance. Non-current revision has ;rev=n as a suffix where n is
the revision number.
required: true
type: str
format:
description:
- >-
Format in which to export the Api Details to the Storage Blob with Sas
Key valid for 5 minutes.
required: true
type: str
export:
description:
- Query parameter required to export the API details.
required: true
type: str
id:
description:
- ResourceId of the API which was exported.
type: str
value:
description:
- The object defining the schema of the exported Api Detail
type: dict
suboptions:
link:
description:
- >-
Link to the Storage Blob containing the result of the export
operation. The Blob Uri is only valid for 5 minutes.
type: str
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: ApiManagementGetApiExportInOpenApi2dot0
azure_rm_apimanagementapiexport_info:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
format: swagger-link
export: 'true'
- name: ApiManagementGetApiExportInOpenApi3dot0
azure_rm_apimanagementapiexport_info:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
format: openapi-link
export: 'true'
'''
RETURN = '''
api_export:
description: >-
A list of dict results where the key is the name of the ApiExport and the
values are the facts for that ApiExport.
returned: always
type: complex
contains:
apiexport_name:
description: The key is the name of the server that the values relate to.
type: complex
contains:
id:
description:
- ResourceId of the API which was exported.
returned: always
type: str
sample: null
format:
description:
- >-
Format in which the Api Details are exported to the Storage Blob
with Sas Key valid for 5 minutes.
returned: always
type: str
sample: null
value:
description:
- The object defining the schema of the exported Api Detail
returned: always
type: dict
sample: null
contains:
link:
description:
- >-
Link to the Storage Blob containing the result of the export
operation. The Blob Uri is only valid for 5 minutes.
returned: always
type: str
sample: null
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
from msrestazure.azure_exceptions import CloudError
class AzureRMApiExportInfo(AzureRMModuleBase):
def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            service_name=dict(
                type='str',
                required=True
            ),
            api_id=dict(
                type='str',
                required=True
            ),
            format=dict(
                type='str',
                required=True
            ),
            export=dict(
                type='str',
                required=True
            )
        )
self.resource_group = None
self.service_name = None
self.api_id = None
self.format = None
self.export = None
self.id = None
self.value = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200]
self.query_parameters = {}
self.query_parameters['api-version'] = '2019-01-01'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
self.mgmt_client = None
super(AzureRMApiExportInfo, self).__init__(self.module_arg_spec, supports_tags=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
if (self.resource_group is not None and
self.service_name is not None and
self.api_id is not None and
self.format is not None and
self.export is not None):
self.results['api_export'] = self.format_item(self.get())
return self.results
def get(self):
response = None
results = {}
# prepare url
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/resourceGroups' +
'/{{ resource_group }}' +
'/providers' +
'/Microsoft.ApiManagement' +
'/service' +
'/{{ service_name }}' +
'/apis' +
'/{{ api_name }}')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
self.url = self.url.replace('{{ service_name }}', self.service_name)
        self.url = self.url.replace('{{ api_name }}', self.api_id)
        # pass the declared 'format' and 'export' options as query parameters (see DOCUMENTATION)
        self.query_parameters['format'] = self.format
        self.query_parameters['export'] = self.export
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
results['temp_item'] = json.loads(response.text)
# self.log('Response : {0}'.format(response))
except CloudError as e:
            self.log('Could not get info for ApiExport.')
return results
    def format_item(self, item):
return item
def main():
AzureRMApiExportInfo()
if __name__ == '__main__':
main()
| 29.980315 | 113 | 0.548785 | [
"MIT"
] | audevbot/autorest.cli.debug | generated/intermediate/ansible-module-rest/azure_rm_apimanagementapiexport_info.py | 7,615 | Python |
# TODO: By PySCF-1.5 release
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1. code style
# * Indent: 3 -> 4
# * Constant should be all uppercase
# * Function/method should be all lowercase
# * Line wrap around 80 columns
# * Use either double quote or single quote, not mix
#
# 2. Conventions required by PySCF
# * Use PYSCF_TMPDIR to replace _TmpDir
#
# 3. Use proper functions provided by PySCF
#
# This file is adapted with permission from the wmme program of Gerald Knizia.
# See http://sites.psu.edu/knizia/software/
#====================================================
from __future__ import print_function
import numpy as np
from numpy import dot, array
from os import path
from sys import version_info
def GetModulePath():
# (hopefully) return the path of the .py file.
# idea is to leave wmme.py in the same directory as the wmme executable,
# and import invoke the scripts using it via, for example,
# PYTHONPATH=$HOME/dev/wmme:$PYTHONPATH python myscriptfile.py
import inspect
return path.dirname(path.abspath(inspect.getfile(inspect.currentframe())))
if 0:
# set executable/basis library directory explicitly.
_WmmeDir = "/home/cgk/dev/wmme"
else:
# set executable/basis library from path of wmme.py
_WmmeDir = None
_TmpDir = None # if None: use operating system default
_BasisLibDir = None # if None: same as _WmmeDir/bases
#ToAng = 0.5291772108
ToAng = 0.529177209 # molpro default.
def ElementNameDummy():
ElementNames = "X H He Li Be B C N O F Ne Na Mg Al Si P S Cl Ar K Ca Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr Rb Sr Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe Cs Ba La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb Lu Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi Po At Rn".split()
ElementNumbers = dict([(o,i) for (i,o) in enumerate(ElementNames)])
return ElementNames, ElementNumbers
ElementNames, ElementNumbers = ElementNameDummy()
def mdot(*args):
"""chained matrix product: mdot(A,B,C,..) = A*B*C*...
No attempt is made to optimize the contraction order."""
r = args[0]
for a in args[1:]:
r = dot(r,a)
return r
def dot2(A,B): return dot(A.flatten(),B.flatten())
def nCartY(l):
   return ((l+1)*(l+2))//2  # floor division keeps the count an integer under Python 3
class FAtom(object):
def __init__(self, Element, Position, Index):
self.Element = Element
self.Pos = Position
self.Index = Index
@property
def Label(self):
# return element and center index combined.
return "%2s%3s"%(self.Element,1 + self.Index)
@property
def iElement(self):
return ElementNumbers[self.Element]
def __str__(self):
return "%s (%6.3f,%6.3f,%6.3f)"%(self.Label, self.Pos[0], self.Pos[1], self.Pos[2])
class FAtomSet(object):
def __init__(self, Positions, Elements, Orientations=None, Name=None):
"""Positions: 3 x nAtom matrix. Given in atomic units (ABohr).
Elements: element name (e.g., H) for each of the positions.
Orientations: If given, a [3,3,N] array encoding the standard
orientation of the given atoms (for replicating potentials!). For
each atom there is a orthogonal 3x3 matrix denoting the ex,ey,ez
directions."""
self.Pos = Positions
assert(self.Pos.shape[0] == 3 and self.Pos.shape[1] == len(Elements))
self.Elements = Elements
self.Orientations = Orientations
self.Name = Name
def MakeXyz(self,NumFmt = "%15.8f",Scale=1.):
Lines = []
for i in range(len(self.Elements)):
Lines.append(" %5s {0} {0} {0}".format(NumFmt) % (\
self.Elements[i], Scale*self.Pos[0,i], Scale*self.Pos[1,i], Scale*self.Pos[2,i]))
return "\n".join(Lines)
def nElecNeutral(self):
"""return number of electrons present in the total system if neutral."""
return sum([ElementNumbers[o] for o in self.Elements])
def fCoreRepulsion1(self, iAt, jAt):
if iAt == jAt: return 0. # <- a core doesn't repulse itself.
ChA, ChB = [ElementNumbers[self.Elements[o]] for o in [iAt, jAt]]
return ChA * ChB / np.sum((self.Pos[:,iAt] - self.Pos[:,jAt])**2)**.5
def fCoreRepulsion(self):
N = len(self.Elements)
Charges = array([ElementNumbers[o] for o in self.Elements])
fCoreEnergy = 0
for i in range(N):
for j in range(i):
fCoreEnergy += self.fCoreRepulsion1(i,j)
#fCoreEnergy += Charges[i] * Charges[j] / np.sum((self.Pos[:,i] - self.Pos[:,j])**2)**.5
return fCoreEnergy
def __str__(self):
Caption = " %5s%15s %15s %15s" % ("ATOM", "POS/X", "POS/Y", "POS/Z")
return Caption + "\n" + self.MakeXyz()
def __len__(self): return len(self.Elements)
def __getitem__(self,key): return FAtom(self.Elements[key], self.Pos[:,key], key)
def __iter__(self):
for (iAt,(Type,Xyz)) in enumerate(zip(self.Elements, self.Pos.T)):
#yield (Type,Xyz)
yield FAtom(Type, Xyz, iAt)
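# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal example of constructing an FAtomSet. The coordinates are made-up values
# in bohr (the class expects atomic units) and serve only as an illustration.
def _example_atom_set():
    Pos = np.array([[0.0, 0.0,  0.0],   # x coordinates of O, H, H
                    [0.0, 1.4, -1.4],   # y coordinates
                    [0.0, 1.1,  1.1]])  # z coordinates
    Atoms = FAtomSet(Pos, ["O", "H", "H"])
    print(Atoms.MakeXyz())              # xyz block, one line per atom
    print(Atoms.nElecNeutral())         # 10 electrons for neutral water
    return Atoms.fCoreRepulsion()       # nuclear repulsion energy in hartree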
class FBasisShell(object):
"""A generally contracted shell of spherical harmonic basis functions."""
def __init__(self, l, Exp, Co):
self.l = l
assert(isinstance(l,int) and l >= 0 and l <= 8)
self.Exp = np.array(Exp)
assert(self.Exp.ndim == 1)
self.Co = np.array(Co)
assert(self.Co.ndim == 2 and self.Co.shape[0] == len(self.Exp))
self.Element = None # designated element for the basis function
self.Comment = None # comment on the basis function (e.g., literature reference)
@property
def nExp(self):
return len(self.Exp)
@property
def nCo(self):
return self.Co.shape[1]
@property
def nFn(self):
return self.nCo * (2*self.l + 1)
@property
def nFnCa(self):
return self.nCo * nCartY(self.l)
@property
def AngMom(self): return self.l
def __str__(self):
Lines = []
Lines.append("BasisShell [l = %i, nExp = %i, nCo = %i]" % (self.l, self.nExp, self.nCo))
def FmtA(L):
return ", ".join("%12.5f" % o for o in L)
Lines.append(" Exps = [%s]" % FmtA(self.Exp))
for iCo in range(self.nCo):
Lines.append(" Co[%2i] = [%s]" % (iCo, FmtA(self.Co[:,iCo])))
return "\n".join(Lines)
class FBasisShell1(object):
"""A FBasisShell which is placed on a concrete atom."""
def __init__(self, Atom, ShellFn):
self.Atom = Atom
self.Fn = ShellFn
assert(isinstance(self.Fn, FBasisShell))
@property
def Pos(self):
return self.Atom.Pos
@property
def iAtom(self):
return self.Atom.Index
@property
def l(self): return self.Fn.l
@property
def nExp(self): return self.Fn.nExp
@property
def Exp(self): return self.Fn.Exp
@property
def nCo(self): return self.Fn.nCo
@property
def Co(self): return self.Fn.Co
@property
def nFn(self): return self.Fn.nFn
@property
def nFnCa(self): return self.Fn.nFnCa
class FBasisSet(object):
def __init__(self, Shells, Atoms):
# list of FBasisShell1 objects.
self.Shells = Shells
self.Atoms = Atoms
@property
def nFn(self):
n = 0
for Sh in self.Shells:
n += Sh.nFn
return n
@property
def nFnCa(self):
n = 0
for Sh in self.Shells:
n += Sh.nFnCa
return n
def __str__(self):
Lines = []
for o in self.Shells:
Lines.append("Atom %s %s" % (o.Atom, o.Fn))
return "\n".join(Lines)
def FmtCr(self):
#f = 1./ToAng
f = 1.
Lines = []
def Emit(s):
Lines.append(s)
def EmitArray(Name, A):
#Emit(" " + Name + "<" + " ".join("%.16e"%o for o in A) + ">")
Emit(" " + Name + "<" + " ".join("%r"%o for o in A) + ">")
# collect all unique FBasisShell objects.
BasisFns = []
BasisFnIds = {} # map id(BasisFn)->(index)
for Shell in self.Shells:
if id(Shell.Fn) not in BasisFnIds:
BasisFnIds[id(Shell.Fn)] = len(BasisFns)
BasisFns.append(Shell.Fn)
pass
Emit("Basis<Version<0.1> nFns<%i> nShells<%i>" % (len(BasisFns), len(self.Shells)))
# store the function declarations...
def EmitBasisFn(Fn):
Emit(" Fn<Id<%i> l<%i> nExp<%i> nCo<%i>" % (
BasisFnIds[id(Fn)], Fn.l, Fn.nExp, Fn.nCo))
EmitArray("Exp", Fn.Exp)
for Co in Fn.Co.T:
EmitArray("Co", Co)
Emit(" >")
pass
for Fn in BasisFns:
EmitBasisFn(Fn)
# ...and their distribution amongst atoms.
def EmitShell(Sh):
#Emit(" Shell<iAt<%i> x<%.16e> y<%.16e> z<%.16e> FnId<%i>>" % (
Emit(" Shell<iAt<%i> x<%r> y<%r> z<%r> FnId<%i>>" % (
Sh.Atom.Index, f*Sh.Atom.Pos[0], f*Sh.Atom.Pos[1], f*Sh.Atom.Pos[2], BasisFnIds[id(Sh.Fn)]))
pass
for Shell in self.Shells:
EmitShell(Shell)
Emit(">") # end of Basis
return "\n".join(Lines)
def GetAngmomList(self):
# list of all basis function angular momenta in the basis, for converting basis function orders and types.
ls = []
for Shell in self.Shells:
for iCo in range(Shell.nCo):
ls.append(Shell.l)
return ls
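# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A rough sketch of assembling an explicit FBasisSet by hand: one contracted s shell
# placed on every atom of an FAtomSet. The exponents and contraction coefficients are
# arbitrary placeholder numbers, not a real basis set.
def _example_basis_set(Atoms):
    SFn = FBasisShell(l=0,
                      Exp=[13.0, 1.96, 0.44],
                      Co=[[0.2], [0.6], [0.4]])       # nExp x nCo contraction matrix
    Shells = [FBasisShell1(At, SFn) for At in Atoms]  # place the shell on each atom
    Basis = FBasisSet(Shells, Atoms)
    return Basis.nFn, Basis.FmtCr()                   # basis size and wmme input format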
class FIntegralContext(object):
"""contains data describing how to evaluate quantum chemistry matrix
elements on electronic system as defined by the given atoms and basis
sets.
Note: Basis sets must either be basis set names (i.e., library names)
or FBasisSet objects.
"""
def __init__(self, Atoms, OrbBasis, FitBasis=None, BasisLibs=None):
self.Atoms = Atoms
self.OrbBasis = OrbBasis
self.FitBasis = FitBasis
self.BasisLibs = BasisLibs
def _InvokeBfint(self, Args, Outputs=None, Inputs=None, MoreBases=None):
Bases = {}
if self.OrbBasis: Bases['--basis-orb'] = self.OrbBasis
if self.FitBasis: Bases['--basis-fit'] = self.FitBasis
if MoreBases:
Bases = dict(list(Bases.items()) + list(MoreBases.items()))
return _InvokeBfint(self.Atoms, Bases, self.BasisLibs, Args, Outputs, Inputs)
def MakeBaseIntegrals(self, Smh=True, MakeS=False):
"""Invoke bfint to calculate CoreEnergy (scalar), CoreH (nOrb x nOrb),
Int2e_Frs (nFit x nOrb x nOrb), and overlap matrix (nOrb x nOrb)"""
# assemble arguments to integral generation program
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
# ^- calculate integrals in symmetrically orthogonalized AO basis
Outputs = []
Outputs.append(("--save-coreh", "INT1E"))
Outputs.append(("--save-fint2e", "INT2E"))
Outputs.append(("--save-overlap", "OVERLAP"))
CoreH, Int2e, Overlap = self._InvokeBfint(Args, Outputs)
nOrb = CoreH.shape[0]
Int2e = Int2e.reshape((Int2e.shape[0], nOrb, nOrb))
CoreEnergy = self.Atoms.fCoreRepulsion()
if MakeS:
return CoreEnergy, CoreH, Int2e, Overlap
else:
return CoreEnergy, CoreH, Int2e
def MakeOverlaps2(self, OrbBasis2):
"""calculate overlap between current basis and a second basis, as
described in OrbBasis2. Returns <1|2> and <2|2> matrices."""
Args = []
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs = []
Outputs.append(("--save-overlap-2", "OVERLAP_2"))
Outputs.append(("--save-overlap-12", "OVERLAP_12"))
#Outputs.append(("--save-overlap", "OVERLAP"))
Overlap2, Overlap12 = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return Overlap2, Overlap12
def MakeOverlap(self, OrbBasis2=None):
"""calculate overlap within main orbital basis, and, optionally, between main
orbital basis and a second basis, as described in OrbBasis2.
Returns <1|1>, <1|2>, and <2|2> matrices."""
Args = []
Outputs = []
Outputs.append(("--save-overlap", "OVERLAP_1"))
if OrbBasis2 is not None:
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs.append(("--save-overlap-12", "OVERLAP_12"))
Outputs.append(("--save-overlap-2", "OVERLAP_2"))
return self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
else:
MoreBases = None
Overlap, = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return Overlap
def MakeNuclearAttractionIntegrals(self, Smh=True):
"""calculate nuclear attraction integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-vnucN", "VNUC_N"))
VNucN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(VNucN.shape[0]**.5 + .5)
assert(nOrb**2 == VNucN.shape[0])
assert(VNucN.shape[1] == len(self.Atoms))
return VNucN.reshape(nOrb, nOrb, VNucN.shape[1])
def MakeNuclearSqDistanceIntegrals(self, Smh=True):
"""calculate <mu|(r-rA)^2|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-rsqN", "RSQ_N"))
RsqN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(RsqN.shape[0]**.5 + .5)
assert(nOrb**2 == RsqN.shape[0])
assert(RsqN.shape[1] == len(self.Atoms))
return RsqN.reshape(nOrb, nOrb, RsqN.shape[1])
def MakeKineticIntegrals(self, Smh=True):
"""calculate <mu|-1/2 Laplace|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-kinetic", "EKIN"))
Op = self._InvokeBfint(Args, Outputs)[0]
return Op
def MakeDipoleIntegrals(self, Smh=True):
r"""calculate dipole operator matrices <\mu|w|\nu> (w=x,y,z) in
main basis, for each direction. Returns nAo x nAo x 3 array."""
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
Outputs = []
Outputs.append(("--save-dipole", "DIPN"))
DipN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(DipN.shape[0]**.5 + .5)
assert(nOrb**2 == DipN.shape[0])
assert(DipN.shape[1] == 3)
return DipN.reshape(nOrb, nOrb, 3)
def MakeOrbitalsOnGrid(self, Orbitals, Grid, DerivativeOrder=0):
"""calculate values of molecular orbitals on a grid of 3d points in space.
Input:
- Orbitals: nAo x nOrb matrix, where nAo must be compatible with
self.OrbBasis. The AO dimension must be contravariant AO (i.e., not SMH).
- Grid: 3 x nGrid array giving the coordinates of the grid points.
- DerivativeOrder: 0: only orbital values,
1: orbital values and 1st derivatives,
2: orbital values and up to 2nd derivatives.
Returns:
- nGrid x nDerivComp x nOrb array. If DerivativeOrder is 0, the
DerivComp dimension is omitted.
"""
Args = [("--eval-orbitals-dx=%s" % DerivativeOrder)]
Inputs = [("--eval-orbitals", "ORBITALS.npy", Orbitals)]\
+ [("--grid-coords", "GRID.npy", Grid)]
Outputs = [("--save-grid-values", "ORBS_ON_GRID")]
(ValuesOnGrid,) = self._InvokeBfint(Args, Outputs, Inputs)
nComp = [1,4,10][DerivativeOrder]
if nComp != 1:
ValuesOnGrid = ValuesOnGrid.reshape((Grid.shape[1], nComp, Orbitals.shape[1]))
return ValuesOnGrid
def MakeRaw2eIntegrals(self, Smh=True, Kernel2e="coulomb"):
"""compute Int2e_Frs (nFit x nOrb x nOrb) and fitting metric Int2e_FG (nFit x nFit),
where the fitting metric is *not* absorbed into the 2e integrals."""
# assemble arguments to integral generation program
Args = []
if Smh:
Args.append("--orb-trafo=Smh")
# ^- calculate integrals in symmetrically orthogonalized AO basis
Args.append("--kernel2e='%s'" % Kernel2e)
Args.append("--solve-fitting-eq=false")
Outputs = []
Outputs.append(("--save-fint2e", "INT2E_3IX"))
Outputs.append(("--save-fitting-metric", "INT2E_METRIC"))
Int2e_Frs, Int2e_FG = self._InvokeBfint(Args, Outputs)
nOrb = int(Int2e_Frs.shape[1]**.5 + .5)
assert(nOrb**2 == Int2e_Frs.shape[1])
Int2e_Frs = Int2e_Frs.reshape((Int2e_Frs.shape[0], nOrb, nOrb))
assert(Int2e_Frs.shape[0] == Int2e_FG.shape[0])
assert(Int2e_FG.shape[0] == Int2e_FG.shape[1])
return Int2e_FG, Int2e_Frs
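# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A rough sketch of driving FIntegralContext end to end. The geometry file and the
# basis/library names below are assumptions for illustration; they must be resolvable
# by the external 'wmme' executable and its basis library directory.
def _example_integral_context():
    Atoms = ReadAtomsFromXyzFile("h2o.xyz")                 # hypothetical geometry file
    ic = FIntegralContext(Atoms,
                          OrbBasis="cc-pVDZ",               # assumed orbital basis name
                          FitBasis="univ-JKFIT",            # assumed fitting basis name
                          BasisLibs=["def2-nzvpp-jkfit.libmol"])  # assumed library file
    CoreEnergy, CoreH, Int2e = ic.MakeBaseIntegrals(Smh=True)
    S = ic.MakeOverlap()                                    # overlap in the main basis
    return CoreEnergy, CoreH.shape, Int2e.shape, S.shape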
def _InvokeBfint(Atoms, Bases, BasisLibs, BaseArgs, Outputs, Inputs=None):
"""Outputs: an array of tuples (cmdline-arguments,filename-base).
We will generate arguments for each of them and try to read the
corresponding files as numpy arrays and return them in order."""
from tempfile import mkdtemp
from shutil import rmtree
#from commands import getstatusoutput
from subprocess import check_output, CalledProcessError
# make a directory to store our input/output in.
BasePath = mkdtemp(prefix="wmme.", dir=_TmpDir)
def Cleanup():
rmtree(BasePath)
pass
BfIntDir = _WmmeDir
if BfIntDir is None: BfIntDir = GetModulePath()
BasisLibDir = _BasisLibDir
if BasisLibDir is None:
BasisLibDir = path.join(BfIntDir,"bases")
MakeIntegralsExecutable = path.join(BfIntDir,"wmme")
# assemble arguments to integral generation program
FileNameXyz = path.join(BasePath, "ATOMS")
Args = [o for o in BaseArgs]
Args.append("--matrix-format=npy")
for BasisLib in BasisLibs:
Args.append("--basis-lib=%s" % path.join(BasisLibDir, BasisLib))
Args.append("--atoms-au=%s" % FileNameXyz)
iWrittenBasis = 0
for (ParamName, BasisObj) in Bases.items():
if BasisObj is None:
continue
if isinstance(BasisObj, FBasisSet):
# basis is given as an explicit FBasisSet object.
# Write the basis set to disk and supply the file name as argument
BasisFile = path.join(BasePath, "BASIS%i" % iWrittenBasis)
iWrittenBasis += 1
with open(BasisFile, "w") as File:
File.write(BasisObj.FmtCr())
Args.append("%s='!%s'" % (ParamName, BasisFile))
else:
assert(isinstance(BasisObj, str))
# it's just a basis set name: append the name to the arguments.
# (set will be read from library by wmme itself)
Args.append("%s=%s" % (ParamName, BasisObj))
pass
# make file names and arguments for output arrays
FileNameOutputs = []
for (ArgName,FileNameBase) in Outputs:
FileName = path.join(BasePath, FileNameBase)
FileNameOutputs.append(FileName)
Args.append("%s='%s'" % (ArgName, FileName))
XyzLines = "%i\n\n%s\n" % (len(Atoms), Atoms.MakeXyz("%24.16f"))
# ^- note on the .16f: it actually does make a difference. I had .8f
# there before, and it lead to energy changes on the order of 1e-8
# when treating only non-redundant subsystem out of a symmetric
# arrangement.
try:
with open(FileNameXyz, "w") as File:
File.write(XyzLines)
# save input arrays if provided.
if Inputs:
for (ArgName,FileNameBase,Array) in Inputs:
FileName = path.join(BasePath, FileNameBase)
np.save(FileName,Array)
Args.append("%s='%s'" % (ArgName, FileName))
Cmd = "%s %s" % (MakeIntegralsExecutable, " ".join(Args))
#print("!Invoking %s\n" % Cmd)
#iErr, Output = getstatusoutput(Cmd)
#if ( iErr != 0 ):
try:
Output = check_output(Cmd, shell=True)
if (version_info) >= (3,0):
# it returns a byte string in Python 3... which wouldn't be a problem
# if not all OTHER literals were converted to unicode implicitly.
Output = Output.decode("utf-8")
except CalledProcessError as e:
raise Exception("Integral calculation failed. Output was:\n%s\nException was: %s" % (e.output, str(e)))
OutputArrays = []
for FileName in FileNameOutputs:
OutputArrays.append(np.load(FileName))
except:
Cleanup()
raise
# got everything we need. Delete the temporary directory.
Cleanup()
return tuple(OutputArrays)
def ReadXyzFile(FileName,Scale=1./ToAng):
Text = open(FileName,"r").read()
Lines = Text.splitlines()
# allowed formats: <nAtoms> \n Desc \n <atom-list>
# or: <atom-list> (without any headers)
# in the first case, only the first nAtoms+2 lines are read, in the
# second case everything which does not look like a xyz line is
# ignored.
nAtoms = None
r = 0,-1
if ( len(Lines[0].split()) == 1 ):
nAtoms = int(Lines[0].split()[0])
r = 2,nAtoms+2
Atoms = []
Xyz = []
for Line in Lines:
ls = Line.split()
try:
Atom = ls[0]
x,y,z = float(ls[1]), float(ls[2]), float(ls[3])
except:
continue
Atom = Atom[0].upper() + Atom[1:].lower()
# maybe we should allow for (and ignore) group numbers after the
# elements?
if Atom not in ElementNames:
raise Exception("while reading '%s': unrecognized element '%s'." % (FileName,Atom))
Atoms.append(Atom)
Xyz.append((x,y,z))
Xyz = Scale*array(Xyz).T
if 0:
print("*read '%s':\n%s" % (FileName, str(FAtomSet(Xyz, Atoms))))
return Xyz, Atoms
def ReadAtomsFromXyzFile(FileName, Scale=1./ToAng):
Xyz,Elements = ReadXyzFile(FileName, Scale)
return FAtomSet(Xyz, Elements)
| 37.158516 | 274 | 0.621103 | [
"Apache-2.0"
] | JFurness1/pyscf | pyscf/tools/Molpro2Pyscf/wmme.py | 22,035 | Python |
__author__ = 'aakilomar'
import requests, json, time
from timeit import default_timer as timer
requests.packages.urllib3.disable_warnings()
host = "https://localhost:8443"
def cancel_event(eventid):
post_url = host + "/api/event/cancel/" + str(eventid)
return requests.post(post_url,None, verify=False).json()
def add_user(phone):
post_url = host + "/api/user/add/" + str(phone)
return requests.post(post_url,None, verify=False).json()
def rsvp(eventid,userid,message):
post_url = host + "/api/event/rsvp/" + str(eventid) + "/" + str(userid) + "/" + str(message)
return requests.post(post_url,None, verify=False).json()
def rsvpRequired(userid):
post_url = host + "/api/event/rsvprequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def voteRequired(userid):
post_url = host + "/api/event/voterequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def upcomingVotes(groupid):
post_url = host + "/api/event/upcoming/vote/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
def upcomingMeeting(groupid):
post_url = host + "/api/event/upcoming/meeting/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
def votesPerGroupForEvent(groupid, eventid):
post_url = host + "/api/event/rsvp/totalspergroup/" + str(groupid) + "/" + str(eventid)
return requests.post(post_url,None, verify=False).json()
def addLogBook(userid, groupid, message):
post_url = host + "/api/logbook/add/" + str(userid) + "/" + str(groupid) + "/" + message
return requests.post(post_url,None, verify=False).json()
def addLogBookWithDate(userid, groupid, message, actionbydate):
post_url = host + "/api/logbook/addwithdate/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + actionbydate
return requests.post(post_url,None, verify=False).json()
def addLogBookWithDateAndAssign(userid, groupid, message, actionbydate, assigntouserid):
post_url = host + "/api/logbook/addwithdateandassign/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + actionbydate + "/" + str(assigntouserid)
return requests.post(post_url,None, verify=False).json()
def addLogBook(userid, groupid, message, replicate):
post_url = host + "/api/logbook/add/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + str(replicate)
return requests.post(post_url,None, verify=False).json()
def listReplicated(groupid):
post_url = host + "/api/logbook/listreplicated/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
def listReplicated(groupid, completed):
post_url = host + "/api/logbook/listreplicated/" + str(groupid) + "/" + str(completed)
return requests.get(post_url,None, verify=False).json()
def setInitiatedSession(userid):
post_url = host + "/api/user/setinitiatedsession/" + str(userid)
return requests.post(post_url,None, verify=False).json()
def listReplicatedMessage(groupid, message):
post_url = host + "/api/logbook/listreplicatedbymessage/" + str(groupid) + "/" + message
return requests.get(post_url,None, verify=False).json()
def createAccount(userid,groupid,accountname):
post_url = host + "/api/account/add/" + str(userid) + "/" + str(groupid) + "/" + str(accountname)
return requests.post(post_url,None, verify=False).json()
def ussdStart(phonenumber,enteredUssd):
post_url = host + "/ussd/start?msisdn=" + str(phonenumber)
return requests.get(post_url,None, verify=False)
def add_user_to_group(userid,groupid):
post_url = host + "/api/group/add/usertogroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def remove_user_from_group(userid,groupid):
post_url = host + "/api/group/remove/userfromgroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def get_user_join_group(userid,groupid):
post_url = host + "/api/group/get/userjoingroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).content
def rsvpRequired(userid):
post_url = host + "/api/event/rsvprequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def voteRequired(userid):
post_url = host + "/api/event/voterequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def add_event(userid,groupid, name):
post_url = host + "/api/event/add/" + str(userid) + "/" + str(groupid) + "/" + name
return requests.post(post_url,None, verify=False).json()
#print cancel_event(5166)
#user = add_user("0823333332")
#user = add_user("0821111111")
#print "user-->" + str(user)
#print rsvp(5167,user['id'],"no")
#print rsvpRequired(user['id'])
#print voteRequired(user['id'])
#print upcomingVotes(231)
#print votesPerGroupForEvent(194,5103)
#print addLogBook(1,85,"X must do Y")
#print addLogBook(1,88,"Somebody must Y",True) # has sub groups
#print addLogBook(1,85,"Somebody must do X",True) # no subgroups
#print listReplicated(88,False)
#print addLogBookWithDateAndAssign(1,21,"aakil must do Y","2015-12-13 08:45:00",588)
#print addLogBookWithDate(1,21,"someone must do Y","2015-12-13 08:45:00")
#print setInitiatedSession(588)
#print(listReplicatedMessage(88,"Somebody must X"))
#print(createAccount(1,21,"acc 21"))
#for i in range(1,7,1):
## start = timer()
# print ussdStart("0826607134","")
# end = timer()
# print(end - start)
#print add_user_to_group(588,82)
#print remove_user_from_group(588,82)
#print get_user_join_group(588,82)
#print voteRequired(817)
print rsvpRequired(817)
print "klaarie"
| 40.485714 | 159 | 0.702541 | [
"BSD-3-Clause"
] | Siyanda-Mzam/grassroot-platform | docs/tests/adhoc_requests.py | 5,668 | Python |
CLOUDFORMATION_TEMPLATE = """
AWSTemplateFormatVersion: '2010-09-09'
Parameters:
s3FileName:
Type: String
environment:
Type: String
deploymentBucket:
Type: String
Resources:
# Place your AWS resources here
"""
| 13.764706 | 38 | 0.709402 | [
"MIT"
] | totalpunch/TPD-Pete | tpd_pete/template/template.py | 234 | Python |
from .attributions import Attributions, LIGAttributions
from .explainer import BaseExplainer
from .explainers.question_answering import QuestionAnsweringExplainer
from .explainers.sequence_classification import SequenceClassificationExplainer
| 48.6 | 79 | 0.901235 | [
"Apache-2.0"
] | MichalMalyska/transformers-interpret | transformers_interpret/__init__.py | 243 | Python |
#!---------------------------------------------------------------------!
#! Written by Madu Manathunga on 07/01/2021 !
#! !
#! Copyright (C) 2020-2021 Merz lab !
#! Copyright (C) 2020-2021 Götz lab !
#! !
#! This source file is a part of QUICK-GenInt code generator and !
#! is subjected to the terms of the Mozilla Public License, v. 2.0. !
#! If a copy of the MPL was not distributed with this file, you can !
#! obtain one at http://mozilla.org/MPL/2.0/. !
#!_____________________________________________________________________!
#!---------------------------------------------------------------------!
#! This source file contains classes necessary for generating one !
#! electron integrals. Note that we use vertical recurrence relations !
#! algorithm developed by Obara and Saika. See J. Chem. Phys. 1986, 84,!
#! 3963−3974 paper for theoretical details. !
#! !
#!---------------------------------------------------------------------!
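# Illustrative note (not part of the original generator): gen_int() below emits
# code for the Obara-Saika vertical recurrence, which for a [p|f] auxiliary
# integral schematically reads
#   [p_i|f]^(m) = PA_i*[s|f]^(m) - PC_i*[s|f]^(m+1)
#                 + N_i(f)*TwoZetaInv*([s|f-1_i]^(m) - [s|f-1_i]^(m+1))
# where N_i(f) is the angular momentum of f along direction i; the symbol names
# (PA, PC, TwoZetaInv) follow the generated C++ below.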
import src.common.params as params
import src.common.file_handler as file_handler
from src.oei.iclass.OEint import OEint
# [p|f] class, subclass of OEint
class PFint(OEint):
def gen_int(self):
# write code paths for integrals. Note that we use C++ classes here.
for m in range(0,self.max_m+1):
if m == 0:
self.fhc.write("\n/* PF true integral, m=%d */ \n" % (m))
self.fhd.write("\n/* PF true integral, m=%d */ \n" % (m))
else:
self.fhc.write("\n/* PF auxilary integral, m=%d */ \n" % (m))
self.fhd.write("\n/* PF auxilary integral, m=%d */ \n" % (m))
self.fhc.write("class PFint_%d{ \n" % (m))
self.fhc.write("public: \n")
# write class variables; convention being used is s=0, p=1-3, d=4-9, f=10-19, g=20-34
self.fhc.write("#ifdef REG_PF \n")
for i in range(0,10):
for j in range(0,3):
self.fhc.write(" QUICKDouble x_%d_%d; // %s, %s \n" % (j+1, i+10, self.p_lbl[j], self.f_lbl[i]))
self.fhc.write("#endif \n")
# write class functions
self.fhc.write(" %s PFint_%d(QUICKDouble PAx, QUICKDouble PAy, QUICKDouble PAz,\n\
QUICKDouble PBx, QUICKDouble PBy, QUICKDouble PBz, QUICKDouble PCx, QUICKDouble PCy, QUICKDouble PCz,\n\
QUICKDouble TwoZetaInv, QUICKDouble* store, QUICKDouble* YVerticalTemp); \n" % (self.func_qualifier, m))
self.fhc.write("}; \n")
# write partial classes, these are useful to reduce the registry pressure
for i in range(0,10):
self.fhc.write("\n/* PF integral partial class, - Part %d, m=%d */ \n" % (i+1, m))
self.fhc.write("class PFint_%d_%d{ \n" % (m, i+1))
self.fhc.write("public: \n")
#self.fhc.write("#ifdef REG_PF \n")
for j in range(0,3):
self.fhc.write(" QUICKDouble x_%d_%d; // %s, %s \n" % (j+1, i+10, self.p_lbl[j], self.f_lbl[i]))
#self.fhc.write("#endif \n")
# write partial class functions
self.fhc.write(" %s PFint_%d_%d(QUICKDouble PAx, QUICKDouble PAy, QUICKDouble PAz,\n\
QUICKDouble PBx, QUICKDouble PBy, QUICKDouble PBz, QUICKDouble PCx, QUICKDouble PCy, QUICKDouble PCz,\n\
QUICKDouble TwoZetaInv, QUICKDouble* store, QUICKDouble* YVerticalTemp); \n" % (self.func_qualifier, m, i+1))
self.fhc.write("}; \n")
# write function definitions
self.fhd.write("%s PFint_%d::PFint_%d(QUICKDouble PAx, QUICKDouble PAy, QUICKDouble PAz,\n\
QUICKDouble PBx, QUICKDouble PBy, QUICKDouble PBz, QUICKDouble PCx, QUICKDouble PCy, QUICKDouble PCz,\n\
QUICKDouble TwoZetaInv, QUICKDouble* store, QUICKDouble* YVerticalTemp){ \n\n" % (self.func_qualifier, m, m))
self.fhd.write(" SDint_%d sd_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|d] for m=%d \n" % (m, m, m))
self.fhd.write(" SDint_%d sd_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|d] for m=%d \n" % (m+1, m+1, m+1))
self.fhd.write(" SFint_%d sf_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|f] for m=%d \n" % (m, m, m))
self.fhd.write(" SFint_%d sf_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|f] for m=%d \n\n" % (m+1, m+1, m+1))
# save all computed values into class variables that will reside in register/lmem space
self.fhd.write("#ifdef REG_PF \n")
for i in range(0,10):
for j in range(0,3):
tmp_mcal=[params.Mcal[i+10][0], params.Mcal[i+10][1], params.Mcal[i+10][2]]
for k in range(0,3):
#self.fhd.write("a(i,j) %d %d %d %d %d\n" % (tmp_mcal[0], tmp_mcal[1], tmp_mcal[2], params.Mcal[j+1][k], tmp_mcal[k]))
if params.Mcal[j+1][k] != 0:
self.fhd.write("#ifdef REG_SF \n")
self.fhd.write(" x_%d_%d = %s * sf_%d.x_%d_%d - %s * sf_%d.x_%d_%d; \n" % (j+1, i+10, self.PA[k], m, 0, i+10,\
self.PC[k], m+1, 0, i+10))
self.fhd.write("#else \n")
self.fhd.write(" x_%d_%d = %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d)- %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d); \n" % (j+1, i+10, self.PA[k], 0, i+10, m,\
self.PC[k], 0, i+10, m+1))
self.fhd.write("#endif \n")
if tmp_mcal[k] != 0:
tmp_mcal[k] -= 1
tmp_i=params.trans[tmp_mcal[0]][tmp_mcal[1]][tmp_mcal[2]]
self.fhd.write(" x_%d_%d += TwoZetaInv * %f * (sd_%d.x_%d_%d - sd_%d.x_%d_%d); \n" % (j+1, i+10, params.Mcal[i+10][k], m, 0, tmp_i-1, m+1, 0, tmp_i-1))
break
self.fhd.write("#else \n")
# save all computed values into store array in global memory
self.fhd.write(" QUICKDouble val; \n")
for i in range(0,10):
for j in range(0,3):
tmp_mcal=[params.Mcal[i+10][0], params.Mcal[i+10][1], params.Mcal[i+10][2]]
for k in range(0,3):
if params.Mcal[j+1][k] != 0:
self.fhd.write("#ifdef REG_SF \n")
self.fhd.write(" val = %s * sf_%d.x_%d_%d - %s * sf_%d.x_%d_%d; \n" % (self.PA[k], m, 0, i+10,\
self.PC[k], m+1, 0, i+10))
self.fhd.write("#else \n")
self.fhd.write(" val = %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d) - %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d); \n" % (self.PA[k], 0, i+10, m,\
self.PC[k], 0, i+10, m+1))
self.fhd.write("#endif \n")
if tmp_mcal[k] != 0:
tmp_mcal[k] -= 1
tmp_i=params.trans[tmp_mcal[0]][tmp_mcal[1]][tmp_mcal[2]]
self.fhd.write(" val += TwoZetaInv * %f * (sd_%d.x_%d_%d - sd_%d.x_%d_%d); \n" % (params.Mcal[i+10][k], m, 0, tmp_i-1, m+1, 0, tmp_i-1))
self.fhd.write(" LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d) = val; \n" % (j+1, i+10, m))
break
self.fhd.write("#endif \n")
self.fhd.write("\n } \n")
# write definition for partial classes, note that we will not write code for global memory based implementation here
for i in range(0,10):
self.fhd.write("\n/* PF integral partial class - Part %d, m=%d */ \n" % (i+1, m))
self.fhd.write("%s PFint_%d_%d::PFint_%d_%d(QUICKDouble PAx, QUICKDouble PAy, QUICKDouble PAz,\n\
QUICKDouble PBx, QUICKDouble PBy, QUICKDouble PBz, QUICKDouble PCx, QUICKDouble PCy, QUICKDouble PCz,\n\
QUICKDouble TwoZetaInv, QUICKDouble* store, QUICKDouble* YVerticalTemp){ \n\n" % (self.func_qualifier, m, i+1, m, i+1))
self.fhd.write(" SDint_%d sd_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|d] for m=%d \n" % (m, m, m))
self.fhd.write(" SDint_%d sd_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|d] for m=%d \n" % (m+1, m+1, m+1))
self.fhd.write(" SFint_%d sf_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|f] for m=%d \n" % (m, m, m))
self.fhd.write(" SFint_%d sf_%d(PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); // construct [s|f] for m=%d \n\n" % (m+1, m+1, m+1))
# save all computed values into class variables that will reside in register/lmem space
for j in range(0,3):
tmp_mcal=[params.Mcal[i+10][0], params.Mcal[i+10][1], params.Mcal[i+10][2]]
for k in range(0,3):
#self.fhd.write("a(i,j) %d %d %d %d %d\n" % (tmp_mcal[0], tmp_mcal[1], tmp_mcal[2], params.Mcal[j+1][k], tmp_mcal[k]))
if params.Mcal[j+1][k] != 0:
self.fhd.write("#ifdef REG_SF \n")
self.fhd.write(" x_%d_%d = %s * sf_%d.x_%d_%d - %s * sf_%d.x_%d_%d; \n" % (j+1, i+10, self.PA[k], m, 0, i+10,\
self.PC[k], m+1, 0, i+10))
self.fhd.write("#else \n")
self.fhd.write(" x_%d_%d = %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d)- %s * LOCSTOREFULL(store, %d, %d, STOREDIM, STOREDIM, %d); \n" % (j+1, i+10, self.PA[k], 0, i+10, m,\
self.PC[k], 0, i+10, m+1))
self.fhd.write("#endif \n")
if tmp_mcal[k] != 0:
tmp_mcal[k] -= 1
tmp_i=params.trans[tmp_mcal[0]][tmp_mcal[1]][tmp_mcal[2]]
self.fhd.write(" x_%d_%d += TwoZetaInv * %f * (sd_%d.x_%d_%d - sd_%d.x_%d_%d); \n" % (j+1, i+10, params.Mcal[i+10][k], m, 0, tmp_i-1, m+1, 0, tmp_i-1))
break
self.fhd.write("\n } \n")
# generate code to save computed [p|f] integral
def save_int(self):
self.fha.write("\n // PF integral, m=%d \n" % (0))
self.fha.write(" if(I == 1 && J == 3){ \n")
self.fha.write(" PFint_0 pf(PAx, PAy, PAz, PBx, PBy, PBz, PCx, PCy, PCz, TwoZetaInv, store, YVerticalTemp); \n")
self.fha.write("#ifdef REG_PF \n")
for i in range(0,10):
for j in range(0,3):
self.fha.write(" LOCSTORE(store, %d, %d, STOREDIM, STOREDIM) = pf.x_%d_%d;\n" % (j+1, i+10, j+1, i+10))
self.fha.write("#endif \n")
# include print statements if debug option is on
if OEint.debug == 1:
self.fha.write("\n#ifdef DEBUG_OEI \n")
for i in range(0,10):
for j in range(0,3):
self.fha.write(" printf(\"II %%d JJ %%d %s store[%d,%d] = %%f \\n\", II, JJ, LOCSTORE(store, %d, %d, STOREDIM, STOREDIM)); \n" % ( "PF", j+1, i+10, j+1, i+10))
self.fha.write("#endif \n\n")
self.fha.write(" } \n")
| 63.807292 | 214 | 0.476941 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Madu86/QUICK-GenInt | src/oei/iclass/PFint.py | 12,254 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('workshops', '0062_no_invoice_for_historic_events'),
('workshops', '0062_add_stalled_unresponsive_tags'),
]
operations = [
]
| 20.25 | 61 | 0.685185 | [
"MIT"
] | aditnryn/amy | workshops/migrations/0063_merge.py | 324 | Python |
"""
Django settings for project_name project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# for best-practices.
# SECURITY WARNING: keep the secret key used in production secret!
# Please set SECRET_KEY environment variable in your production environment
# (e.g. Heroku).
SECRET_KEY = os.getenv('SECRET_KEY', 'django-insecure-*%$(!wsn1rre5@c!$jm7w&$+s3y0xqn%cnmk)&6(ukjn)18b!0')
# Automatically determine environment by detecting if DATABASE_URL variable.
# DATABASE_URL is provided by Heroku if a database add-on is added
# (e.g. Heroku Postgres).
PRODUCTION = os.getenv('DATABASE_URL') is not None
# SECURITY WARNING: don't run with debug turned on in production!
# If you want to enable debugging on Heroku for learning purposes,
# set this to True.
DEBUG = not PRODUCTION
HEROKU_APP_NAME = os.getenv('HEROKU_APP_NAME', '')
ALLOWED_HOSTS = [f'{HEROKU_APP_NAME}.herokuapp.com']
if not PRODUCTION:
ALLOWED_HOSTS += ['.localhost', '127.0.0.1', '[::1]']
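# Illustrative example (app name is hypothetical): with HEROKU_APP_NAME set to
# 'myapp', production requests are only accepted for 'myapp.herokuapp.com';
# local development additionally allows the localhost-style hosts above.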
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project_name.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR / 'templates',
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project_name.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Set database settings automatically using DATABASE_URL.
if PRODUCTION:
DATABASES['default'] = dj_database_url.config(
conn_max_age=600, ssl_require=True
)
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
# Feel free to change these according to your needs.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# This is the directory for storing `collectstatic` results.
# This shouldn't be included in your Git repository.
STATIC_ROOT = BASE_DIR / 'staticfiles'
# You can use this directory to store project-wide static files.
STATICFILES_DIRS = [
BASE_DIR / 'static',
]
# Make sure the directories exist to prevent errors when doing `collectstatic`.
for directory in [*STATICFILES_DIRS, STATIC_ROOT]:
directory.mkdir(exist_ok=True)
# Enable compression and caching features of whitenoise.
# You can remove this if it causes problems on your setup.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| 28.08982 | 106 | 0.715412 | [
"Unlicense"
] | YoggyPutra/cobaq | project_name/settings.py | 4,691 | Python |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import re
import random
import shutil
import socket
import string
import json
import ipaddress
import charms.leadership
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import when, when_any, when_not
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.kubernetes.flagmanager import FlagManager
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.host import service_stop
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
def service_cidr():
''' Return the charm's service-cidr config '''
db = unitdata.kv()
frozen_cidr = db.get('kubernetes-master.service-cidr')
return frozen_cidr or hookenv.config('service-cidr')
def freeze_service_cidr():
''' Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. '''
db = unitdata.kv()
db.set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def reset_states_for_delivery():
'''An upgrade charm event was triggered by Juju, react to that here.'''
migrate_from_pre_snaps()
install_snaps()
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
def rename_file_idempotent(source, destination):
if os.path.isfile(source):
os.rename(source, destination)
def migrate_from_pre_snaps():
# remove old states
remove_state('kubernetes.components.installed')
remove_state('kubernetes.dashboard.available')
remove_state('kube-dns.available')
remove_state('kubernetes-master.app_version.set')
# disable old services
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
host.service_stop(service)
# rename auth files
os.makedirs('/root/cdk', exist_ok=True)
rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
'/root/cdk/serviceaccount.key')
rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
'/root/cdk/basic_auth.csv')
rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
'/root/cdk/known_tokens.csv')
# cleanup old files
files = [
"/lib/systemd/system/kube-apiserver.service",
"/lib/systemd/system/kube-controller-manager.service",
"/lib/systemd/system/kube-scheduler.service",
"/etc/default/kube-defaults",
"/etc/default/kube-apiserver.defaults",
"/etc/default/kube-controller-manager.defaults",
"/etc/default/kube-scheduler.defaults",
"/srv/kubernetes",
"/home/ubuntu/kubectl",
"/usr/local/bin/kubectl",
"/usr/local/bin/kube-apiserver",
"/usr/local/bin/kube-controller-manager",
"/usr/local/bin/kube-scheduler",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
# clear the flag managers
FlagManager('kube-apiserver').destroy_all()
FlagManager('kube-controller-manager').destroy_all()
FlagManager('kube-scheduler').destroy_all()
def install_snaps():
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-apiserver snap')
snap.install('kube-apiserver', channel=channel)
hookenv.status_set('maintenance',
'Installing kube-controller-manager snap')
snap.install('kube-controller-manager', channel=channel)
hookenv.status_set('maintenance', 'Installing kube-scheduler snap')
snap.install('kube-scheduler', channel=channel)
hookenv.status_set('maintenance', 'Installing cdk-addons snap')
snap.install('cdk-addons', channel=channel)
set_state('kubernetes-master.snaps.installed')
remove_state('kubernetes-master.components.started')
@when('config.changed.channel')
def channel_changed():
install_snaps()
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
"""Handle password change via the charms config."""
password = hookenv.config('client_password')
if password == "" and is_state('client.password.initialised'):
# password_changed is called during an upgrade. Nothing to do.
return
elif password == "":
# Password not initialised
password = token_generator()
setup_basic_auth(password, "admin", "admin")
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
set_state('client.password.initialised')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly. '''
cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
'''Setup basic authentication and token access for the cluster.'''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
hookenv.status_set('maintenance', 'Rendering authentication templates.')
keys = [service_key, basic_auth, known_tokens]
# Try first to fetch data from an old leadership broadcast.
if not get_keys_from_leader(keys) \
or is_state('reconfigure.authentication.setup'):
last_pass = get_password('basic_auth.csv', 'admin')
setup_basic_auth(last_pass, 'admin', 'admin')
if not os.path.isfile(known_tokens):
setup_tokens(None, 'admin', 'admin')
setup_tokens(None, 'kubelet', 'kubelet')
setup_tokens(None, 'kube_proxy', 'kube_proxy')
# Generate the default service account token key
os.makedirs('/root/cdk', exist_ok=True)
if not os.path.isfile(service_key):
cmd = ['openssl', 'genrsa', '-out', service_key,
'2048']
check_call(cmd)
remove_state('reconfigure.authentication.setup')
api_opts.add('service-account-key-file', service_key)
controller_opts.add('service-account-private-key-file', service_key)
# read service account key for syndication
leader_data = {}
for f in [known_tokens, basic_auth, service_key]:
with open(f, 'r') as fp:
leader_data[f] = fp.read()
# this is slightly opaque, but we are sending file contents under its file
# path as a key.
# eg:
# {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
charms.leadership.leader_set(leader_data)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
@when_not('leadership.is_leader')
def setup_non_leader_authentication():
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
keys = [service_key, basic_auth, known_tokens]
# The source of truth for non-leaders is the leader.
# Therefore we overwrite_local with whatever the leader has.
if not get_keys_from_leader(keys, overwrite_local=True):
# the keys were not retrieved. Non-leaders have to retry.
return
if not any_file_changed(keys) and is_state('authentication.setup'):
# No change detected and we have already setup the authentication
return
hookenv.status_set('maintenance', 'Rendering authentication templates.')
api_opts = FlagManager('kube-apiserver')
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
api_opts.add('service-account-key-file', service_key)
controller_opts = FlagManager('kube-controller-manager')
controller_opts.add('service-account-private-key-file', service_key)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
def get_keys_from_leader(keys, overwrite_local=False):
"""
Gets the broadcasted keys from the leader and stores them in
the corresponding files.
Args:
keys: list of keys. Keys are actually files on the FS.
Returns: True if all key were fetched, False if not.
"""
# This races with other codepaths, and seems to require being created first
# This block may be extracted later, but for now seems to work as intended
os.makedirs('/root/cdk', exist_ok=True)
for k in keys:
# If the path does not exist, assume we need it
if not os.path.exists(k) or overwrite_local:
# Fetch data from leadership broadcast
contents = charms.leadership.leader_get(k)
# Default to logging the warning and wait for leader data to be set
if contents is None:
msg = "Waiting on leaders crypto keys."
hookenv.status_set('waiting', msg)
hookenv.log('Missing content for file {}'.format(k))
return False
# Write out the file and move on to the next item
with open(k, 'w+') as fp:
fp.write(contents)
return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('cdk-addons.configured', 'kube-api-endpoint.available',
'kube-control.connected')
def idle_status(kube_api, kube_control):
''' Signal at the end of the run that we are running. '''
if not all_kube_system_pods_running():
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
elif hookenv.config('service-cidr') != service_cidr():
msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
hookenv.status_set('active', msg)
else:
# All services should be up and running at this point. Double-check...
failing_services = master_services_down()
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes master running.')
else:
msg = 'Stopped services: {}'.format(','.join(failing_services))
hookenv.status_set('blocked', msg)
def master_services_down():
"""Ensure master services are up and running.
Return: list of failing services"""
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not host.service_running(daemon):
failing_services.append(service)
return failing_services
@when('etcd.available', 'tls_client.server.certificate.saved',
'authentication.setup')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
'''Run the Kubernetes master components.'''
hookenv.status_set('maintenance',
'Configuring the Kubernetes master services.')
freeze_service_cidr()
if not etcd.get_connection_string():
        # etcd is not returning a connection string. This happens when
# the master unit disconnects from etcd and is ready to terminate.
# No point in trying to start master services and fail. Just return.
return
handle_etcd_relation(etcd)
configure_master_services()
hookenv.status_set('maintenance',
'Starting the Kubernetes master services.')
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
host.service_restart('snap.%s.daemon' % service)
hookenv.open_port(6443)
set_state('kubernetes-master.components.started')
@when('etcd.available')
def etcd_data_change(etcd):
''' Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
    handle these events consistently only when the number of etcd
units has actually changed '''
# key off of the connection string
connection_string = etcd.get_connection_string()
# If the connection string changes, remove the started state to trigger
# handling of the master components
if data_changed('etcd-connect', connection_string):
remove_state('kubernetes-master.components.started')
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
''' Send cluster DNS info '''
# Note that the DNS server doesn't necessarily exist at this point. We know
# where we're going to put it, though, so let's send the info anyway.
dns_ip = get_dns_ip()
kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)
@when('kube-control.auth.requested')
@when('authentication.setup')
@when('leadership.is_leader')
def send_tokens(kube_control):
"""Send the tokens to the workers."""
kubelet_token = get_token('kubelet')
proxy_token = get_token('kube_proxy')
admin_token = get_token('admin')
# Send the data
requests = kube_control.auth_user()
for request in requests:
kube_control.sign_auth_request(request[0], kubelet_token,
proxy_token, admin_token)
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator master is waiting for a relation to workers.
If deploying via bundle this won't happen, but if operator is upgrading a
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set('blocked', 'Waiting for workers.')
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
''' Send configuration to the load balancer, and close access to the
public interface '''
kube_api.configure(port=6443)
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Get the SDN gateway based on the cidr address.
kubernetes_service_ip = get_kubernetes_service_ip()
domain = hookenv.config('dns_domain')
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
socket.gethostname(),
kubernetes_service_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kubernetes-master.components.started')
def configure_cdk_addons():
''' Configure CDK addons '''
remove_state('cdk-addons.configured')
dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
args = [
'arch=' + arch(),
'dns-ip=' + get_dns_ip(),
'dns-domain=' + hookenv.config('dns_domain'),
'enable-dashboard=' + dbEnabled
]
check_call(['snap', 'set', 'cdk-addons'] + args)
if not addons_ready():
hookenv.status_set('waiting', 'Waiting to retry addon deployment')
remove_state('cdk-addons.configured')
return
set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
"""
Test if the add ons got installed
Returns: True is the addons got applied
"""
try:
check_call(['cdk-addons.apply'])
return True
except CalledProcessError:
hookenv.log("Addons are not ready yet.")
return False
@when('loadbalancer.available', 'certificates.ca.available',
'certificates.client.cert.available', 'authentication.setup')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
# Get the potential list of loadbalancers from the relation object.
hosts = loadbalancer.get_addresses_ports()
# Get the public address of loadbalancers so users can access the cluster.
address = hosts[0].get('public-address')
# Get the port of the loadbalancer so users can access the cluster.
port = hosts[0].get('port')
server = 'https://{0}:{1}'.format(address, port)
build_kubeconfig(server)
@when('certificates.ca.available', 'certificates.client.cert.available',
'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
'''Create a kubernetes configuration for the master unit.'''
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server)
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
    ''' Determine whether we should remove the state that gates re-rendering and
    re-execution of the ceph-relation-changed handling: if the relation data has
    changed, any configs, keys, and/or service prerequisites should be
    re-rendered. '''
ceph_relation_data = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'hostname': socket.gethostname(),
'key': ceph_admin.key()
}
# Re-execute the rendering if the data has changed.
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'''Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
    This method will install the client package, and render the requisite files
in order to consume the ceph-storage relation.'''
ceph_context = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'use_syslog': "true",
'ceph_public_network': '',
'ceph_cluster_network': '',
'loglevel': 1,
'hostname': socket.gethostname(),
}
# Install the ceph common utilities.
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if not os.path.isdir(etc_ceph_directory):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
# Render the ceph configuration from the ceph conf template
render('ceph.conf', charm_ceph_conf, ceph_context)
# The key can rotate independently of other ceph config, so validate it
admin_key = os.path.join(etc_ceph_directory,
'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write("[client.admin]\n\tkey = {}\n".format(
ceph_admin.key()))
except IOError as err:
hookenv.log("IOError writing admin.keyring: {}".format(err))
# Enlist the ceph-admin key as a kubernetes secret
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
# We didn't have a key, and cannot proceed. Do not set state and
# allow this method to re-execute
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
# At first glance this is deceptive. The apply stanza will create if
# it doesn't exist, otherwise it will update the entry, ensuring our
# ceph-secret is always reflective of what we have in /etc/ceph
# assuming we have invoked this anytime that file would change.
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except:
# the enlistment in kubernetes failed, return and prepare for re-exec
return
# when complete, set a state relating to configuration of the storage
# backend that will allow other modules to hook into this and verify we
# have performed the necessary pre-req steps to interface with a ceph
# deployment.
set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def is_privileged():
"""Return boolean indicating whether or not to set allow-privileged=true.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
return is_state('kubernetes-master.gpu.enabled')
else:
return privileged == 'true'
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
remove_state('kubernetes-master.components.started')
remove_state('config.changed.allow-privileged')
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
"""The remote side (kubernetes-worker) is gpu-enabled.
We need to run in privileged mode.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
remove_state('kubernetes-master.components.started')
set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when_not('kubernetes-master.privileged')
def disable_gpu_mode():
"""We were in gpu mode, but the operator has set allow-privileged="false",
so we can't run in gpu mode anymore.
"""
remove_state('kubernetes-master.gpu.enabled')
@hook('stop')
def shutdown():
""" Stop the kubernetes master services
"""
service_stop('snap.kube-apiserver.daemon')
service_stop('snap.kube-controller-manager.daemon')
service_stop('snap.kube-scheduler.daemon')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def build_kubeconfig(server):
'''Gather the relevant data for Kubernetes configuration objects and create
a config object with that information.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
ca_exists = ca and os.path.isfile(ca)
client_pass = get_password('basic_auth.csv', 'admin')
# Do we have everything we need?
if ca_exists and client_pass:
# Create an absolute path for the kubeconfig file.
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
# Create the kubeconfig on this system so users can access the cluster.
create_kubeconfig(kubeconfig_path, server, ca,
user='admin', password=client_pass)
# Make the config file readable by the ubuntu users so juju scp works.
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
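# Illustrative usage sketch (paths and credentials below are hypothetical, not
# taken from this charm): password-based credentials, as used by
# build_kubeconfig() above, would be created with e.g.
#   create_kubeconfig('/home/ubuntu/config', 'https://10.0.0.1:6443',
#                     '/root/cdk/ca.crt', user='admin', password='secret')
# while certificate-based credentials would pass key= and certificate= instead;
# token and password remain mutually exclusive.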
def get_dns_ip():
'''Get an IP address for the DNS server on the provided cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .10 at the end of the network
ip = interface.network.network_address + 10
return ip.exploded
def get_kubernetes_service_ip():
'''Get the IP address for the kubernetes service based on the cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .1 at the end of the network
ip = interface.network.network_address + 1
return ip.exploded
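# Worked example (illustrative CIDR): with service_cidr() == '10.152.183.0/24',
# get_dns_ip() returns '10.152.183.10' and get_kubernetes_service_ip() returns
# '10.152.183.1'.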
def handle_etcd_relation(reldata):
''' Save the client credentials and set appropriate daemon flags when
etcd declares itself as available'''
connection_string = reldata.get_connection_string()
# Define where the etcd tls files will be kept.
etcd_dir = '/root/cdk/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
api_opts = FlagManager('kube-apiserver')
    # Never use stale data; always prefer what is coming in while building the
    # context. If it is stale, it is because what is in unitdata is stale.
data = api_opts.data
if data.get('etcd-servers-strict') or data.get('etcd-servers'):
api_opts.destroy('etcd-cafile')
api_opts.destroy('etcd-keyfile')
api_opts.destroy('etcd-certfile')
api_opts.destroy('etcd-servers', strict=True)
api_opts.destroy('etcd-servers')
# Set the apiserver flags in the options manager
api_opts.add('etcd-cafile', ca)
api_opts.add('etcd-keyfile', key)
api_opts.add('etcd-certfile', cert)
api_opts.add('etcd-servers', connection_string, strict=True)
def configure_master_services():
''' Add remaining flags for the master services and configure snaps to use
them '''
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
scheduler_opts = FlagManager('kube-scheduler')
scheduler_opts.add('v', '2')
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
client_cert_path = layer_options.get('client_certificate_path')
client_key_path = layer_options.get('client_key_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
if is_privileged():
api_opts.add('allow-privileged', 'true', strict=True)
set_state('kubernetes-master.privileged')
else:
api_opts.add('allow-privileged', 'false', strict=True)
remove_state('kubernetes-master.privileged')
# Handle static options for now
api_opts.add('service-cluster-ip-range', service_cidr())
api_opts.add('min-request-timeout', '300')
api_opts.add('v', '4')
api_opts.add('tls-cert-file', server_cert_path)
api_opts.add('tls-private-key-file', server_key_path)
api_opts.add('kubelet-certificate-authority', ca_cert_path)
api_opts.add('kubelet-client-certificate', client_cert_path)
api_opts.add('kubelet-client-key', client_key_path)
api_opts.add('logtostderr', 'true')
api_opts.add('insecure-bind-address', '127.0.0.1')
api_opts.add('insecure-port', '8080')
api_opts.add('storage-backend', 'etcd2') # FIXME: add etcd3 support
admission_control = [
'Initializers',
'NamespaceLifecycle',
'LimitRanger',
'ServiceAccount',
'ResourceQuota',
'DefaultTolerationSeconds'
]
if get_version('kube-apiserver') < (1, 6):
hookenv.log('Removing DefaultTolerationSeconds from admission-control')
admission_control.remove('DefaultTolerationSeconds')
if get_version('kube-apiserver') < (1, 7):
hookenv.log('Removing Initializers from admission-control')
admission_control.remove('Initializers')
api_opts.add('admission-control', ','.join(admission_control), strict=True)
    # Default to 3 minute resync. TODO: Make this configurable?
controller_opts.add('min-resync-period', '3m')
controller_opts.add('v', '2')
controller_opts.add('root-ca-file', ca_cert_path)
controller_opts.add('logtostderr', 'true')
controller_opts.add('master', 'http://127.0.0.1:8080')
scheduler_opts.add('v', '2')
scheduler_opts.add('logtostderr', 'true')
scheduler_opts.add('master', 'http://127.0.0.1:8080')
cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
check_call(cmd)
cmd = (
['snap', 'set', 'kube-controller-manager'] +
controller_opts.to_s().split(' ')
)
check_call(cmd)
cmd = ['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' ')
check_call(cmd)
def setup_basic_auth(password=None, username='admin', uid='admin'):
    '''Create the htaccess-style basic auth credentials file.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
if not password:
password = token_generator()
with open(htaccess, 'w') as stream:
stream.write('{0},{1},{2}'.format(password, username, uid))
def setup_tokens(token, username, user):
'''Create a token file for kubernetes authentication.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
if not token:
token = token_generator()
with open(known_tokens, 'a') as stream:
stream.write('{0},{1},{2}\n'.format(token, username, user))
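# Illustrative file contents (values hypothetical): setup_basic_auth() writes a
# single line such as "p4ssw0rd,admin,admin" to basic_auth.csv, while each
# setup_tokens() call appends a line such as "<32-char token>,kubelet,kubelet"
# to known_tokens.csv.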
def get_password(csv_fname, user):
'''Get the password of user within the csv file provided.'''
root_cdk = '/root/cdk'
tokens_fname = os.path.join(root_cdk, csv_fname)
if not os.path.isfile(tokens_fname):
return None
with open(tokens_fname, 'r') as stream:
for line in stream:
record = line.split(',')
if record[1] == user:
return record[0]
return None
def get_token(username):
"""Grab a token from the static file if present. """
return get_password('known_tokens.csv', username)
def set_token(password, save_salt):
''' Store a token so it can be recalled later by token_generator.
param: password - the password to be stored
param: save_salt - the key to store the value of the token.'''
db = unitdata.kv()
db.set(save_salt, password)
return db.get(save_salt)
def token_generator(length=32):
''' Generate a random token for use in passwords and account tokens.
param: length - the length of the token to generate'''
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))
return token
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
''' Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise. '''
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
result = json.loads(output)
for pod in result['items']:
status = pod['status']['phase']
if status != 'Running':
return False
return True
def apiserverVersion():
cmd = 'kube-apiserver --version'.split()
version_string = check_output(cmd).decode('utf-8')
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
| 36.878848 | 79 | 0.683819 | [
"Apache-2.0"
] | BaiHuoYu/nbp | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | 37,137 | Python |
from ..api import rule
from ..api._endpoint import ApiEndpoint, maybe_login_required
from ..entities._entity import NotFound
from ..entities.commit import Commit, CommitSerializer
class CommitListAPI(ApiEndpoint):
serializer = CommitSerializer()
@maybe_login_required
def get(self):
"""
---
description: Get a list of commits.
responses:
"200": "CommitList"
"401": "401"
tags:
- Commits
"""
commits = Commit.all(order_by=Commit.timestamp.desc(), limit=500)
return self.serializer.many.dump(commits)
class CommitEntityAPI(ApiEndpoint):
serializer = CommitSerializer()
def _get(self, commit_id):
try:
commit = Commit.one(id=commit_id)
except NotFound:
self.abort_404_not_found()
return commit
@maybe_login_required
def get(self, commit_id):
"""
---
description: Get a commit.
responses:
"200": "CommitEntity"
"401": "401"
"404": "404"
parameters:
- name: commit_id
in: path
schema:
type: string
tags:
- Commits
"""
commit = self._get(commit_id)
return self.serializer.one.dump(commit)
commit_entity_view = CommitEntityAPI.as_view("commit")
commit_list_view = CommitListAPI.as_view("commits")
rule(
"/commits/<commit_id>/",
view_func=commit_entity_view,
methods=["GET"],
)
rule(
"/commits/",
view_func=commit_list_view,
methods=["GET"],
)
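# Illustrative request sketch (the URL prefix depends on where the api blueprint
# is mounted): GET .../commits/ returns up to 500 serialized commits ordered by
# timestamp descending, and GET .../commits/<commit_id>/ returns a single
# commit, or 404 if the id is unknown.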
| 23.362319 | 73 | 0.584367 | [
"MIT"
] | Christian8491/conbench | conbench/api/commits.py | 1,612 | Python |
from django.apps import AppConfig
class Events(AppConfig):
name = 'events'
| 13.5 | 33 | 0.728395 | [
"MIT"
] | DeanORourke1996/haco | HacoWeb/haco/events/apps.py | 81 | Python |
############################################################################
# examples/multi_webcamera/host/test_module/__init__.py
#
# Copyright 2019, 2020 Sony Semiconductor Solutions Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name NuttX nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
from TestServer import TestServer
| 49.777778 | 76 | 0.717634 | [
"Apache-2.0"
] | Curly386/spresense | examples/multi_webcamera/host/test_module/__init__.py | 1,792 | Python |
import io
import logging
import os
import json
import time
import boto3
import botocore
from markov.utils import log_and_exit, Logger, get_boto_config, \
SIMAPP_EVENT_ERROR_CODE_500, SIMAPP_EVENT_ERROR_CODE_400, \
SIMAPP_S3_DATA_STORE_EXCEPTION
LOG = Logger(__name__, logging.INFO).get_logger()
# The amount of time for the sim app to wait for sagemaker to produce
# the ip
SAGEMAKER_WAIT_TIME = 1200 # 20 minutes
class SageS3Client():
def __init__(self, bucket=None, s3_prefix=None, aws_region=None, s3_endpoint_url=None):
self.aws_region = aws_region
self.bucket = bucket
self.s3_prefix = s3_prefix
self.s3_endpoint_url = s3_endpoint_url
self.config_key = os.path.normpath(s3_prefix + "/ip/ip.json")
self.hyperparameters_key = os.path.normpath(s3_prefix + "/ip/hyperparameters.json")
self.done_file_key = os.path.normpath(s3_prefix + "/ip/done")
self.model_checkpoints_prefix = os.path.normpath(s3_prefix + "/model/") + "/"
LOG.info("Initializing SageS3Client...")
def get_client(self):
session = boto3.session.Session()
return session.client('s3', region_name=self.aws_region, endpoint_url=self.s3_endpoint_url, config=get_boto_config())
def _get_s3_key(self, key):
return os.path.normpath(self.model_checkpoints_prefix + "/" + key)
def write_ip_config(self, ip_address):
try:
s3_client = self.get_client()
data = {"IP": ip_address}
json_blob = json.dumps(data)
file_handle = io.BytesIO(json_blob.encode())
file_handle_done = io.BytesIO(b'done')
s3_client.upload_fileobj(file_handle, self.bucket, self.config_key)
s3_client.upload_fileobj(file_handle_done, self.bucket, self.done_file_key)
except botocore.exceptions.ClientError:
log_and_exit("Write ip config failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Write ip config failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def upload_hyperparameters(self, hyperparams_json):
try:
s3_client = self.get_client()
file_handle = io.BytesIO(hyperparams_json.encode())
s3_client.upload_fileobj(file_handle, self.bucket, self.hyperparameters_key)
except botocore.exceptions.ClientError:
log_and_exit("Hyperparameters failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Hyperparameters failed to upload",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def get_ip(self):
s3_client = self.get_client()
time_elapsed = 0
try:
# Wait for sagemaker to produce the redis ip
while time_elapsed < SAGEMAKER_WAIT_TIME:
response = s3_client.list_objects(Bucket=self.bucket, Prefix=self.done_file_key)
if "Contents" in response:
break
time.sleep(1)
time_elapsed += 1
if time_elapsed % 5 == 0:
LOG.info("Waiting for SageMaker Redis server IP: Time elapsed: %s seconds",
time_elapsed)
if time_elapsed >= SAGEMAKER_WAIT_TIME:
log_and_exit("Timed out while attempting to retrieve the Redis IP",
SIMAPP_S3_DATA_STORE_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)
# Download the ip file
s3_client.download_file(self.bucket, self.config_key, 'ip.json')
with open("ip.json") as file:
ip_file = json.load(file)["IP"]
return ip_file
except botocore.exceptions.ClientError:
log_and_exit("Unable to retrieve redis ip",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to retrieve redis ip",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def download_file(self, s3_key, local_path):
s3_client = self.get_client()
try:
s3_client.download_file(self.bucket, s3_key, local_path)
return True
except botocore.exceptions.ClientError as err:
            # It is possible that the file isn't there, in which case we should
            # return False and let the caller decide the next action.
if err.response['Error']['Code'] == "404":
return False
else:
log_and_exit("Unable to download file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to download file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def upload_file(self, s3_key, local_path):
s3_client = self.get_client()
try:
s3_client.upload_file(Filename=local_path,
Bucket=self.bucket,
Key=s3_key)
return True
except botocore.exceptions.ClientError:
log_and_exit("Unable to upload file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except Exception:
log_and_exit("Unable to upload file",
SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
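# Illustrative usage sketch (bucket, prefix and region are hypothetical):
#   s3 = SageS3Client(bucket='my-bucket', s3_prefix='training-job-1',
#                     aws_region='us-east-1')
#   redis_ip = s3.get_ip()  # blocks until SageMaker uploads ip/done and ip.json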
| 44.589552 | 125 | 0.604017 | [
"MIT"
] | Ending2015a/deepracer-local | src/rl_coach_2020_v2/src/markov/s3_client.py | 5,975 | Python |
"""Components for use in `CycleGroup`. For details, see `CycleGroup`."""
from __future__ import division, print_function
from six.moves import range
import numpy as np
import scipy.sparse as sparse
import unittest
from openmdao.core.explicitcomponent import ExplicitComponent
PSI = 1.
_vec_terms = {}
def _compute_vector_terms(system_size):
# Try/Except pattern is much faster than if key in ... if the key is present (which it will be
# outside of the first invocation).
try:
return _vec_terms[system_size]
except KeyError:
u = np.zeros(system_size)
u[[0, -1]] = np.sqrt(2)/2
v = np.zeros(system_size)
v[1:-1] = 1 / np.sqrt(system_size - 2)
cross_terms = np.outer(v, u) - np.outer(u, v)
same_terms = np.outer(u, u) + np.outer(v, v)
_vec_terms[system_size] = u, v, cross_terms, same_terms
return u, v, cross_terms, same_terms
def _compute_A(system_size, theta):
u, v, cross_terms, same_terms = _compute_vector_terms(system_size)
return (np.eye(system_size)
+ np.sin(theta) * cross_terms
+ (np.cos(theta) - 1) * same_terms)
def _compute_dA(system_size, theta):
u, v, cross_terms, same_terms = _compute_vector_terms(system_size)
return np.cos(theta) * cross_terms - np.sin(theta) * same_terms
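# Illustrative note (not in the original component): u and v above are
# orthonormal (unit norm, disjoint support), so _compute_A(n, theta) acts as a
# plane rotation by theta in span{u, v} and as the identity on the orthogonal
# complement; _compute_dA(n, theta) is its elementwise derivative with respect
# to theta.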
def array_idx(i, var_size):
return slice(i * var_size, (i + 1) * var_size)
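# Worked example: array_idx(2, 3) == slice(6, 9), i.e. the block belonging to
# the third size-3 variable in a flattened vector.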
class ExplicitCycleComp(ExplicitComponent):
def _inputs_to_vector(self, inputs):
var_shape = self.metadata['var_shape']
num_var = self.metadata['num_var']
size = np.prod(var_shape)
x = np.zeros(num_var * size)
for i in range(num_var):
x_i = inputs[self._cycle_names['x'].format(i)].flat
x[size * i:size * (i + 1)] = x_i
return x
def _vector_to_outputs(self, vec, outputs):
var_shape = self.metadata['var_shape']
num_var = self.metadata['num_var']
size = np.prod(var_shape)
for i in range(num_var):
y_i = vec[size * i:size * (i + 1)].reshape(var_shape)
outputs[self._cycle_names['y'].format(i)] = y_i
def __str__(self):
return 'Explicit Cycle Component'
def initialize(self):
self.metadata.declare('jacobian_type', default='matvec',
values=['matvec', 'dense', 'sparse-coo', 'sparse-csr',
'sparse-csc'],
desc='method of assembling derivatives')
self.metadata.declare('partial_type', default='array',
values=['array', 'sparse', 'aij'],
desc='type of partial derivatives')
self.metadata.declare('num_var', type_=int, default=1,
desc='Number of variables per component')
self.metadata.declare('var_shape', type_=tuple, default=(3,),
desc='Shape of each variable')
self.metadata.declare('index', type_=int,
desc='Index of the component. Used for testing implicit connections')
self.metadata.declare('connection_type', type_=str, default='explicit',
values=['explicit', 'implicit'],
desc='How to connect variables.')
self.metadata.declare('finite_difference', default=False,
type_=bool,
desc='If the derivatives should be finite differenced.')
self.metadata.declare('num_comp', type_=int, default=2,
desc='Total number of components')
self.angle_param = 'theta'
self._cycle_names = {}
def _init_parameterized(self):
self.num_var = self.metadata['num_var']
self.var_shape = self.metadata['var_shape']
self.size = self.num_var * np.prod(self.var_shape)
if self.metadata['jacobian_type'] == 'matvec':
self.compute_jacvec_product = self.jacvec_product
if self.metadata['connection_type'] == 'implicit':
idx = self.metadata['index']
self._cycle_names['x'] = 'x_{}_{{}}'.format(idx)
self._cycle_names['y'] = 'x_{}_{{}}'.format(idx + 1)
self._cycle_names['theta'] = 'theta_{}'.format(idx)
self._cycle_names['theta_out'] = 'theta_{}'.format(idx + 1)
num_var = self.metadata['num_var']
self._cycle_promotes_in = [self._cycle_names['x'].format(i) for i in range(num_var)]
self._cycle_promotes_out = [self._cycle_names['y'].format(i) for i in range(num_var)]
self._cycle_promotes_in.append(self._cycle_names['theta'])
self._cycle_promotes_out.append(self._cycle_names['theta_out'])
else:
self._cycle_names['x'] = 'x_{}'
self._cycle_names['y'] = 'y_{}'
self._cycle_names['theta'] = 'theta'
self._cycle_names['theta_out'] = 'theta_out'
self._cycle_promotes_in = self._cycle_promotes_out = []
def setup(self):
for i in range(self.num_var):
self.add_input(self._cycle_names['x'].format(i), shape=self.var_shape)
self.add_output(self._cycle_names['y'].format(i), shape=self.var_shape)
self.add_input(self._cycle_names['theta'], val=1.)
self.add_output(self._cycle_names['theta_out'], shape=(1,))
# Setup partials
pd_type = self.metadata['partial_type']
if self.metadata['finite_difference']:
if self.metadata['jacobian_type'] == 'matvec':
raise unittest.SkipTest('not testing FD and matvec')
if pd_type != 'array':
raise unittest.SkipTest('only dense FD supported')
self.declare_partials('*', '*', method='fd')
elif self.metadata['jacobian_type'] != 'matvec' and pd_type != 'array':
num_var = self.num_var
var_shape = self.var_shape
var_size = np.prod(var_shape)
A = np.ones((self.size, self.size))
dA_x = np.ones((self.size, 1))
dtheta = np.array([[1.]])
angle_param = self._cycle_names[self.angle_param]
# if our subjacs are not dense, we must assign values here that
# match their type (data values don't matter, only structure).
# Otherwise, we assume they are dense and we'll get an error later
# when we assign a subjac with a type that doesn't match.
for out_idx in range(num_var):
out_var = self._cycle_names['y'].format(out_idx)
for in_idx in range(num_var):
in_var = self._cycle_names['x'].format(in_idx)
Aij = A[array_idx(out_idx, var_size), array_idx(in_idx, var_size)]
self.declare_partials(out_var, in_var,
**self._array2kwargs(Aij, pd_type))
self.declare_partials(out_var, angle_param,
**self._array2kwargs(dA_x[array_idx(out_idx, var_size)],
pd_type))
self.declare_partials(self._cycle_names['theta_out'], self._cycle_names['theta'],
**self._array2kwargs(dtheta, pd_type))
else:
# Declare everything
self.declare_partials(of='*', wrt='*')
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
A = _compute_A(self.size, theta)
x = self._inputs_to_vector(inputs)
y = A.dot(x)
self._vector_to_outputs(y, outputs)
outputs[self._cycle_names['theta_out']] = theta
def jacvec_product(self, inputs, d_inputs, d_outputs, mode):
angle_param = self._cycle_names[self.angle_param]
x = self._inputs_to_vector(inputs)
angle = inputs[angle_param]
A = _compute_A(self.size, angle)
dA = _compute_dA(self.size, angle)
var_shape = self.metadata['var_shape']
var_size = np.prod(var_shape)
num_var = self.metadata['num_var']
x_name = self._cycle_names['x']
y_name = self._cycle_names['y']
theta_name = self._cycle_names['theta']
theta_out_name = self._cycle_names['theta_out']
if mode == 'fwd':
for j in range(num_var):
x_j = x_name.format(j)
if x_j in d_inputs:
dx = d_inputs[x_j].flat[:]
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
Aij = A[array_idx(i, var_size), array_idx(j, var_size)]
d_outputs[y_i] += Aij.dot(dx).reshape(var_shape)
if theta_name in d_inputs and theta_out_name in d_outputs:
dtheta = d_inputs[theta_name]
d_outputs[theta_out_name] += dtheta
if angle_param in d_inputs:
dangle = d_inputs[angle_param]
dy_dangle = (dA.dot(x)) * dangle
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
d_outputs[y_i] += dy_dangle[array_idx(i, var_size)].reshape(var_shape)
elif mode == 'rev':
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
dy_i = d_outputs[y_i].flat[:]
for j in range(num_var):
x_j = x_name.format(j)
if x_j in d_inputs:
Aij = A[array_idx(i, var_size), array_idx(j, var_size)]
d_inputs[x_j] += Aij.T.dot(dy_i).reshape(var_shape)
if angle_param in d_inputs:
dAij = dA[array_idx(i, var_size), array_idx(j, var_size)]
x_j_vec = inputs[x_j].flat[:]
d_inputs[angle_param] += x_j_vec.T.dot(dAij.T.dot(dy_i))
if theta_out_name in d_outputs and theta_name in d_inputs:
dtheta_out = d_outputs[theta_out_name]
d_inputs[theta_name] += dtheta_out
def make_jacobian_entry(self, A, pd_type):
if pd_type == 'aij':
return self.make_sub_jacobian(A, pd_type)[0]
return self.make_sub_jacobian(A, pd_type)
def make_sub_jacobian(self, A, pd_type):
if pd_type == 'array':
return A
if pd_type == 'sparse':
return sparse.csr_matrix(A)
if pd_type == 'aij':
data = []
rows = []
cols = []
A = np.atleast_2d(A)
for i in range(A.shape[0]):
for j in range(A.shape[1]):
if np.abs(A[i, j]) > 1e-15:
data.append(A[i, j])
rows.append(i)
cols.append(j)
return [np.array(data), np.array(rows), np.array(cols)]
raise ValueError('Unknown partial_type: {}'.format(pd_type))
def _array2kwargs(self, arr, pd_type):
jac = self.make_sub_jacobian(arr, pd_type)
if pd_type == 'aij':
return {'val': jac[0], 'rows': jac[1], 'cols': jac[2]}
else:
return {'val': jac}
def compute_partials(self, inputs, partials):
if self.metadata['jacobian_type'] != 'matvec' and not self.metadata['finite_difference']:
angle_param = self._cycle_names[self.angle_param]
angle = inputs[angle_param]
num_var = self.num_var
var_shape = self.var_shape
var_size = np.prod(var_shape)
x = self._inputs_to_vector(inputs)
size = self.size
A = _compute_A(size, angle)
dA = _compute_dA(size, angle)
dA_x = np.atleast_2d(dA.dot(x)).T
pd_type = self.metadata['partial_type']
dtheta = np.array([[1.]])
y_name = self._cycle_names['y']
x_name = self._cycle_names['x']
for out_idx in range(num_var):
out_var = y_name.format(out_idx)
for in_idx in range(num_var):
in_var = x_name.format(in_idx)
Aij = A[array_idx(out_idx, var_size), array_idx(in_idx, var_size)]
J_y_x = self.make_jacobian_entry(Aij, pd_type)
J_y_angle = self.make_jacobian_entry(dA_x[array_idx(out_idx, var_size)],
pd_type)
partials[out_var, in_var] = J_y_x
partials[out_var, angle_param] = J_y_angle
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
partials[theta_out, theta] = self.make_jacobian_entry(dtheta, pd_type)
class ExplicitFirstComp(ExplicitCycleComp):
def __str__(self):
return 'Explicit Cycle Component - First'
def setup(self):
self.add_input('psi', val=1.)
self.angle_param = 'psi'
self._cycle_names['psi'] = 'psi'
super(ExplicitFirstComp, self).setup()
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
psi = inputs[self._cycle_names['psi']]
A = _compute_A(self.size, psi)
y = A.dot(np.ones(self.size))
self._vector_to_outputs(y, outputs)
outputs[self._cycle_names['theta_out']] = theta
class ExplicitLastComp(ExplicitFirstComp):
def __str__(self):
return 'Explicit Cycle Component - Last'
def setup(self):
super(ExplicitLastComp, self).setup()
self.add_output('x_norm2', shape=(1,))
self._n = 1
# Setup partials
pd_type = self.metadata['partial_type']
if self.metadata['jacobian_type'] != 'matvec' and pd_type != 'array':
x = np.ones(self.var_shape)
for i in range(self.metadata['num_var']):
in_var = self._cycle_names['x'].format(i)
self.declare_partials('x_norm2', in_var,
**self._array2kwargs(x.flatten(), pd_type))
self.declare_partials(self._cycle_names['theta_out'], self._cycle_names['psi'],
**self._array2kwargs(np.array([1.]), pd_type))
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
psi = inputs[self._cycle_names['psi']]
k = self.metadata['num_comp']
x = self._inputs_to_vector(inputs)
outputs['x_norm2'] = 0.5*np.dot(x,x)
# theta_out has 1/2 the error as theta does to the correct angle.
outputs[self._cycle_names['theta_out']] = theta / 2 + (self._n * 2 * np.pi - psi) / (2 * k - 2)
def compute_partials(self, inputs, partials):
if self.metadata['jacobian_type'] != 'matvec' and not self.metadata['finite_difference']:
pd_type = self.metadata['partial_type']
for i in range(self.metadata['num_var']):
in_var = self._cycle_names['x'].format(i)
partials['x_norm2', in_var] = self.make_jacobian_entry(inputs[in_var].flat[:],
pd_type)
k = self.metadata['num_comp']
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
partials[theta_out, theta] = self.make_jacobian_entry(np.array([.5]), pd_type)
partials[theta_out, self._cycle_names['psi']] = \
self.make_jacobian_entry(np.array([-1/(2*k-2)]), pd_type)
def jacvec_product(self, inputs, d_inputs, d_outputs, mode):
if self.metadata['jacobian_type'] == 'matvec':
k = self.metadata['num_comp']
num_var = self.metadata['num_var']
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
psi = self._cycle_names['psi']
if mode == 'fwd':
if theta_out in d_outputs:
if theta in d_inputs:
d_outputs[theta_out] += 0.5 * d_inputs[theta]
if psi in d_inputs:
d_outputs[theta_out] += -d_inputs[psi] / (2 * k - 2)
for i in range(num_var):
in_var = self._cycle_names['x'].format(i)
if in_var in d_inputs and 'x_norm2' in d_outputs:
d_outputs['x_norm2'] += np.dot(inputs[in_var].flat, d_inputs[in_var].flat)
elif mode == 'rev':
if 'x_norm2' in d_outputs:
dxnorm = d_outputs['x_norm2']
for i in range(num_var):
x_i_name = self._cycle_names['x'].format(i)
if x_i_name in d_inputs:
d_inputs[x_i_name] += inputs[x_i_name] * dxnorm
if theta_out in d_outputs:
dtheta_out = d_outputs[theta_out]
if theta in d_inputs:
d_inputs[theta] += .5*dtheta_out
if psi in d_inputs:
d_inputs[psi] += -dtheta_out/(2*k-2)
| 41.802885 | 103 | 0.552444 | ["Apache-2.0"] | hwangjt/blue | openmdao/test_suite/components/cycle_comps.py | 17,390 | Python |
import tensorflow as tf
from TransformerNet.layers import Encoder, Decoder
def Decoder_test(*args, **kwargs):
inputs = tf.random.uniform((64, 62), dtype=tf.int64, minval=0, maxval=200) # (batch_size, input_seq_len)
enc_output = Encoder(num_layers=2, d_model=512, num_heads=8,
d_ff=2048, input_vocab_size=8500,
maximum_position_encoding=10000)(inputs, False, None)
target = tf.random.uniform((64, 26), dtype=tf.int64, minval=0, maxval=200) # (batch_size, target_seq_len)
sample_decoder = Decoder(*args, **kwargs)
output, attn = sample_decoder(target,
enc_output=enc_output,
training=False,
look_ahead_mask=None,
padding_mask=None)
print(output.shape) # (batch_size, target_seq_len, d_model)
print(attn['decoder_layer2_attention2'].shape) # (batch_size, target_seq_len, input_seq_len)
if __name__ == "__main__":
Decoder_test(num_layers=2, d_model=512, num_heads=8,
d_ff=2048, target_vocab_size=8000,
maximum_position_encoding=5000)
| 45.111111 | 111 | 0.602627 | ["MIT"] | TeaKatz/Models_Corpus | TransformerNet/layers/Decoder_test.py | 1,218 | Python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
oo_option lookup plugin for openshift-ansible
Usage:
- debug:
msg: "{{ lookup('oo_option', '<key>') | default('<default_value>', True) }}"
This returns, by order of priority:
* if it exists, the `cli_<key>` ansible variable. This variable is set by `bin/cluster --option <key>=<value> …`
* if it exists, the environment variable named `<key>`
* if none of the above conditions are met, empty string is returned
'''
import os
# pylint: disable=no-name-in-module,import-error,unused-argument,unused-variable,super-init-not-called,too-few-public-methods,missing-docstring
try:
# ansible-2.0
from ansible.plugins.lookup import LookupBase
except ImportError:
# ansible-1.9.x
class LookupBase(object):
def __init__(self, basedir=None, runner=None, **kwargs):
self.runner = runner
self.basedir = self.runner.basedir
def get_basedir(self, variables):
return self.basedir
# Reason: disable too-few-public-methods because the `run` method is the only
# one required by the Ansible API
# Status: permanently disabled
# pylint: disable=too-few-public-methods
class LookupModule(LookupBase):
''' oo_option lookup plugin main class '''
# Reason: disable unused-argument because Ansible is calling us with many
# parameters we are not interested in.
# The lookup plugins of Ansible have this kwargs “catch-all” parameter
# which is not used
# Status: permanently disabled unless Ansible API evolves
# pylint: disable=unused-argument
def __init__(self, basedir=None, **kwargs):
''' Constructor '''
self.basedir = basedir
# Reason: disable unused-argument because Ansible is calling us with many
# parameters we are not interested in.
# The lookup plugins of Ansible have this kwargs “catch-all” parameter
# which is not used
# Status: permanently disabled unless Ansible API evolves
# pylint: disable=unused-argument
def run(self, terms, variables, **kwargs):
''' Main execution path '''
ret = []
for term in terms:
option_name = term.split()[0]
cli_key = 'cli_' + option_name
if 'vars' in variables and cli_key in variables['vars']:
ret.append(variables['vars'][cli_key])
elif option_name in os.environ:
ret.append(os.environ[option_name])
else:
ret.append('')
return ret
| 33.688312 | 143 | 0.653045 | ["Apache-2.0"] | Acidburn0zzz/openshift-ansible | lookup_plugins/oo_option.py | 2,604 | Python |
def show_function(protein):
from sabueso.entity.protein import get_function_card
    # assumed call signature: build the function card for the given protein
    card = get_function_card(protein)
    return card
| 14.857143 | 56 | 0.769231 | ["MIT"] | dprada/Sabueso | sabueso/protein/show_function.py | 104 | Python |
from setuptools import setup
setup(name='mws',
version='0.2',
description='Multi window sender',
url='https://github.com/TheWorldOfCode/MWS',
author='TheWorldOfCode',
author_email='dannj75@gmail.com',
install_requires=[
"python-daemon>=2.2.4",
"python-xlib>=0.27"
],
license='BSD',
packages=['mws'],
zip_safe=False)
| 24.411765 | 50 | 0.561446 | ["BSD-3-Clause"] | TheWorldOfCode/MWS | packages/setup.py | 415 | Python |
from opentrons import protocol_api, types
metadata = {
"protocolName": "Testosaur Version 3",
"author": "Opentrons <engineering@opentrons.com>",
"description": 'A variant on "Dinosaur" for testing with Protocol API v3',
"source": "Opentrons Repository",
"apiLevel": "3.0",
}
def run(ctx: protocol_api.ProtocolContext) -> None:
tip_rack = ctx.load_labware("opentrons_96_tiprack_300ul", 8)
source = ctx.load_labware("nest_12_reservoir_15ml", 1)
dest = ctx.load_labware("corning_96_wellplate_360ul_flat", 2)
pipette = ctx.load_instrument("p300_single_gen2", types.Mount.RIGHT, [])
for i in range(4):
pipette.pick_up_tip(tip_rack.wells()[i])
pipette.aspirate(50, source.wells_by_name()["A1"])
pipette.dispense(50, dest.wells()[i])
pipette.drop_tip(tip_rack.wells()[i])
| 35.083333 | 78 | 0.690024 | ["Apache-2.0"] | Opentrons/labware | api/tests/opentrons/data/testosaur_v3.py | 842 | Python |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import os
import itertools
from datetime import datetime
from dateutil.relativedelta import relativedelta
import subprocess
from ..train_utils import TSCVSplitter
class ParameterSweeper:
"""
    The functionality of this class has been replaced by HyperDrive,
    but we keep it to preserve the work already done and in case we
    need more flexibility than HyperDrive provides.
"""
def __init__(self, config):
self.work_directory = config["WorkDirectory"]
data_config = config["DataParams"]
self.data_path = data_config["DataPath"]
if "DataFile" in data_config:
data_file = data_config["DataFile"]
self.data_full_path = os.path.join(self.work_directory, self.data_path, data_file)
else:
self.data_full_path = os.path.join(self.work_directory, self.data_path)
parameters_config = config["Parameters"]
self.parameter_name_list = [n for n, _ in parameters_config.items()]
parameter_value_list = [p for _, p in parameters_config.items()]
self.parameter_combinations = list(itertools.product(*parameter_value_list))
features_config = config["Features"]
self.feature_selection_mode = features_config["FeatureSelectionMode"]
if self.feature_selection_mode == "Default":
# In default mode, simply iterate through each feature set in
# FeatureList
self.feature_list = features_config["FeatureList"]
else:
# Placeholder for more advanced feature selection strategy
pass
def sweep_parameters_script(self, script_config, cv_setting_file, params_setting_file):
script_command = script_config["ScriptCommand"]
script = os.path.join(self.work_directory, script_config["Script"])
task_list = []
parameter_sets = {}
count = 0
for f in self.feature_list:
for p in self.parameter_combinations:
count += 1
task = " ".join(
[
script_command,
script,
"-d",
self.data_full_path,
"-p",
params_setting_file,
"-c",
cv_setting_file,
"-s",
str(count),
]
)
task_list.append(task)
parameter_dict = {}
for n, v in zip(self.parameter_name_list, p):
parameter_dict[n] = v
parameter_sets[count] = {
"feature_set": f,
"features": self.feature_list[f],
"parameters": parameter_dict,
}
with open(params_setting_file, "w") as fp:
json.dump(parameter_sets, fp, indent=True)
# Run tasks in parallel
processes = []
for t in task_list:
process = subprocess.Popen(t, shell=True)
processes.append(process)
# Collect statuses
output = [p.wait() for p in processes]
print(output)
def sweep_parameters(self):
# placeholder for parameter sweeping in python
pass
def sweep_parameters_batch_ai(self):
# placeholder for parameter sweeping using batch ai
pass
def main(config_file):
with open(config_file) as f:
config = json.load(f)
datetime_format = config["DatetimeFormat"]
work_directory = config["WorkDirectory"]
cv_setting_file = os.path.join(work_directory, "cv_settings.json")
# parameter_setting_file = os.path.join(work_directory,
# 'parameter_settings.json')
cv = TSCVSplitter(config)
# This part adjusts the cv settings due to the specific problem setup
# of GEFCom2017. Different forecasting setups may require different
# adjustments. Most setups should not require any adjustment.
for k, v in cv.train_validation_split.items():
round_dict = {}
# Training data ends on 12/31, used to forecast Feb. and Mar.
train_end = datetime.strptime(v["train_range"][1], datetime_format)
# Jan. validation range
validation_start_1 = datetime.strptime(v["validation_range"][0], datetime_format)
validation_end_1 = validation_start_1 + relativedelta(months=1, hours=-1)
# Training data ends on 11/30, used to forecast Jan. and Feb.
train_end_prev = datetime.strftime(train_end + relativedelta(months=-1), datetime_format)
# Training data ends on 01/31, used to forecast Mar. and Apr.
train_end_next = datetime.strftime(train_end + relativedelta(months=1), datetime_format)
# Feb. validation range
validation_start_2 = validation_start_1 + relativedelta(months=1)
validation_end_2 = validation_start_2 + relativedelta(months=1, hours=-1)
# Mar. validation range
validation_start_3 = validation_start_1 + relativedelta(months=2)
validation_end_3 = validation_start_3 + relativedelta(months=1, hours=-1)
# Apr. validation range
validation_start_4 = validation_start_1 + relativedelta(months=3)
validation_end_4 = validation_start_4 + relativedelta(months=1, hours=-1)
validation_start_1 = datetime.strftime(validation_start_1, datetime_format)
validation_end_1 = datetime.strftime(validation_end_1, datetime_format)
validation_start_2 = datetime.strftime(validation_start_2, datetime_format)
validation_end_2 = datetime.strftime(validation_end_2, datetime_format)
validation_start_3 = datetime.strftime(validation_start_3, datetime_format)
validation_end_3 = datetime.strftime(validation_end_3, datetime_format)
validation_start_4 = datetime.strftime(validation_start_4, datetime_format)
validation_end_4 = datetime.strftime(validation_end_4, datetime_format)
round_dict[1] = {
"train_range": [v["train_range"][0], train_end_prev],
"validation_range": [validation_start_1, validation_end_1],
}
round_dict[2] = {
"train_range": [v["train_range"][0], train_end_prev],
"validation_range": [validation_start_2, validation_end_2],
}
round_dict[3] = {
"train_range": [v["train_range"][0], v["train_range"][1]],
"validation_range": [validation_start_2, validation_end_2],
}
round_dict[4] = {
"train_range": [v["train_range"][0], v["train_range"][1]],
"validation_range": [validation_start_3, validation_end_3],
}
round_dict[5] = {
"train_range": [v["train_range"][0], train_end_next],
"validation_range": [validation_start_3, validation_end_3],
}
round_dict[6] = {
"train_range": [v["train_range"][0], train_end_next],
"validation_range": [validation_start_4, validation_end_4],
}
cv.train_validation_split[k] = round_dict
with open(cv_setting_file, "w") as fp:
json.dump(cv.train_validation_split, fp, indent=True)
#
# ps = ParameterSweeper(config)
#
# script_config = config['ScriptParams']
# ps.sweep_parameters_script(script_config, cv_setting_file,
# parameter_setting_file)
if __name__ == "__main__":
main("backtest_config.json")
| 38.771574 | 97 | 0.626997 | ["MIT"] | Grkrish2002/Time-Series-Forecasting | contrib/tsperf/cross_validation/cross_validation.py | 7,638 | Python |
# coding: utf-8
# In[ ]:
import os
import re
import tarfile
import requests
from tqdm import tqdm  # progress helper referenced by download_file below
from pugnlp.futil import path_status, find_files
# In[ ]:
# From the nlpia package for downloading data too big for the repo
BIG_URLS = {
'w2v': (
'https://www.dropbox.com/s/965dir4dje0hfi4/GoogleNews-vectors-negative300.bin.gz?dl=1',
1647046227,
),
'slang': (
'https://www.dropbox.com/s/43c22018fbfzypd/slang.csv.gz?dl=1',
117633024,
),
'tweets': (
'https://www.dropbox.com/s/5gpb43c494mc8p0/tweets.csv.gz?dl=1',
311725313,
),
'lsa_tweets': (
'https://www.dropbox.com/s/rpjt0d060t4n1mr/lsa_tweets_5589798_2003588x200.tar.gz?dl=1',
3112841563, # 3112841312,
),
'imdb': (
'https://www.dropbox.com/s/yviic64qv84x73j/aclImdb_v1.tar.gz?dl=1',
3112841563, # 3112841312,
),
}
# In[ ]:
# These functions are part of the nlpia package which can be pip installed and run from there.
def dropbox_basename(url):
filename = os.path.basename(url)
match = re.findall(r'\?dl=[0-9]$', filename)
if match:
return filename[:-len(match[0])]
return filename
def download_file(url, data_path='.', filename=None, size=None, chunk_size=4096, verbose=True):
"""Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https"""
if filename is None:
filename = dropbox_basename(url)
file_path = os.path.join(data_path, filename)
if url.endswith('?dl=0'):
url = url[:-1] + '1' # noninteractive download
if verbose:
tqdm_prog = tqdm
print('requesting URL: {}'.format(url))
else:
tqdm_prog = no_tqdm
r = requests.get(url, stream=True, allow_redirects=True)
size = r.headers.get('Content-Length', None) if size is None else size
print('remote size: {}'.format(size))
stat = path_status(file_path)
print('local size: {}'.format(stat.get('size', None)))
if stat['type'] == 'file' and stat['size'] == size: # TODO: check md5 or get the right size of remote file
r.close()
return file_path
print('Downloading to {}'.format(file_path))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive chunks
f.write(chunk)
r.close()
return file_path
def untar(fname):
if fname.endswith("tar.gz"):
with tarfile.open(fname) as tf:
tf.extractall()
else:
print("Not a tar.gz file: {}".format(fname))
# In[ ]:
# UNCOMMENT these 2 lines if you haven't already download the word2vec model and the imdb dataset
# download_file(BIG_URLS['w2v'][0])
# untar(download_file(BIG_URLS['imdb'][0]))
# In[ ]:
maxlen = 400
batch_size = 32
embedding_dims = 300
epochs = 2
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
import glob
import os
from random import shuffle
def pre_process_data(filepath):
"""
This is dependent on your training data source but we will try to generalize it as best as possible.
"""
positive_path = os.path.join(filepath, 'pos')
negative_path = os.path.join(filepath, 'neg')
pos_label = 1
neg_label = 0
dataset = []
for filename in glob.glob(os.path.join(positive_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((pos_label, f.read()))
for filename in glob.glob(os.path.join(negative_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((neg_label, f.read()))
shuffle(dataset)
return dataset
# In[ ]:
from nltk.tokenize import TreebankWordTokenizer
from gensim.models.keyedvectors import KeyedVectors
word_vectors = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True, limit=200000)
def tokenize_and_vectorize(dataset):
tokenizer = TreebankWordTokenizer()
vectorized_data = []
expected = []
for sample in dataset:
tokens = tokenizer.tokenize(sample[1])
sample_vecs = []
for token in tokens:
try:
sample_vecs.append(word_vectors[token])
except KeyError:
pass # No matching token in the Google w2v vocab
vectorized_data.append(sample_vecs)
return vectorized_data
# In[ ]:
def collect_expected(dataset):
""" Peel of the target values from the dataset """
expected = []
for sample in dataset:
expected.append(sample[0])
return expected
# In[ ]:
def pad_trunc(data, maxlen):
""" For a given dataset pad with zero vectors or truncate to maxlen """
new_data = []
# Create a vector of 0's the length of our word vectors
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = sample
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
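# Minimal illustration (toy vectors, assumed for clarity): short samples are padded
# with zero vectors and long samples are truncated, so every sample ends up with
# exactly maxlen token vectors.
_toy_samples = [[[1., 1.]], [[2., 2.], [3., 3.], [4., 4.]]]
print(pad_trunc(_toy_samples, 2))
# -> [[[1.0, 1.0], [0.0, 0.0]], [[2.0, 2.0], [3.0, 3.0]]]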
# In[ ]:
import numpy as np
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
maxlen = 400
batch_size = 32                    # How many samples to show the net before backpropagating the error and updating the weights
embedding_dims = 300 # Length of the token vectors we will create for passing into the Convnet
epochs = 2
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model1.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights1.h5")
print('Model saved.')
# In[ ]:
from keras.models import model_from_json
with open("lstm_model1.json", "r") as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights('lstm_weights1.h5')
# In[ ]:
sample_1 = "I'm hate that the dismal weather that had me down for so long, when will it break! Ugh, when does happiness return? The sun is blinding and the puffy clouds are too thin. I can't wait for the weekend."
# We pass a dummy value in the first element of the tuple just because our helper expects it from the way we processed the initial data. That value won't ever see the network, so it can be whatever.
vec_list = tokenize_and_vectorize([(1, sample_1)])
# Tokenize returns a list of the data (length 1 here)
test_vec_list = pad_trunc(vec_list, maxlen)
test_vec = np.reshape(test_vec_list, (len(test_vec_list), maxlen, embedding_dims))
print("Sample's sentiment, 1 - pos, 2 - neg : {}".format(model.predict_classes(test_vec)))
print("Raw output of sigmoid function: {}".format(model.predict(test_vec)))
# In[ ]:
def test_len(data, maxlen):
total_len = truncated = exact = padded = 0
for sample in data:
total_len += len(sample)
if len(sample) > maxlen:
truncated += 1
elif len(sample) < maxlen:
padded += 1
else:
exact +=1
print('Padded: {}'.format(padded))
print('Equal: {}'.format(exact))
print('Truncated: {}'.format(truncated))
print('Avg length: {}'.format(total_len/len(data)))
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
test_len(vectorized_data, 400)
# In[ ]:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, LSTM
maxlen = 200
batch_size = 32 # How many samples to show the net before backpropagating the error and updating the weights
embedding_dims = 300 # Length of the token vectors we will create for passing into the Convnet
epochs = 2
dataset = pre_process_data('./aclImdb_v1/train')
vectorized_data = tokenize_and_vectorize(dataset)
expected = collect_expected(dataset)
split_point = int(len(vectorized_data)*.8)
x_train = vectorized_data[:split_point]
y_train = expected[:split_point]
x_test = vectorized_data[split_point:]
y_test = expected[split_point:]
x_train = pad_trunc(x_train, maxlen)
x_test = pad_trunc(x_test, maxlen)
x_train = np.reshape(x_train, (len(x_train), maxlen, embedding_dims))
y_train = np.array(y_train)
x_test = np.reshape(x_test, (len(x_test), maxlen, embedding_dims))
y_test = np.array(y_test)
num_neurons = 50
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, embedding_dims)))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("lstm_model7.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("lstm_weights7.h5")
print('Model saved.')
# In[ ]:
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
# In[ ]:
def avg_len(data):
total_len = 0
for sample in data:
total_len += len(sample[1])
    return total_len/len(data)
print(avg_len(dataset))
# In[ ]:
def clean_data(data):
""" Shift to lower case, replace unknowns with UNK, and listify """
new_data = []
VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; '
for sample in data:
new_sample = []
for char in sample[1].lower(): # Just grab the string, not the label
if char in VALID:
new_sample.append(char)
else:
new_sample.append('UNK')
new_data.append(new_sample)
return new_data
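# Tiny illustration (made-up sample): characters outside VALID collapse to 'UNK',
# everything else is lower-cased and split into a list of characters.
print(clean_data([(1, 'Hi-5!')])[0])  # -> ['h', 'i', 'UNK', '5', '!']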
listified_data = clean_data(dataset)
# In[ ]:
def char_pad_trunc(data, maxlen):
""" We truncate to maxlen or add in PAD tokens """
new_dataset = []
for sample in data:
if len(sample) > maxlen:
new_data = sample[:maxlen]
elif len(sample) < maxlen:
pads = maxlen - len(sample)
new_data = sample + ['PAD'] * pads
else:
new_data = sample
new_dataset.append(new_data)
return new_dataset
maxlen = 1500
# In[ ]:
def create_dicts(data):
""" Modified from Keras LSTM example"""
chars = set()
for sample in data:
chars.update(set(sample))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
return char_indices, indices_char
# In[ ]:
import numpy as np
def onehot_encode(dataset, char_indices, maxlen):
"""
One hot encode the tokens
Args:
dataset list of lists of tokens
char_indices dictionary of {key=character, value=index to use encoding vector}
maxlen int Length of each sample
Return:
np array of shape (samples, tokens, encoding length)
"""
X = np.zeros((len(dataset), maxlen, len(char_indices.keys())))
for i, sentence in enumerate(dataset):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
return X
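# Small illustration (toy characters, assumed): each character becomes a one-hot row,
# giving an array of shape (samples, maxlen, vocabulary size).
_toy_encoded = onehot_encode([['a', 'b', 'a']], {'a': 0, 'b': 1}, 3)
print(_toy_encoded.shape)  # -> (1, 3, 2)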
# In[ ]:
dataset = pre_process_data('./aclImdb_v1/train')
expected = collect_expected(dataset)
listified_data = clean_data(dataset)
maxlen = 1500
common_length_data = char_pad_trunc(listified_data, maxlen)
char_indices, indices_char = create_dicts(common_length_data)
encoded_data = onehot_encode(common_length_data, char_indices, maxlen)
# In[ ]:
split_point = int(len(encoded_data)*.8)
x_train = encoded_data[:split_point]
y_train = expected[:split_point]
x_test = encoded_data[split_point:]
y_test = expected[split_point:]
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, Flatten, LSTM
num_neurons = 40
print('Build model...')
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=(maxlen, len(char_indices.keys()))))
model.add(Dropout(.2))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# In[ ]:
# In[ ]:
batch_size = 32
epochs = 10
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
model_structure = model.to_json()
with open("char_lstm_model3.json", "w") as json_file:
json_file.write(model_structure)
model.save_weights("char_lstm_weights3.h5")
print('Model saved.')
# In[ ]:
from nltk.corpus import gutenberg
print(gutenberg.fileids())
# In[ ]:
text = ''
for txt in gutenberg.fileids():
if 'shakespeare' in txt:
text += gutenberg.raw(txt).lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# In[ ]:
print(text[:500])
# In[ ]:
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
# In[ ]:
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# In[ ]:
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
print(model.summary())
# In[ ]:
epochs = 6
batch_size = 128
model_structure = model.to_json()
with open("shakes_lstm_model.json", "w") as json_file:
json_file.write(model_structure)
for i in range(5):
model.fit(X, y,
batch_size=batch_size,
epochs=epochs)
model.save_weights("shakes_lstm_weights_{}.h5".format(i+1))
print('Model saved.')
# In[ ]:
### NOT IN CHAPTER, Just to reproduce output
from keras.models import model_from_json
with open('shakes_lstm_model.json', 'r') as f:
model_json = f.read()
model = model_from_json(model_json)
model.load_weights('shakes_lstm_weights_4.h5')
# In[ ]:
import random
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
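# Hedged illustration (made-up distribution): low temperature sharpens the
# distribution toward the most likely index, while temperature 1.0 roughly
# reproduces the original probabilities.
_toy_preds = [0.6, 0.3, 0.1]
print(sample(_toy_preds, temperature=0.2))  # almost always 0
print(sample(_toy_preds, temperature=1.0))  # 0 about 60% of the time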
# In[ ]:
import sys
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
# In[ ]:
from keras.models import Sequential
from keras.layers import GRU
model = Sequential()
model.add(GRU(num_neurons, return_sequences=True, input_shape=X[0].shape))
# In[ ]:
from keras.models import Sequential
from keras.layers import LSTM
model = Sequential()
model.add(LSTM(num_neurons, return_sequences=True, input_shape=X[0].shape))
num_neurons_2 = num_neurons  # assumed width for the second, stacked LSTM layer
model.add(LSTM(num_neurons_2, return_sequences=True))
| 23.73838 | 215 | 0.669762 | ["MIT"] | brusic/nlpia | nlpia/book/examples/ch09.py | 17,875 | Python |
from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd, rip
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
if args.dataset == 'pascal':
train_set = pascal.VOCSegmentation(args, split='train')
val_set = pascal.VOCSegmentation(args, split='val')
if args.use_sbd:
sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set])
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'cityscapes':
train_set = cityscapes.CityscapesSegmentation(args, split='train')
val_set = cityscapes.CityscapesSegmentation(args, split='val')
test_set = cityscapes.CityscapesSegmentation(args, split='test')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'coco':
train_set = coco.COCOSegmentation(args, split='train')
val_set = coco.COCOSegmentation(args, split='val')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'rip':
classes = {'full': 7, 'level1': 2, 'level2': 3, 'level3': 5}
import os
from mypath import Path
data_root = Path.db_root_dir(args.dataset)
root = os.path.join(data_root, 'RipTrainingAllData')
patches, level = args.rip_mode.split('-')
if patches == 'patches':
patches = 'COCOJSONPatches'
elif patches == 'patches_v1':
patches = 'COCOJSONPatches_v1'
else:
patches = 'COCOJSONs'
# patches = 'COCOJSONPatches' if patches == 'patches' else 'COCOJSONs'
train_ann_file =os.path.join(data_root, patches, level, 'cv_5_fold', 'train_1.json')
val_ann_file =os.path.join(data_root, patches, level, 'cv_5_fold', 'val_1.json')
train_set = rip.RIPSegmentation(args, split='train', root=root, ann_file=train_ann_file)
val_set = rip.RIPSegmentation(args, split='val', root=root, ann_file=val_ann_file)
num_classes = classes[level]
        # NOTE: drop_last=True here to avoid a final batch of size 1, which causes BatchNorm2d errors
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_classes
else:
raise NotImplementedError
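    # Aside (illustrative, not part of the original loader): drop_last=True in the
    # 'rip' branch above avoids a trailing batch of size 1, which nn.BatchNorm2d
    # rejects in training mode. A minimal sketch of the failure, assuming torch:
    #
    #   import torch
    #   bn = torch.nn.BatchNorm2d(3).train()
    #   bn(torch.randn(1, 3, 8, 8))  # ValueError: Expected more than 1 value per channel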
| 48.211268 | 112 | 0.684487 | ["MIT"] | dzwallkilled/pytorch-deeplab-xception | dataloaders/__init__.py | 3,423 | Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""This module provides different kinds of serialization methods for XML event
streams.
"""
from itertools import chain
import re
from genshi.core import escape, Attrs, Markup, Namespace, QName, StreamEventKind
from genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_NS, \
START_CDATA, END_CDATA, PI, COMMENT, XML_NAMESPACE
__all__ = ['encode', 'get_serializer', 'DocType', 'XMLSerializer',
'XHTMLSerializer', 'HTMLSerializer', 'TextSerializer']
__docformat__ = 'restructuredtext en'
def encode(iterator, method='xml', encoding=None, out=None):
"""Encode serializer output into a string.
:param iterator: the iterator returned from serializing a stream (basically
any iterator that yields unicode objects)
:param method: the serialization method; determines how characters not
representable in the specified encoding are treated
:param encoding: how the output string should be encoded; if set to `None`,
this method returns a `unicode` object
:param out: a file-like object that the output should be written to
instead of being returned as one big string; note that if
this is a file or socket (or similar), the `encoding` must
not be `None` (that is, the output must be encoded)
:return: a `str` or `unicode` object (depending on the `encoding`
parameter), or `None` if the `out` parameter is provided
:since: version 0.4.1
:note: Changed in 0.5: added the `out` parameter
"""
if encoding is not None:
errors = 'replace'
if method != 'text' and not isinstance(method, TextSerializer):
errors = 'xmlcharrefreplace'
_encode = lambda string: string.encode(encoding, errors)
else:
_encode = lambda string: string
if out is None:
return _encode(''.join(list(iterator)))
for chunk in iterator:
out.write(_encode(chunk))
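# Illustrative usage (kept as a comment so nothing runs at import time; follows the
# doctest style used elsewhere in this module):
#
#   >>> from genshi.input import XML
#   >>> stream = XML('<p>Hi</p>')
#   >>> encode(XMLSerializer()(stream), method='xml', encoding='utf-8')
#   '<p>Hi</p>'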
def get_serializer(method='xml', **kwargs):
"""Return a serializer object for the given method.
:param method: the serialization method; can be either "xml", "xhtml",
"html", "text", or a custom serializer class
Any additional keyword arguments are passed to the serializer, and thus
depend on the `method` parameter value.
:see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`
:since: version 0.4.1
"""
if isinstance(method, basestring):
method = {'xml': XMLSerializer,
'xhtml': XHTMLSerializer,
'html': HTMLSerializer,
'text': TextSerializer}[method.lower()]
return method(**kwargs)
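# Sketch (same hedged, comment-only style as above): the string names simply map
# onto the serializer classes defined below, so these two calls are interchangeable:
#
#   >>> isinstance(get_serializer('html'), HTMLSerializer)
#   True
#   >>> isinstance(get_serializer(HTMLSerializer), HTMLSerializer)
#   True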
def _prepare_cache(use_cache=True):
"""Prepare a private token serialization cache.
:param use_cache: boolean indicating whether a real cache should
be used or not. If not, the returned functions
are no-ops.
:return: emit and get functions, for storing and retrieving
serialized values from the cache.
"""
cache = {}
if use_cache:
def _emit(kind, input, output):
cache[kind, input] = output
return output
_get = cache.get
else:
def _emit(kind, input, output):
return output
def _get(key):
pass
return _emit, _get, cache
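# Minimal sketch of the returned helpers (toy key/value, assumed): _emit stores a
# serialized value under (kind, input) and returns it, _get retrieves it later.
#
#   >>> _emit, _get, _cache = _prepare_cache(use_cache=True)
#   >>> _emit(TEXT, u'a', u'a')
#   u'a'
#   >>> _get((TEXT, u'a'))
#   u'a'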
class DocType(object):
"""Defines a number of commonly used DOCTYPE declarations as constants."""
HTML_STRICT = (
'html', '-//W3C//DTD HTML 4.01//EN',
'http://www.w3.org/TR/html4/strict.dtd'
)
HTML_TRANSITIONAL = (
'html', '-//W3C//DTD HTML 4.01 Transitional//EN',
'http://www.w3.org/TR/html4/loose.dtd'
)
HTML_FRAMESET = (
'html', '-//W3C//DTD HTML 4.01 Frameset//EN',
'http://www.w3.org/TR/html4/frameset.dtd'
)
HTML = HTML_STRICT
HTML5 = ('html', None, None)
XHTML_STRICT = (
'html', '-//W3C//DTD XHTML 1.0 Strict//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'
)
XHTML_TRANSITIONAL = (
'html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
)
XHTML_FRAMESET = (
'html', '-//W3C//DTD XHTML 1.0 Frameset//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd'
)
XHTML = XHTML_STRICT
XHTML11 = (
'html', '-//W3C//DTD XHTML 1.1//EN',
'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'
)
SVG_FULL = (
'svg', '-//W3C//DTD SVG 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'
)
SVG_BASIC = (
'svg', '-//W3C//DTD SVG Basic 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-basic.dtd'
)
SVG_TINY = (
'svg', '-//W3C//DTD SVG Tiny 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-tiny.dtd'
)
SVG = SVG_FULL
@classmethod
def get(cls, name):
"""Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``
declaration for the specified name.
The following names are recognized in this version:
* "html" or "html-strict" for the HTML 4.01 strict DTD
* "html-transitional" for the HTML 4.01 transitional DTD
* "html-frameset" for the HTML 4.01 frameset DTD
* "html5" for the ``DOCTYPE`` proposed for HTML5
* "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD
* "xhtml-transitional" for the XHTML 1.0 transitional DTD
* "xhtml-frameset" for the XHTML 1.0 frameset DTD
* "xhtml11" for the XHTML 1.1 DTD
* "svg" or "svg-full" for the SVG 1.1 DTD
* "svg-basic" for the SVG Basic 1.1 DTD
* "svg-tiny" for the SVG Tiny 1.1 DTD
:param name: the name of the ``DOCTYPE``
:return: the ``(name, pubid, sysid)`` tuple for the requested
``DOCTYPE``, or ``None`` if the name is not recognized
:since: version 0.4.1
"""
return {
'html': cls.HTML, 'html-strict': cls.HTML_STRICT,
'html-transitional': DocType.HTML_TRANSITIONAL,
'html-frameset': DocType.HTML_FRAMESET,
'html5': cls.HTML5,
'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT,
'xhtml-transitional': cls.XHTML_TRANSITIONAL,
'xhtml-frameset': cls.XHTML_FRAMESET,
'xhtml11': cls.XHTML11,
'svg': cls.SVG, 'svg-full': cls.SVG_FULL,
'svg-basic': cls.SVG_BASIC,
'svg-tiny': cls.SVG_TINY
}.get(name.lower())
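    # Illustration (hedged, in the doctest style used elsewhere in this module):
    #
    #   >>> DocType.get('html5')
    #   ('html', None, None)
    #   >>> DocType.get('xhtml') == DocType.XHTML_STRICT
    #   True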
class XMLSerializer(object):
"""Produces XML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XMLSerializer()(elem.generate())))
<div><a href="foo"/><br/><hr noshade="True"/></div>
"""
_PRESERVE_SPACE = frozenset()
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, cache=True):
"""Initialize the XML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output, or the name of a DOCTYPE as
defined in `DocType.get`
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.4.2: The `doctype` parameter can now be a string.
:note: Changed in 0.6: The `cache` parameter was added
"""
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.cache = cache
def _prepare_cache(self):
return _prepare_cache(self.cache)[:2]
def __call__(self, stream):
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
buf += [' ', attr, '="', escape(value), '"']
buf.append(kind is EMPTY and '/>' or '>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is XML_DECL and not have_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class XHTMLSerializer(XMLSerializer):
"""Produces XHTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XHTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br /><hr noshade="noshade" /></div>
"""
_EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
'hr', 'img', 'input', 'isindex', 'link', 'meta',
'param'])
_BOOLEAN_ATTRS = frozenset(['selected', 'checked', 'compact', 'declare',
'defer', 'disabled', 'ismap', 'multiple',
'nohref', 'noresize', 'noshade', 'nowrap'])
_PRESERVE_SPACE = frozenset([
QName('pre'), QName('http://www.w3.org/1999/xhtml}pre'),
QName('textarea'), QName('http://www.w3.org/1999/xhtml}textarea')
])
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, drop_xml_decl=True, cache=True):
super(XHTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
namespace_prefixes = namespace_prefixes or {}
namespace_prefixes['http://www.w3.org/1999/xhtml'] = ''
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.drop_xml_decl = drop_xml_decl
self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
drop_xml_decl = self.drop_xml_decl
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
value = attr
elif attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr == 'xml:space':
continue
buf += [' ', attr, '="', escape(value), '"']
if kind is EMPTY:
if tag in empty_elems:
buf.append(' />')
else:
buf.append('></%s>' % tag)
else:
buf.append('>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is XML_DECL and not have_decl and not drop_xml_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class HTMLSerializer(XHTMLSerializer):
"""Produces HTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(HTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br><hr noshade></div>
"""
_NOESCAPE_ELEMS = frozenset([
QName('script'), QName('http://www.w3.org/1999/xhtml}script'),
QName('style'), QName('http://www.w3.org/1999/xhtml}style')
])
def __init__(self, doctype=None, strip_whitespace=True, cache=True):
"""Initialize the HTML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.6: The `cache` parameter was added
"""
super(HTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE,
self._NOESCAPE_ELEMS))
self.filters.append(NamespaceFlattener(prefixes={
'http://www.w3.org/1999/xhtml': ''
}, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
        self.cache = cache  # respect the cache flag, as in the other serializers
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
noescape_elems = self._NOESCAPE_ELEMS
have_doctype = False
noescape = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, _ in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
output = _get((kind, data))
if output is not None:
yield output
if (kind is START or kind is EMPTY) \
and data[0] in noescape_elems:
noescape = True
elif kind is END:
noescape = False
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
if value:
buf += [' ', attr]
elif ':' in attr:
if attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr != 'xmlns':
buf += [' ', attr, '="', escape(value), '"']
buf.append('>')
if kind is EMPTY:
if tag not in empty_elems:
buf.append('</%s>' % tag)
yield _emit(kind, data, Markup(''.join(buf)))
if tag in noescape_elems:
noescape = True
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
noescape = False
elif kind is TEXT:
if noescape:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class TextSerializer(object):
"""Produces plain text from an event stream.
Only text events are included in the output. Unlike the other serializer,
special XML characters are not escaped:
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a('<Hello!>', href='foo'), tag.br)
>>> print(elem)
<div><a href="foo"><Hello!></a><br/></div>
>>> print(''.join(TextSerializer()(elem.generate())))
<Hello!>
If text events contain literal markup (instances of the `Markup` class),
that markup is by default passed through unchanged:
>>> elem = tag.div(Markup('<a href="foo">Hello & Bye!</a><br/>'))
>>> print(elem.generate().render(TextSerializer, encoding=None))
<a href="foo">Hello & Bye!</a><br/>
You can use the ``strip_markup`` to change this behavior, so that tags and
entities are stripped from the output (or in the case of entities,
replaced with the equivalent character):
>>> print(elem.generate().render(TextSerializer, strip_markup=True,
... encoding=None))
Hello & Bye!
"""
def __init__(self, strip_markup=False):
"""Create the serializer.
:param strip_markup: whether markup (tags and encoded characters) found
in the text should be removed
"""
self.strip_markup = strip_markup
def __call__(self, stream):
strip_markup = self.strip_markup
for event in stream:
if event[0] is TEXT:
data = event[1]
if strip_markup and type(data) is Markup:
data = data.striptags().stripentities()
yield unicode(data)
class EmptyTagFilter(object):
"""Combines `START` and `STOP` events into `EMPTY` events for elements that
have no contents.
"""
EMPTY = StreamEventKind('EMPTY')
def __call__(self, stream):
prev = (None, None, None)
for ev in stream:
if prev[0] is START:
if ev[0] is END:
prev = EMPTY, prev[1], prev[2]
yield prev
continue
else:
yield prev
if ev[0] is not START:
yield ev
prev = ev
EMPTY = EmptyTagFilter.EMPTY
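# Usage sketch (illustrative comment, not part of the upstream module): the filter is
# normally wired into a serializer's filter chain, but it can be applied to any event
# stream directly. Assuming genshi.input.XML is available, the contentless <br/> below
# is collapsed from a START/END pair into a single EMPTY event:
#
#     from genshi.input import XML
#     for kind, data, pos in EmptyTagFilter()(XML('<p>text<br/></p>')):
#         print kind          # START, TEXT, EMPTY, END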
class NamespaceFlattener(object):
r"""Output stream filter that removes namespace information from the stream,
instead adding namespace attributes and prefixes as needed.
:param prefixes: optional mapping of namespace URIs to prefixes
>>> from genshi.input import XML
>>> xml = XML('''<doc xmlns="NS1" xmlns:two="NS2">
... <two:item/>
... </doc>''')
>>> for kind, data, pos in NamespaceFlattener()(xml):
... print('%s %r' % (kind, data))
START (u'doc', Attrs([('xmlns', u'NS1'), (u'xmlns:two', u'NS2')]))
TEXT u'\n '
START (u'two:item', Attrs())
END u'two:item'
TEXT u'\n'
END u'doc'
"""
def __init__(self, prefixes=None, cache=True):
self.prefixes = {XML_NAMESPACE.uri: 'xml'}
if prefixes is not None:
self.prefixes.update(prefixes)
self.cache = cache
def __call__(self, stream):
prefixes = dict([(v, [k]) for k, v in self.prefixes.items()])
namespaces = {XML_NAMESPACE.uri: ['xml']}
_emit, _get, cache = _prepare_cache(self.cache)
def _push_ns(prefix, uri):
namespaces.setdefault(uri, []).append(prefix)
prefixes.setdefault(prefix, []).append(uri)
cache.clear()
def _pop_ns(prefix):
uris = prefixes.get(prefix)
uri = uris.pop()
if not uris:
del prefixes[prefix]
if uri not in uris or uri != uris[-1]:
uri_prefixes = namespaces[uri]
uri_prefixes.pop()
if not uri_prefixes:
del namespaces[uri]
cache.clear()
return uri
ns_attrs = []
_push_ns_attr = ns_attrs.append
def _make_ns_attr(prefix, uri):
return 'xmlns%s' % (prefix and ':%s' % prefix or ''), uri
def _gen_prefix():
val = 0
while 1:
val += 1
yield 'ns%d' % val
_gen_prefix = _gen_prefix().next
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield kind, data, pos
continue
output = _get((kind, data))
if output is not None:
yield kind, output, pos
elif kind is START or kind is EMPTY:
tag, attrs = data
tagname = tag.localname
tagns = tag.namespace
if tagns:
if tagns in namespaces:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
else:
_push_ns_attr(('xmlns', tagns))
_push_ns('', tagns)
new_attrs = []
for attr, value in attrs:
attrname = attr.localname
attrns = attr.namespace
if attrns:
if attrns not in namespaces:
prefix = _gen_prefix()
_push_ns(prefix, attrns)
_push_ns_attr(('xmlns:%s' % prefix, attrns))
else:
prefix = namespaces[attrns][-1]
if prefix:
attrname = '%s:%s' % (prefix, attrname)
new_attrs.append((attrname, value))
data = _emit(kind, data, (tagname, Attrs(ns_attrs + new_attrs)))
yield kind, data, pos
del ns_attrs[:]
elif kind is END:
tagname = data.localname
tagns = data.namespace
if tagns:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
yield kind, _emit(kind, data, tagname), pos
elif kind is START_NS:
prefix, uri = data
if uri not in namespaces:
prefix = prefixes.get(uri, [prefix])[-1]
_push_ns_attr(_make_ns_attr(prefix, uri))
_push_ns(prefix, uri)
elif kind is END_NS:
if data in prefixes:
uri = _pop_ns(data)
if ns_attrs:
attr = _make_ns_attr(data, uri)
if attr in ns_attrs:
ns_attrs.remove(attr)
else:
yield kind, data, pos
class WhitespaceFilter(object):
"""A filter that removes extraneous ignorable white space from the
stream.
"""
def __init__(self, preserve=None, noescape=None):
"""Initialize the filter.
:param preserve: a set or sequence of tag names for which white-space
should be preserved
:param noescape: a set or sequence of tag names for which text content
should not be escaped
The `noescape` set is expected to refer to elements that cannot contain
further child elements (such as ``<style>`` or ``<script>`` in HTML
documents).
"""
if preserve is None:
preserve = []
self.preserve = frozenset(preserve)
if noescape is None:
noescape = []
self.noescape = frozenset(noescape)
def __call__(self, stream, ctxt=None, space=XML_NAMESPACE['space'],
trim_trailing_space=re.compile('[ \t]+(?=\n)').sub,
collapse_lines=re.compile('\n{2,}').sub):
mjoin = Markup('').join
preserve_elems = self.preserve
preserve = 0
noescape_elems = self.noescape
noescape = False
textbuf = []
push_text = textbuf.append
pop_text = textbuf.pop
for kind, data, pos in chain(stream, [(None, None, None)]):
if kind is TEXT:
if noescape:
data = Markup(data)
push_text(data)
else:
if textbuf:
if len(textbuf) > 1:
text = mjoin(textbuf, escape_quotes=False)
del textbuf[:]
else:
text = escape(pop_text(), quotes=False)
if not preserve:
text = collapse_lines('\n', trim_trailing_space('', text))
yield TEXT, Markup(text), pos
if kind is START:
tag, attrs = data
if preserve or (tag in preserve_elems or
attrs.get(space) == 'preserve'):
preserve += 1
if not noescape and tag in noescape_elems:
noescape = True
elif kind is END:
noescape = False
if preserve:
preserve -= 1
elif kind is START_CDATA:
noescape = True
elif kind is END_CDATA:
noescape = False
if kind:
yield kind, data, pos
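# Usage sketch (illustrative comment, not part of the upstream module): the filter is
# callable on any event stream; tag names listed in `preserve` keep their whitespace,
# and text inside `noescape` tags is yielded as Markup instead of being escaped later:
#
#     from genshi.input import XML
#     events = list(WhitespaceFilter(preserve=['pre'])(
#         XML('<div>  <pre>  keep  </pre>  </div>')))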
class DocTypeInserter(object):
"""A filter that inserts the DOCTYPE declaration in the correct location,
after the XML declaration.
"""
def __init__(self, doctype):
"""Initialize the filter.
:param doctype: DOCTYPE as a string or DocType object.
"""
if isinstance(doctype, basestring):
doctype = DocType.get(doctype)
self.doctype_event = (DOCTYPE, doctype, (None, -1, -1))
def __call__(self, stream):
doctype_inserted = False
for kind, data, pos in stream:
if not doctype_inserted:
doctype_inserted = True
if kind is XML_DECL:
yield (kind, data, pos)
yield self.doctype_event
continue
yield self.doctype_event
yield (kind, data, pos)
if not doctype_inserted:
yield self.doctype_event
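# Usage sketch (illustrative comment, not part of the upstream module): the serializers
# above attach this filter automatically when constructed with a `doctype`; used on its
# own it looks roughly like this, with the DOCTYPE event emitted once after any XML
# declaration:
#
#     inserter = DocTypeInserter('html')
#     stream = inserter(stream)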
| 37.134204 | 82 | 0.521604 | ["Apache-2.0"] | 262877348/Data | Packages/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/genshi/output.py | 31,267 | Python
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNet_variants.data_augmentation.nnUNetTrainerV2_insaneDA import \
nnUNetTrainerV2_insaneDA
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_MMS(nnUNetTrainerV2_insaneDA):
def setup_DA_params(self):
super().setup_DA_params()
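        # Comment added for clarity: the overrides below tune the probabilistic data
        # augmentation -- per-sample probabilities for rotation, elastic deformation
        # and scaling (with independently drawn per-axis scale factors), additive
        # brightness jitter, the elastic-deformation alpha/sigma ranges and the
        # gamma-correction range.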
self.data_aug_params["p_rot"] = 0.7
self.data_aug_params["p_eldef"] = 0.1
self.data_aug_params["p_scale"] = 0.3
self.data_aug_params["independent_scale_factor_for_each_axis"] = True
self.data_aug_params["p_independent_scale_per_axis"] = 0.3
self.data_aug_params["do_additive_brightness"] = True
self.data_aug_params["additive_brightness_mu"] = 0
self.data_aug_params["additive_brightness_sigma"] = 0.2
self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
self.data_aug_params["additive_brightness_p_per_channel"] = 1
self.data_aug_params["elastic_deform_alpha"] = (0., 300.)
self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
self.data_aug_params['gamma_range'] = (0.5, 1.6)
def initialize_network(self):
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.BatchNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.BatchNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
"""def run_training(self):
from batchviewer import view_batch
a = next(self.tr_gen)
view_batch(a['data'])
import IPython;IPython.embed()"""
| 43.639344 | 117 | 0.657776 | ["Apache-2.0"] | ADVasculatureProject/nnUNet | nnunet/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py | 2,662 | Python
import numpy as np # import numpy
with open("data/day4.txt") as f:
drawing_numbers = f.readline()
board_lst = []
board_line = []
counter = 0
for line in f:
if line != '\n':
board_line.append(line.strip())
if len(board_line) == 5:
board_lst.append(board_line)
board_line = []
drawing_numbers = drawing_numbers.strip().split(',')
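# Comments added for clarity: each board is a 5x5 bingo card. create_board turns the
# raw text rows into a float numpy array; check_winning plays the drawn numbers in
# order, marking hits as NaN, and treats a board as complete once a full row or column
# is NaN. The recorded answer is (last number drawn) * (sum of unmarked cells), and the
# board that completes after the fewest draws wins -- 625 below is just a large
# sentinel for "no winner found yet".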
def create_board(board_lst):
board_array = []
for item in board_lst:
board = [x for x in item.split(' ') if x.strip() != '']
board_array.append(board)
board_array = np.array(board_array)
board_array = board_array.astype(float)
return board_array
def check_winning(board_lst, number_lst):
winning_condition = {
'Answer': 0,
'counter': 625
}
for item in board_lst:
board = create_board(item)
counter=0
for number in number_lst:
number = float(number)
counter += 1
if number in board:
result = np.where(board == number)
board[int(result[0])][int(result[1])] = np.nan
if np.all(np.isnan(board), axis=1).any() or np.all(np.isnan(board), axis=0).any():
if counter < winning_condition['counter']:
winning_condition['counter'] = counter
winning_condition['Answer'] = number * np.nansum(board)
print('The Answer is:', winning_condition)
check_winning(board_lst, drawing_numbers)
| 31.72 | 94 | 0.552333 | ["MIT"] | rogall-e/advent_of_code | 2021/day4_part1.py | 1,586 | Python
import torch
from torch_geometric.utils import k_hop_subgraph, subgraph
def test_subgraph():
edge_index = torch.tensor([
[0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6],
[1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5],
])
edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
idx = torch.tensor([3, 4, 5], dtype=torch.long)
mask = torch.tensor([0, 0, 0, 1, 1, 1, 0], dtype=torch.bool)
indices = [3, 4, 5]
for subset in [idx, mask, indices]:
out = subgraph(subset, edge_index, edge_attr)
assert out[0].tolist() == [[3, 4, 4, 5], [4, 3, 5, 4]]
assert out[1].tolist() == [7, 8, 9, 10]
out = subgraph(subset, edge_index, edge_attr, relabel_nodes=True)
assert out[0].tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]]
assert out[1].tolist() == [7, 8, 9, 10]
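# Comment added for clarity: k_hop_subgraph(node_idx, num_hops, edge_index, ...) returns
# a 4-tuple -- the nodes in the k-hop neighbourhood of node_idx (following the default
# flow direction), the induced and optionally relabelled edge_index, the positions of
# the seed nodes within that subset, and a boolean mask over the original edges -- which
# is exactly what the assertions below check.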
def test_k_hop_subgraph():
edge_index = torch.tensor([
[0, 1, 2, 3, 4, 5],
[2, 2, 4, 4, 6, 6],
])
subset, edge_index, mapping, edge_mask = k_hop_subgraph(
6, 2, edge_index, relabel_nodes=True)
assert subset.tolist() == [2, 3, 4, 5, 6]
assert edge_index.tolist() == [[0, 1, 2, 3], [2, 2, 4, 4]]
assert mapping.tolist() == [4]
assert edge_mask.tolist() == [False, False, True, True, True, True]
edge_index = torch.tensor([
[1, 2, 4, 5],
[0, 1, 5, 6],
])
subset, edge_index, mapping, edge_mask = k_hop_subgraph([0, 6], 2,
edge_index,
relabel_nodes=True)
assert subset.tolist() == [0, 1, 2, 4, 5, 6]
assert edge_index.tolist() == [[1, 2, 3, 4], [0, 1, 4, 5]]
assert mapping.tolist() == [0, 5]
assert edge_mask.tolist() == [True, True, True, True]
| 33.886792 | 79 | 0.505011 | ["MIT"] | LingxiaoShawn/pytorch_geometric | test/utils/test_subgraph.py | 1,796 | Python
# -*- coding: utf-8 -*-
# Begin CVS Header
# $Source: /Volumes/Home/Users/shoops/cvs/copasi_dev/copasi/bindings/python/unittests/Test_CMoiety.py,v $
# $Revision: 1.11 $
# $Name: $
# $Author: shoops $
# $Date: 2010/07/16 18:55:59 $
# End CVS Header
# Copyright (C) 2010 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CMoiety(unittest.TestCase):
def setUp(self):
self.datamodel=COPASI.CCopasiRootContainer.addDatamodel()
self.model=self.datamodel.getModel()
self.model.createCompartment("comp1",1.0)
self.model.createCompartment("comp2",2.0)
m1=self.model.createMetabolite("A","comp1")
m2=self.model.createMetabolite("B","comp1")
m3=self.model.createMetabolite("C","comp1")
m4=self.model.createMetabolite("D","comp1")
m5=self.model.createMetabolite("E","comp2")
m6=self.model.createMetabolite("F","comp2")
m7=self.model.createMetabolite("G","comp2")
r=self.model.createReaction("react1")
r.addSubstrate(m1.getKey())
r.addProduct(m2.getKey())
r=self.model.createReaction("react2")
r.addSubstrate(m3.getKey())
r.addProduct(m5.getKey())
r=self.model.createReaction("react3")
r.addSubstrate(m6.getKey())
r.addProduct(m2.getKey())
self.model.compileIfNecessary()
self.moiety=self.model.getMoiety(0)
def test_getDescription(self):
desc=self.moiety.getDescription(self.model)
self.assert_(type(desc)==StringType)
def test_dependentNumber(self):
v=self.moiety.dependentNumber()
self.assert_(type(v)==FloatType)
def test_getNumber(self):
v=self.moiety.getNumber()
self.assert_(type(v)==FloatType)
def test_getKey(self):
key=self.moiety.getKey()
self.assert_(type(key)==StringType)
def test_getDependentNumber(self):
v=self.moiety.getDependentNumber()
self.assert_(type(v)==FloatType)
def suite():
tests=[
'test_getDescription'
,'test_getDependentNumber'
,'test_getNumber'
,'test_getKey'
          ,'test_dependentNumber'
]
return unittest.TestSuite(map(Test_CMoiety,tests))
if(__name__ == '__main__'):
unittest.TextTestRunner(verbosity=2).run(suite())
| 30.119048 | 108 | 0.696838 | ["Artistic-2.0"] | bmoreau/COPASI | copasi/bindings/python/unittests/Test_CMoiety.py | 2,530 | Python
# build_features.py
# This module holds utility classes and functions that creates and manipulates input features
# This module also holds the various input transformers
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
def correlation_columns(dataset: pd.DataFrame, target_column: str, k: float=0.5):
"""
    Return the columns that are correlated with the target column
Parameters
----------
dataset: pd.DataFrame
The pandas dataframe
target_column: str
The target column to calculate correlation against
k: float
        The absolute-correlation cutoff point; defaults to 0.5.
        Because correlations are compared in absolute value, a single k covers both the negative and positive cutoffs
Returns
-------
columns: list
A list of columns that are correlated to the target column based on the cutoff point
"""
corr = np.abs(dataset.corr()[target_column])
corr_sorted = corr.sort_values(ascending=False)
columns = [col for col, value in zip(corr_sorted.index, corr_sorted.values) if value >= k and col != target_column]
return columns
class ColumnExtractor(BaseEstimator, TransformerMixin):
"""Columns Extractor based on correlation to the output label"""
def __init__(self, columns):
print(columns)
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X[self.columns]
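# Illustrative sketch (assumed usage, not part of the original module): the extractor is
# meant to slot into an sklearn Pipeline so only the columns most correlated with the
# target reach the downstream steps. The file path and cutoff here are hypothetical:
#
#   from sklearn.pipeline import Pipeline
#   from sklearn.preprocessing import StandardScaler
#
#   df = pd.read_csv('creditcard.csv')                 # hypothetical path
#   cols = correlation_columns(df, 'Class', k=0.3)
#   pipeline = Pipeline([
#       ('select', ColumnExtractor(cols)),
#       ('scale', StandardScaler()),
#   ])
#   X_scaled = pipeline.fit_transform(df)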
if __name__ == '__main__':
    correlation_columns(pd.read_csv('././data/raw/creditcard.csv'), 'Class', k=0.3)
| 29.660377 | 119 | 0.692112 | ["MIT"] | samie-hash/data-science-repo | credit-card-fraud/src/features/build_features.py | 1,572 | Python
# Generated by Django 3.0 on 2019-12-12 08:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('team', '0002_auto_20191210_2330'),
('members', '0001_initial'),
]
operations = [
migrations.AddField(model_name='member', name='team', field=models.ForeignKey(blank=True, default='', on_delete=django.db.models.deletion.CASCADE, to='team.Team'), preserve_default=False,),
]
| 29.176471 | 197 | 0.693548 | ["MIT"] | AroraShreshth/officialWebsite | backend/members/migrations/0002_member_team.py | 496 | Python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similar to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
        .. note:: unlike pandas', pandas-on-Spark emulates cumulative product by the
            ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If the values is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
    # TODO: Although this was removed in pandas >= 1.0.0, we're keeping it as deprecated
# since we're using this for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
        You can use lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
            pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
This kwargs are specific to PySpark's CSV options to pass. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
                # pandas 0.23 does not seem to have the 'columns' parameter in Series.to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
            pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: output JSON format is different from pandas'. It always use `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
            If ‘orient’ is ‘records’, write out line-delimited JSON format.
            Will throw ValueError if ‘orient’ is incorrect, since other formats are not
            list-like. It should always be True for now.
orient : str, default 'records'
            It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
It is specific to PySpark's JSON options to pass. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
--------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
        .. note:: unlike pandas', pandas-on-Spark emulates product by the ``exp(sum(log(...)))``
            trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type column is not included to the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there is no numeric type columns, returns empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
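    # Note on the implementation above (comment added for clarity): Spark has no product
    # aggregate, so `prod` rebuilds one from primitives that do exist:
    #
    #     prod(x_1, ..., x_n) = sign * exp(log|x_1| + ... + log|x_n|)
    #
    # where `sign` is -1 when the number of negative values is odd and +1 otherwise, and
    # any zero in the column short-circuits the result to 0. Integral columns are rounded
    # back and cast to long, and the trailing coalesce makes the product of an empty or
    # all-NA column default to 1 (the documented `min_count=0` behaviour).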
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
        count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
        scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
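        # Standard error of the mean: reuse the ddof-aware standard deviation
        # below and divide by sqrt(count), i.e. sem = std / sqrt(N).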
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
        the object does not have exactly 1 element, or if that element is not boolean.
Returns
--------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
        If there are non-boolean or multiple values, it raises an exception in all
cases as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
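        # A row counts as valid only when every data column is non-null, so AND
        # together the isNotNull predicates of all data columns.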
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
        .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
            Also unlike pandas, NA values are counted toward the window. This might be
            changed in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
        .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size, unlike pandas.
            Also unlike pandas, NA values are counted toward the window. This might be
            changed in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
        A Series with an index of sorted integers.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
        A Series with an index of sorted strings.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`bfill```.
.. note:: the current implementation of 'bfill' uses Spark's Window
            without specifying partition specification. This moves all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`ffill```.
.. note:: the current implementation of 'ffill' uses Spark's Window
            without specifying partition specification. This moves all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| 32.721025 | 100 | 0.527 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | XpressAI/spark | python/pyspark/pandas/generic.py | 104,770 | Python |
import importlib
import sys
import argparse
from multi_sample_factory.algorithms.utils.algo_utils import ExperimentStatus
from multi_sample_factory.runner.run_ngc import add_ngc_args
from multi_sample_factory.runner.run_slurm import add_slurm_args
from multi_sample_factory.utils.utils import log
def runner_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('--train_dir', default='./train_dir', type=str, help='Directory for sub-experiments')
parser.add_argument('--run', default=None, type=str,
help='Name of the python module that describes the run, e.g. sample_factory.runner.runs.doom_battle_hybrid')
    parser.add_argument('--runner', default='processes', choices=['processes', 'slurm', 'ngc'])
parser.add_argument('--pause_between', default=10, type=int, help='Pause in seconds between processes')
parser.add_argument('--num_gpus', default=1, type=int, help='How many GPUs to use')
parser.add_argument('--experiments_per_gpu', default=-1, type=int, help='How many experiments can we squeeze on a single GPU (-1 for not altering CUDA_VISIBLE_DEVICES at all)')
parser.add_argument('--max_parallel', default=4, type=int, help='Maximum simultaneous experiments')
parser.add_argument('--experiment_suffix', default='', type=str, help='Append this to the name of the experiment dir')
parser = add_slurm_args(parser)
parser = add_ngc_args(parser)
return parser
def parse_args():
args = runner_argparser().parse_args(sys.argv[1:])
return args
def main():
args = parse_args()
try:
# assuming we're given the full name of the module
run_module = importlib.import_module(f'{args.run}')
except ImportError:
try:
run_module = importlib.import_module(f'multi_sample_factory.runner.runs.{args.run}')
except ImportError:
log.error('Could not import the run module')
return ExperimentStatus.FAILURE
run_description = run_module.RUN_DESCRIPTION
run_description.experiment_suffix = args.experiment_suffix
if args.runner == 'processes':
from multi_sample_factory.runner.run_processes import run
run(run_description, args)
elif args.runner == 'slurm':
from multi_sample_factory.runner.run_slurm import run_slurm
run_slurm(run_description, args)
elif args.runner == 'ngc':
from multi_sample_factory.runner.run_ngc import run_ngc
run_ngc(run_description, args)
return ExperimentStatus.SUCCESS
if __name__ == '__main__':
sys.exit(main())
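# Example command line (a sketch: the run module name below is illustrative and
# must expose a RUN_DESCRIPTION attribute, as main() expects):
#
#   python -m multi_sample_factory.runner.run \
#       --run=multi_sample_factory.runner.runs.my_run \
#       --runner=processes --max_parallel=2 --experiments_per_gpu=1 --num_gpus=1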
| 40.545455 | 180 | 0.721226 | [
"MIT"
] | PG642/multi-sample-factory | multi_sample_factory/runner/run.py | 2,676 | Python |
import base64
def decode_img(img_string):
img_data = base64.b64decode(img_string)
filename = "temp_img.jpg"
with open(filename, "wb") as f:
f.write(img_data)
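# Minimal round-trip sketch: "input.jpg" is a hypothetical local file; it is
# encoded to a base64 string and passed back through decode_img(), which writes
# the decoded bytes to "temp_img.jpg".
def encode_img(path="input.jpg"):
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("ascii")
if __name__ == "__main__":
    decode_img(encode_img())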
| 20 | 43 | 0.677778 | [
"MIT"
] | Eye-Remocon/Face_Recognition | example/decode_image.py | 180 | Python |
"""
Showcases *LLAB(l:c)* colour appearance model computations.
"""
import numpy as np
import colour
from colour.appearance.llab import CAM_ReferenceSpecification_LLAB
from colour.utilities import message_box
message_box('"LLAB(l:c)" Colour Appearance Model Computations')
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_0 = np.array([95.05, 100.00, 108.88])
Y_b = 20.0
L = 318.31
surround = colour.VIEWING_CONDITIONS_LLAB["ref_average_4_minus"]
message_box(
f'Converting to the "LLAB(l:c)" colour appearance model specification '
f"using given parameters:\n\n"
f"\tXYZ: {XYZ}\n"
f"\tXYZ_0: {XYZ_0}\n"
f"\tY_b: {Y_b}\n"
f"\tL: {L}\n"
f"\tsurround: {surround}"
)
specification = colour.XYZ_to_LLAB(XYZ, XYZ_0, Y_b, L, surround)
print(specification)
print("\n")
message_box(
'Broadcasting the current output "LLAB(l:c)" colour appearance '
"model specification to the reference specification.\n"
"The intent of this reference specification is to provide names "
'as closest as possible to the "Mark D. Fairchild" reference.\n'
"The current output specification is meant to be consistent with "
"the other colour appearance model specification by using same "
"argument names for consistency wherever possible."
)
print(CAM_ReferenceSpecification_LLAB(*specification.values))
| 30.953488 | 76 | 0.728024 | [
"BSD-3-Clause"
] | soma2000-lang/colour | colour/examples/appearance/examples_llab.py | 1,331 | Python |
import re
from mail_app.mail import Mail
from mail_app.mail_processors.abstract_processor import AbstractProcessor
from mail_app.processed_mail import ProcessedMail
class PasswordProcessor(AbstractProcessor):
general_keywords = ["password (reset|request|update|updated)", "(new|reset|change|updated|changed your) password",
"address verification", "(confirm|confirm your|activate your) (registration|account)"]
def __init__(self):
super().__init__()
self.category = "Password"
def process(self, mail):
if self.__general_conditions(mail):
return ProcessedMail(mail.user_id, mail.message_id, mail.from_, self.category, mail.body, mail.time, mail.attachments)
############################################ Conditions ############################################
def __general_conditions(self, mail: Mail):
return (any(re.search(keyword, mail.subject.lower()) for keyword in self.general_keywords) or
any(re.search(keyword, mail.body.lower()) for keyword in self.general_keywords) or
any(re.search(keyword, name.lower()) for name, _ in mail.attachments.items() for keyword in self.general_keywords)) | 48.52 | 130 | 0.666117 | [
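# A small, self-contained check of how the keyword patterns behave. It only uses
# the class attribute, so no Mail/ProcessedMail objects (whose constructors are
# not shown here) are required; the sample subjects are made up.
if __name__ == "__main__":
    for subject in ["Please confirm your registration",
                    "We changed your password",
                    "Weekly newsletter"]:
        matched = any(re.search(keyword, subject.lower())
                      for keyword in PasswordProcessor.general_keywords)
        print(f"{subject!r} -> {'Password' if matched else 'no match'}")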
"Apache-2.0"
] | teamsaucisse/Data-Mailing | mail_app/mail_processors/password_processor.py | 1,213 | Python |
# stdlib
from typing import Any
from typing import Dict
# syft absolute
import syft as sy
from syft import serialize
from syft.core.io.address import Address
from syft.grid.messages.group_messages import CreateGroupMessage
from syft.grid.messages.group_messages import CreateGroupResponse
from syft.grid.messages.group_messages import DeleteGroupMessage
from syft.grid.messages.group_messages import DeleteGroupResponse
from syft.grid.messages.group_messages import GetGroupMessage
from syft.grid.messages.group_messages import GetGroupResponse
from syft.grid.messages.group_messages import GetGroupsMessage
from syft.grid.messages.group_messages import GetGroupsResponse
from syft.grid.messages.group_messages import UpdateGroupMessage
from syft.grid.messages.group_messages import UpdateGroupResponse
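# Each test below follows the same round-trip pattern: build a message, push it
# through syft's serialize()/deserialize(), and assert the copy is equivalent.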
def test_create_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {
"group-name": "Heart diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
}
msg = CreateGroupMessage(
address=target,
content=request_content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_create_group_response_serde() -> None:
target = Address(name="Alice")
request_content = {"msg": "Group Created Successfully!"}
msg = CreateGroupResponse(
address=target,
status_code=200,
content=request_content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_delete_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {"group_id": "f2a6as5d16fasd"}
msg = DeleteGroupMessage(
address=target,
content=request_content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_delete_group_response_serde() -> None:
target = Address(name="Alice")
content = {"msg": "Group deleted Successfully!"}
msg = DeleteGroupResponse(
status_code=200,
address=target,
content=content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_update_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
content = {
"group-id": "eqw9e4a5d846",
"group-name": "Brain diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
}
msg = UpdateGroupMessage(
address=target,
content=content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_update_group_response_serde() -> None:
target = Address(name="Alice")
request_content = {"msg": "Group updated successfully!"}
msg = UpdateGroupResponse(
address=target,
status_code=200,
content=request_content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
content = {"group-id": "eqw9e4a5d846"}
msg = GetGroupMessage(
address=target,
content=content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_group_response_serde() -> None:
target = Address(name="Alice")
content = {
"group-id": "eqw9e4a5d846",
"group-name": "Heart diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
}
msg = GetGroupResponse(
address=target,
status_code=200,
content=content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_all_groups_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
content: Dict[Any, Any] = {}
msg = GetGroupsMessage(
address=target,
content=content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_all_groups_response_serde() -> None:
target = Address(name="Alice")
request_content = {
"groups": {
"626sadaf631": {
"group-name": "Heart diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
},
"a84ew64wq6e": {
"group-name": "Brain diseases group",
"members": ["user-id5", "user-id7", "user-id9"],
"data": [
{"id": "26463afasd", "permissions": "read"},
{"id": "264613dafeqwe", "permissions": "write"},
{"id": "896632sdfsf", "permissions": "read"},
],
},
}
}
msg = GetGroupsResponse(
address=target,
status_code=200,
content=request_content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
| 27.228137 | 68 | 0.606898 | [
"Apache-2.0"
] | H4LL/PySyft | tests/syft/grid/messages/group_msg_test.py | 7,161 | Python |
import random
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_NO_OP = actions.FUNCTIONS.no_op.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_BUILD_SUPPLY_DEPOT = actions.FUNCTIONS.Build_SupplyDepot_screen.id
_BUILD_BARRACKS = actions.FUNCTIONS.Build_Barracks_screen.id
_TRAIN_MARINE = actions.FUNCTIONS.Train_Marine_quick.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id
_HARVEST_GATHER = actions.FUNCTIONS.Harvest_Gather_screen.id
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index
_PLAYER_ID = features.SCREEN_FEATURES.player_id.index
_PLAYER_SELF = 1
_PLAYER_HOSTILE = 4
_ARMY_SUPPLY = 5
_TERRAN_COMMANDCENTER = 18
_TERRAN_SCV = 45
_TERRAN_SUPPLY_DEPOT = 19
_TERRAN_BARRACKS = 21
_NEUTRAL_MINERAL_FIELD = 341
_NOT_QUEUED = [0]
_QUEUED = [1]
_SELECT_ALL = [2]
DATA_FILE = 'sparse_agent_data'
ACTION_DO_NOTHING = 'donothing'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_BUILD_MARINE = 'buildmarine'
ACTION_ATTACK = 'attack'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_BUILD_MARINE,
]
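# Carve the 64x64 minimap into four 32x32 quadrants and register one attack
# action per quadrant, encoded as "attack_x_y" with a point near the quadrant
# centre (the +/-8 jitter applied at execution time stays inside the quadrant).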
for mm_x in range(0, 64):
for mm_y in range(0, 64):
if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:
smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 16) + '_' + str(mm_y - 16))
# Stolen from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow
class QLearningTable:
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = actions # a list
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
def choose_action(self, observation):
self.check_state_exist(observation)
if np.random.uniform() < self.epsilon:
# choose best action
            state_action = self.q_table.loc[observation, :]
# some actions have the same value
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.idxmax()
else:
# choose random action
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
self.check_state_exist(s)
        q_predict = self.q_table.loc[s, a]
        if s_ != 'terminal':
            q_target = r + self.gamma * self.q_table.loc[s_, :].max()
        else:
            q_target = r  # next state is terminal
        # update
        self.q_table.loc[s, a] += self.lr * (q_target - q_predict)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(
pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state))
class SparseAgent(base_agent.BaseAgent):
def __init__(self):
super(SparseAgent, self).__init__()
self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))
self.previous_action = None
self.previous_state = None
self.cc_y = None
self.cc_x = None
self.move_number = 0
if os.path.isfile(DATA_FILE + '.gz'):
self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def splitAction(self, action_id):
smart_action = smart_actions[action_id]
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
return (smart_action, x, y)
def step(self, obs):
super(SparseAgent, self).step(obs)
if obs.last():
reward = obs.reward
self.qlearn.learn(str(self.previous_state), self.previous_action, reward, 'terminal')
self.qlearn.q_table.to_pickle(DATA_FILE + '.gz', 'gzip')
self.previous_action = None
self.previous_state = None
self.move_number = 0
return actions.FunctionCall(_NO_OP, [])
        unit_type = obs.observation['feature_screen'][_UNIT_TYPE]
if obs.first():
player_y, player_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_SELF).nonzero()
self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
self.cc_y, self.cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
cc_y, cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()
cc_count = 1 if cc_y.any() else 0
depot_y, depot_x = (unit_type == _TERRAN_SUPPLY_DEPOT).nonzero()
supply_depot_count = int(round(len(depot_y) / 69))
barracks_y, barracks_x = (unit_type == _TERRAN_BARRACKS).nonzero()
barracks_count = int(round(len(barracks_y) / 137))
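        # Each high-level action is spread over three consecutive env steps:
        # phase 0 picks an action from the Q-table and selects the acting unit,
        # phase 1 issues the actual order (build/train/attack), and phase 2
        # sends the builder SCV back to harvesting minerals.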
if self.move_number == 0:
self.move_number += 1
current_state = np.zeros(8)
current_state[0] = cc_count
current_state[1] = supply_depot_count
current_state[2] = barracks_count
current_state[3] = obs.observation['player'][_ARMY_SUPPLY]
hot_squares = np.zeros(4)
enemy_y, enemy_x = (obs.observation['feature_minimap'][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()
for i in range(0, len(enemy_y)):
y = int(math.ceil((enemy_y[i] + 1) / 32))
x = int(math.ceil((enemy_x[i] + 1) / 32))
hot_squares[((y - 1) * 2) + (x - 1)] = 1
if not self.base_top_left:
hot_squares = hot_squares[::-1]
for i in range(0, 4):
current_state[i + 4] = hot_squares[i]
if self.previous_action is not None:
self.qlearn.learn(str(self.previous_state), self.previous_action, 0, str(current_state))
rl_action = self.qlearn.choose_action(str(current_state))
self.previous_state = current_state
self.previous_action = rl_action
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
unit_y, unit_x = (unit_type == _TERRAN_SCV).nonzero()
if unit_y.any():
i = random.randint(0, len(unit_y) - 1)
target = [unit_x[i], unit_y[i]]
return actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_MARINE:
if barracks_y.any():
i = random.randint(0, len(barracks_y) - 1)
target = [barracks_x[i], barracks_y[i]]
return actions.FunctionCall(_SELECT_POINT, [_SELECT_ALL, target])
elif smart_action == ACTION_ATTACK:
if _SELECT_ARMY in obs.observation['available_actions']:
return actions.FunctionCall(_SELECT_ARMY, [_NOT_QUEUED])
elif self.move_number == 1:
self.move_number += 1
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if supply_depot_count < 2 and _BUILD_SUPPLY_DEPOT in obs.observation['available_actions']:
if self.cc_y.any():
if supply_depot_count == 0:
target = self.transformDistance(round(self.cc_x.mean()), -35, round(self.cc_y.mean()), 0)
elif supply_depot_count == 1:
target = self.transformDistance(round(self.cc_x.mean()), -25, round(self.cc_y.mean()), -25)
return actions.FunctionCall(_BUILD_SUPPLY_DEPOT, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_BARRACKS:
if barracks_count < 2 and _BUILD_BARRACKS in obs.observation['available_actions']:
if self.cc_y.any():
if barracks_count == 0:
target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), -9)
elif barracks_count == 1:
target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), 12)
return actions.FunctionCall(_BUILD_BARRACKS, [_NOT_QUEUED, target])
elif smart_action == ACTION_BUILD_MARINE:
if _TRAIN_MARINE in obs.observation['available_actions']:
return actions.FunctionCall(_TRAIN_MARINE, [_QUEUED])
elif smart_action == ACTION_ATTACK:
do_it = True
if len(obs.observation['single_select']) > 0 and obs.observation['single_select'][0][0] == _TERRAN_SCV:
do_it = False
if len(obs.observation['multi_select']) > 0 and obs.observation['multi_select'][0][0] == _TERRAN_SCV:
do_it = False
if do_it and _ATTACK_MINIMAP in obs.observation["available_actions"]:
x_offset = random.randint(-1, 1)
y_offset = random.randint(-1, 1)
return actions.FunctionCall(_ATTACK_MINIMAP, [_NOT_QUEUED,
self.transformLocation(int(x) + (x_offset * 8),
int(y) + (y_offset * 8))])
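        # Move 2: after a build order, send the selected SCV back to mining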
elif self.move_number == 2:
self.move_number = 0
smart_action, x, y = self.splitAction(self.previous_action)
if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if _HARVEST_GATHER in obs.observation['available_actions']:
unit_y, unit_x = (unit_type == _NEUTRAL_MINERAL_FIELD).nonzero()
if unit_y.any():
i = random.randint(0, len(unit_y) - 1)
m_x = unit_x[i]
m_y = unit_y[i]
target = [int(m_x), int(m_y)]
return actions.FunctionCall(_HARVEST_GATHER, [_QUEUED, target])
        return actions.FunctionCall(_NO_OP, [])
 | 36.503356 | 119 | 0.602041 | [
"Apache-2.0"
] | Tao-Chengyang/PySC2-Tutorial | 7. Using Reward for Agent/reward_agent.py | 10,878 | Python |
"""
# Sheets Account
Read a Google Sheet as if it were a realtime source of transactions
for a GL account. Columns are mapped to attributes. The
assumption is that the sheet maps to a single account, and the
rows are the credits/debits to that account.
Can be used as a plugin, which will write new entries (for reference)
to a file, but also maintain a "live" view of the transactions.
We support most of the sane columns on a sheet:
- date
- narration
- payee
- account
- amount
- currency
- tags
- links
- Anything else: any other column with a non-empty cell is added as META
Some things to look at are:
- Multi-currency Support
- Lot support?
- Other Directives: Note, Document, Balance?
- Smarter per-sheet caching of local results
I strongly suggest using "Transfer" accounts for all asset movements between
two accounts both of which are tracked via a Sheet. This simplifies the
"Matching" and allows each side to be reconciled independently.
TODO: Default Account when account column is blank?
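Example Open directive (illustrative only -- the sheet name, tab and slug below
are placeholders, not values shipped with this module):
    2021-01-01 open Assets:Bank:Checking USD
      document_name: "Family Ledger"
      document_tab: "checking"
      slug: "assets-bank-checking"
Rows on that tab are then loaded as transactions whose first leg posts to
Assets:Bank:Checking and whose second leg goes to each row's `account` column.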
"""
# stdlib imports
import logging
import decimal
import pprint
import typing
import datetime
import dateparser
import pathlib
import slugify
# Beancount imports
from beancount.core import data
from coolbeans.utils import safe_plugin, get_setting
from coolbeans.tools.sheets import google_connect, safe_open_sheet
from coolbeans.plugins.accountsync import apply_coolbean_settings
import gspread
STRIP_SYMBOLS = '₱$'
DEFAULT_CURRENCY = "USD"
logger = logging.getLogger(__name__)
__plugins__ = ['apply_coolbean_settings', 'remote_entries_plugin']
def clean_slug(slug):
"""Clean a possible Slug string to remove dashes and lower case."""
return slug.replace('-', '').lower()
def coolbean_sheets(entries, context):
"""Given a set of entries, pull out any slugs and add them to the context"""
settings = context.setdefault('coolbean-accounts', {})
# Pull out any 'slug' meta data
for entry in entries:
if isinstance(entry, data.Open):
document = entry.meta.get('document_name', None)
tab = entry.meta.get('document_tab', None)
slug = entry.meta.get('slug', "")
if document and tab and slug:
settings[slug] = {
'account': entry.account,
'document': document,
'tab': tab,
'currencies': entry.currencies
}
else:
if document or tab:
print(f"Skipping {entry.account}: {document}/{tab}/{slug}")
return entries, []
def remote_entries(entries, options_map):
"""
@param entries:
@param options_map:
@return:
"""
errors = []
settings = options_map['coolbeans']
secrets_file = get_setting('google-apis', settings)
connection = google_connect(secrets_file)
new_entries_path = None
new_entries_file = get_setting('new-entries-bean', settings)
if new_entries_file:
new_entries_path = pathlib.Path(new_entries_file)
# Capture the configuration off the Open
remote_accounts = {}
for entry in entries:
if not isinstance(entry, data.Open):
continue
document_name = entry.meta.get('document_name', None)
default_currency = entry.currencies[0] if entry.currencies else DEFAULT_CURRENCY
if document_name:
options = dict(
document_name=document_name,
document_tab=entry.meta.get('document_tab', None),
reverse_amount=entry.meta.get('reverse', False),
default_currency=default_currency,
entry=entry,
entry_file=new_entries_path
)
remote_accounts[entry.account] = options
new_entries = []
for account, options in remote_accounts.items():
try:
new_entries += load_remote_account(
connection=connection,
errors=errors,
account=account,
options=options
)
except Exception as exc:
logger.error(f"while processing {account}", exc_info=exc)
if new_entries and new_entries_path:
from beancount.parser import printer
with new_entries_path.open("w") as stream:
printer.print_entries(new_entries, file=stream)
logger.info(f"Wrote {len(new_entries)} new account(s) to {new_entries_path}.")
return entries+new_entries, errors
remote_entries_plugin = safe_plugin(remote_entries)
ALIASES = {
'narration': ['description', 'notes', 'details', 'memo']
}
def clean_record(record: typing.Dict[str, str]):
"""This is a bit of a hack. But using get_all_records doesn't leave many
options"""
new_record = {}
for k, v in record.items():
k = slugify.slugify(k.lower().strip())
v = str(v)
# Combine multiple narration columns if needed:
for field, names in ALIASES.items():
new_record.setdefault(field, '')
if k in names:
# Add the value to Narration:
new_record[field] += ('. ' if new_record[field] else '') + v
k = None # Clear this Key
break
        # Really ugly hack around embedded currency symbols. Needs cleanup.
        if k == 'amount':
            v = v.replace(',', '')
            for s in STRIP_SYMBOLS:
                v = v.replace(s, '')
            if v and not v[0].isdecimal() and not v[0] == '-':
                v = v[1:]
# Pull currency?
# Decimal is fussy
try:
v = decimal.Decimal(v)
except decimal.InvalidOperation:
v = 0
if k:
new_record[k] = v
return new_record
def load_remote_account(
connection: gspread.Client,
errors: list,
account: str,
options: typing.Dict[str, str]
):
"""Try to Load Entries from URL into Account.
options include:
- document_name -- the Actual Google Doc name
- document_tab -- the Tab name on the Doc
- default_currency - the entry currency if None is provided
- reverse_amount - if true, assume positive entries are credits
"""
entries = []
document_name = options['document_name']
document_tab = options.get('document_tab', 0) or 0
default_currency = options['default_currency']
reverse_amount = options.get('reverse_amount', False)
if not document_name:
return
m = -1 if reverse_amount else 1
logger.info(f"Attempting to download entries for {account} from {document_name}.{document_tab}")
workbook = connection.open(document_name)
sheet = None
try:
document_tab = int(document_tab)
sheet = workbook.get_worksheet(document_tab)
except ValueError:
pass
if sheet is None:
sheet = workbook.worksheet(document_tab)
records = sheet.get_all_records()
import re
row = 0
# logger.info(f"Found {len(records)} entries.")
for record in records:
row += 1
record = clean_record(record)
if 'date' not in record or not record['date']:
continue
if 'amount' not in record or not record['amount']:
continue
#if 'account' not in record or not record['account'].strip():
# continue
narration = record.pop('narration', None)
payee = record.pop('payee', None)
tagstr = record.pop('tags', '')
tags = set(re.split(r'\W+', tagstr)) if tagstr else set()
date = dateparser.parse(record.pop('date'))
if date:
date = datetime.date(year=date.year, month=date.month, day=date.day)
linkstr = record.pop('links', '')
links = set(re.split(r'\W+', linkstr)) if linkstr else set()
meta = {
'filename': str(options['entry_file']),
'lineno': 0,
'document-sheet-row': f"{document_name}/{document_tab}/{row+1}"
}
amount = decimal.Decimal(record.pop('amount')) * m
currency = record.pop('currency', default_currency)
entry_account = record.pop('account')
for k, v in record.items():
if v:
meta[k] = v
try:
if not entry_account:
errors.append(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
logger.warning(f"Skipping Record with Blank Account: {meta['document-sheet-row']}")
continue
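            # Build a balanced transaction: the sheet's account takes the signed
            # amount and the row's `account` column receives the offsetting leg.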
entry = data.Transaction(
date=date,
narration=narration,
payee=payee,
tags=tags,
meta=meta,
links=links,
flag='*',
postings=[
data.Posting(
account=account,
units=data.Amount(amount, currency),
cost=None,
price=None,
flag='*',
meta={}
),
data.Posting(
account=entry_account,
units=data.Amount(-amount, currency),
cost=None,
price=None,
flag='*',
meta={}
)
]
)
entries.append(entry)
except Exception as exc:
logger.error(f"Error while parsing {record}", exc_info=exc)
errors.append(str(exc))
logger.info(f"Loaded {len(entries)} entries for {account} from {document_name}.{document_tab}")
return entries
| 31.650485 | 100 | 0.58456 | [
"MIT"
] | runarp/coolbeans | src/coolbeans/plugins/sheetsaccount.py | 9,782 | Python |
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.unit.rules import BaseRuleTestCase
from cfnlint.rules.resources.properties.OnlyOne import OnlyOne # pylint: disable=E0401
class TestPropertyOnlyOne(BaseRuleTestCase):
"""Test OnlyOne Property Configuration"""
def setUp(self):
"""Setup"""
super(TestPropertyOnlyOne, self).setUp()
self.collection.register(OnlyOne())
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative(
'test/fixtures/templates/bad/resources/properties/onlyone.yaml', 5)
| 29.92 | 87 | 0.696524 | [
"MIT-0"
] | awkspace/cfn-python-lint | test/unit/rules/resources/properties/test_onlyone.py | 748 | Python |
import unittest
from unittest.mock import patch, call, Mock, MagicMock, mock_open
from botocore.exceptions import ClientError
from ground_truth.src import ground_truth
from common import _utils
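# Minimal required CLI arguments for the ground_truth component; the values are fakes shared by the tests below.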
required_args = [
'--region', 'us-west-2',
'--role', 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'--job_name', 'test_job',
'--manifest_location', 's3://fake-bucket/manifest',
'--output_location', 's3://fake-bucket/output',
'--task_type', 'fake-task',
'--worker_type', 'fake_worker',
'--ui_template', 's3://fake-bucket/ui_template',
'--title', 'fake-image-labelling-work',
'--description', 'fake job',
'--num_workers_per_object', '1',
'--time_limit', '180',
]
class GroundTruthTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
parser = ground_truth.create_parser()
cls.parser = parser
def test_create_parser(self):
self.assertIsNotNone(self.parser)
def test_main(self):
# Mock out all of utils except parser
ground_truth._utils = MagicMock()
ground_truth._utils.add_default_client_arguments = _utils.add_default_client_arguments
# Set some static returns
ground_truth._utils.get_labeling_job_outputs.return_value = ('s3://fake-bucket/output', 'arn:aws:sagemaker:us-east-1:999999999999:labeling-job')
with patch('builtins.open', mock_open()) as file_open:
ground_truth.main(required_args)
# Check if correct requests were created and triggered
ground_truth._utils.create_labeling_job.assert_called()
ground_truth._utils.wait_for_labeling_job.assert_called()
ground_truth._utils.get_labeling_job_outputs.assert_called()
# Check the file outputs
file_open.assert_has_calls([
call('/tmp/output_manifest_location.txt', 'w'),
call('/tmp/active_learning_model_arn.txt', 'w')
], any_order=True)
file_open().write.assert_has_calls([
call('s3://fake-bucket/output'),
call('arn:aws:sagemaker:us-east-1:999999999999:labeling-job')
], any_order=False)
def test_ground_truth(self):
mock_client = MagicMock()
mock_args = self.parser.parse_args(required_args)
response = _utils.create_labeling_job(mock_client, vars(mock_args))
mock_client.create_labeling_job.assert_called_once_with(
HumanTaskConfig={'WorkteamArn': None, 'UiConfig': {'UiTemplateS3Uri': 's3://fake-bucket/ui_template'},
'PreHumanTaskLambdaArn': '', 'TaskTitle': 'fake-image-labelling-work',
'TaskDescription': 'fake job', 'NumberOfHumanWorkersPerDataObject': 1,
'TaskTimeLimitInSeconds': 180,
'AnnotationConsolidationConfig': {'AnnotationConsolidationLambdaArn': ''}},
InputConfig={'DataSource': {'S3DataSource': {'ManifestS3Uri': 's3://fake-bucket/manifest'}}},
LabelAttributeName='test_job', LabelingJobName='test_job',
OutputConfig={'S3OutputPath': 's3://fake-bucket/output', 'KmsKeyId': ''},
RoleArn='arn:aws:iam::123456789012:user/Development/product_1234/*', Tags=[]
)
self.assertEqual(response, 'test_job')
def test_sagemaker_exception_in_ground_truth(self):
mock_client = MagicMock()
mock_exception = ClientError({"Error": {"Message": "SageMaker broke"}}, "ground_truth")
mock_client.create_labeling_job.side_effect = mock_exception
mock_args = self.parser.parse_args(required_args)
with self.assertRaises(Exception):
            _utils.create_labeling_job(mock_client, vars(mock_args))
def test_wait_for_labeling_job_creation(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.side_effect = [
{"LabelingJobStatus": "InProgress"},
{"LabelingJobStatus": "Completed"},
{"LabelingJobStatus": "Should not be called"}
]
_utils.wait_for_labeling_job(mock_client, 'test-batch', 0)
self.assertEqual(mock_client.describe_labeling_job.call_count, 2)
    def test_wait_for_labeling_job_failure(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.side_effect = [
{"LabelingJobStatus": "InProgress"},
{"LabelingJobStatus": "Failed"},
{"LabelingJobStatus": "Should not be called"}
]
with self.assertRaises(Exception):
_utils.wait_for_labeling_job(mock_client, 'test-batch', 0)
self.assertEqual(mock_client.describe_labeling_job.call_count, 2)
def test_get_labeling_job_output_from_job(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.return_value = {"LabelingJobOutput": {
"OutputDatasetS3Uri": "s3://path/",
"FinalActiveLearningModelArn": "fake-arn"
}}
output_manifest, active_learning_model_arn = _utils.get_labeling_job_outputs(mock_client, 'labeling-job', True)
self.assertEqual(output_manifest, 's3://path/')
self.assertEqual(active_learning_model_arn, 'fake-arn')
def test_pass_most_args(self):
required_args = [
'--region', 'us-west-2',
'--role', 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'--job_name', 'test_job',
'--manifest_location', 's3://fake-bucket/manifest',
'--output_location', 's3://fake-bucket/output',
'--task_type', 'image classification',
'--worker_type', 'fake_worker',
'--ui_template', 's3://fake-bucket/ui_template',
'--title', 'fake-image-labelling-work',
'--description', 'fake job',
'--num_workers_per_object', '1',
'--time_limit', '180',
]
arguments = required_args + ['--label_attribute_name', 'fake-attribute',
'--max_human_labeled_objects', '10',
'--max_percent_objects', '50',
'--enable_auto_labeling', 'True',
'--initial_model_arn', 'fake-model-arn',
'--task_availibility', '30',
'--max_concurrent_tasks', '10',
'--task_keywords', 'fake-keyword',
'--worker_type', 'public',
'--no_adult_content', 'True',
'--no_ppi', 'True',
'--tags', '{"fake_key": "fake_value"}'
]
response = _utils.create_labeling_job_request(vars(self.parser.parse_args(arguments)))
print(response)
self.assertEqual(response, {'LabelingJobName': 'test_job',
'LabelAttributeName': 'fake-attribute',
'InputConfig': {'DataSource': {'S3DataSource': {'ManifestS3Uri': 's3://fake-bucket/manifest'}},
'DataAttributes': {'ContentClassifiers': ['FreeOfAdultContent', 'FreeOfPersonallyIdentifiableInformation']}},
'OutputConfig': {'S3OutputPath': 's3://fake-bucket/output', 'KmsKeyId': ''},
'RoleArn': 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'StoppingConditions': {'MaxHumanLabeledObjectCount': 10, 'MaxPercentageOfInputDatasetLabeled': 50},
'LabelingJobAlgorithmsConfig': {'LabelingJobAlgorithmSpecificationArn': 'arn:aws:sagemaker:us-west-2:027400017018:labeling-job-algorithm-specification/image-classification',
'InitialActiveLearningModelArn': 'fake-model-arn',
'LabelingJobResourceConfig': {'VolumeKmsKeyId': ''}},
'HumanTaskConfig': {'WorkteamArn': 'arn:aws:sagemaker:us-west-2:394669845002:workteam/public-crowd/default',
'UiConfig': {'UiTemplateS3Uri': 's3://fake-bucket/ui_template'},
'PreHumanTaskLambdaArn': 'arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass',
'TaskKeywords': ['fake-keyword'],
'TaskTitle': 'fake-image-labelling-work',
'TaskDescription': 'fake job',
'NumberOfHumanWorkersPerDataObject': 1,
'TaskTimeLimitInSeconds': 180,
'TaskAvailabilityLifetimeInSeconds': 30,
'MaxConcurrentTaskCount': 10,
'AnnotationConsolidationConfig': {'AnnotationConsolidationLambdaArn': 'arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass'},
'PublicWorkforceTaskPrice': {'AmountInUsd': {'Dollars': 0, 'Cents': 0, 'TenthFractionsOfACent': 0}}},
'Tags': [{'Key': 'fake_key', 'Value': 'fake_value'}]}
)
| 51.747191 | 205 | 0.594072 | [
"Apache-2.0"
] | Intellicode/pipelines | components/aws/sagemaker/tests/unit_tests/tests/test_ground_truth.py | 9,211 | Python |