id (int64, 11 to 59.9k) | original (string, lengths 33 to 150k) | modified (string, lengths 37 to 150k)
---|---|---|
22,251 |
def test_loader():
class TestToolDirectory(object):
def __init__(self):
self.temp_directory = mkdtemp()
def __enter__(self):
return self
def __exit__(self, type, value, tb):
rmtree(self.temp_directory)
def write(self, contents, name="tool.xml"):
open(os.path.join(self.temp_directory, name), "w").write(contents)
def load(self, name="tool.xml", preprocess=True):
path = os.path.join(self.temp_directory, name)
if preprocess:
return load_tool(path)
else:
return parse_xml(path)
# Test simple macro replacement.
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs" />
<macros>
<macro name="inputs">
<inputs />
</macro>
</macros>
</tool>''')
xml = tool_dir.load(preprocess=False)
assert xml.find("inputs") is None
xml = tool_dir.load(preprocess=True)
assert xml.find("inputs") is not None
# Test importing macros from external files
with TestToolDirectory() as tool_dir:
tool_dir.write(SIMPLE_TOOL_WITH_MACRO)
tool_dir.write(SIMPLE_MACRO.substitute(tool_version="2.0"), name="external.xml")
xml = tool_dir.load(preprocess=False)
assert xml.find("inputs") is None
xml = tool_dir.load(preprocess=True)
assert xml.find("inputs") is not None
# Test macros with unnamed yield statements.
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs">
<input name="first_input" />
</expand>
<expand macro="inputs">
<input name="second_input" />
</expand>
<expand macro="inputs">
<input name="third_input" />
</expand>
<macros>
<macro name="inputs">
<expand macro="foo">
<yield />
</expand>
</macro>
<macro name="foo">
<inputs>
<yield />
</inputs>
</macro>
</macros>
</tool>''')
xml = tool_dir.load()
assert xml.findall("inputs")[0].find("input").get("name") == "first_input"
assert xml.findall("inputs")[1].find("input").get("name") == "second_input"
assert xml.findall("inputs")[2].find("input").get("name") == "third_input"
# Test recursive macro applications.
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs">
<input name="first_input" />
<expand macro="second" />
</expand>
<macros>
<macro name="inputs">
<inputs>
<yield />
<input name="third_input" />
</inputs>
</macro>
<macro name="second">
<input name="second_input" />
</macro>
</macros>
</tool>''')
xml = tool_dir.load()
assert xml.find("inputs").findall("input")[0].get("name") == "first_input"
assert xml.find("inputs").findall("input")[1].get("name") == "second_input"
assert xml.find("inputs").findall("input")[2].get("name") == "third_input"
# Test recursive macro applications.
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs">
<input name="first_input" />
<expand macro="second" />
</expand>
<macros>
<macro name="inputs">
<inputs>
<yield />
</inputs>
</macro>
<macro name="second">
<expand macro="second_delegate" />
<input name="third_input" />
</macro>
<macro name="second_delegate">
<input name="second_input" />
</macro>
</macros>
</tool>''')
xml = tool_dir.load()
assert xml.find("inputs").findall("input")[0].get("name") == "first_input"
assert xml.find("inputs").findall("input")[1].get("name") == "second_input"
assert xml.find("inputs").findall("input")[2].get("name") == "third_input"
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool id="issue_647">
<macros>
<macro name="a">
<param name="a1" type="text" value="a1" label="a1"/>
<yield />
</macro>
</macros>
<inputs>
<expand macro="a">
<param name="b" type="text" value="b" label="b" />
</expand>
</inputs>
</tool>''')
xml = tool_dir.load()
assert xml.find("inputs").findall("param")[0].get("name") == "a1"
assert xml.find("inputs").findall("param")[1].get("name") == "b"
# Test <xml> is shortcut for macro type="xml"
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs" />
<macros>
<xml name="inputs">
<inputs />
</xml>
</macros>
</tool>''')
xml = tool_dir.load()
assert xml.find("inputs") is not None
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<command interpreter="python">tool_wrapper.py
#include source=$tool_params
</command>
<macros>
<template name="tool_params">-a 1 -b 2</template>
</macros>
</tool>
''')
xml = tool_dir.load()
params_dict = template_macro_params(xml.getroot())
assert params_dict['tool_params'] == "-a 1 -b 2"
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<macros>
<token name="@CITATION@">The citation.</token>
</macros>
<help>@CITATION@</help>
<another>
<tag />
</another>
</tool>
''')
xml = tool_dir.load()
help_el = xml.find("help")
assert help_el.text == "The citation.", help_el.text
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<macros>
<token name="@TAG_VAL@">The value.</token>
</macros>
<another>
<tag value="@TAG_VAL@" />
</another>
</tool>
''')
xml = tool_dir.load()
tag_el = xml.find("another").find("tag")
value = tag_el.get('value')
assert value == "The value.", value
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<macros>
<token name="@TAG_VAL@"><![CDATA[]]></token>
</macros>
<another>
<tag value="@TAG_VAL@" />
</another>
</tool>
''')
xml = tool_dir.load()
tag_el = xml.find("another").find("tag")
value = tag_el.get('value')
assert value == "", value
# Test macros XML macros with $$ expansions in attributes
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs" bar="hello" />
<expand macro="inputs" bar="my awesome" />
<expand macro="inputs" bar="doggo" />
<macros>
<xml name="inputs" tokens="bar" token_quote="$$">
<inputs type="the type is $$BAR$$" />
</xml>
</macros>
</tool>
''')
xml = tool_dir.load()
input_els = xml.findall("inputs")
assert len(input_els) == 3
assert input_els[0].attrib["type"] == "the type is hello"
assert input_els[1].attrib["type"] == "the type is my awesome"
assert input_els[2].attrib["type"] == "the type is doggo"
# Test macros XML macros with @ expansions in text
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs" foo="hello" />
<expand macro="inputs" foo="world" />
<expand macro="inputs" />
<macros>
<xml name="inputs" token_foo="the_default">
<inputs>@FOO@</inputs>
</xml>
</macros>
</tool>
''')
xml = tool_dir.load()
input_els = xml.findall("inputs")
assert len(input_els) == 3
assert input_els[0].text == "hello"
assert input_els[1].text == "world"
assert input_els[2].text == "the_default"
# Test macros XML macros with @ expansions and recurisve
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs" foo="hello" />
<expand macro="inputs" foo="world" />
<expand macro="inputs" />
<macros>
<xml name="inputs" token_foo="the_default">
<expand macro="real_inputs"><cow>@FOO@</cow></expand>
</xml>
<xml name="real_inputs">
<inputs><yield /></inputs>
</xml>
</macros>
</tool>
''')
xml = tool_dir.load()
input_els = xml.findall("inputs")
assert len(input_els) == 3
print(input_els[0].text)
assert input_els[0].find("cow").text == "hello"
assert input_els[1].find("cow").text == "world"
assert input_els[2].find("cow").text == "the_default"
|
def test_loader():
class TestToolDirectory(object):
def __init__(self):
self.temp_directory = mkdtemp()
def __enter__(self):
return self
def __exit__(self, type, value, tb):
rmtree(self.temp_directory)
def write(self, contents, name="tool.xml"):
open(os.path.join(self.temp_directory, name), "w").write(contents)
def load(self, name="tool.xml", preprocess=True):
path = os.path.join(self.temp_directory, name)
if preprocess:
return load_tool(path)
else:
return parse_xml(path)
# Test simple macro replacement.
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs" />
<macros>
<macro name="inputs">
<inputs />
</macro>
</macros>
</tool>''')
xml = tool_dir.load(preprocess=False)
assert xml.find("inputs") is None
xml = tool_dir.load(preprocess=True)
assert xml.find("inputs") is not None
# Test importing macros from external files
with TestToolDirectory() as tool_dir:
tool_dir.write(SIMPLE_TOOL_WITH_MACRO)
tool_dir.write(SIMPLE_MACRO.substitute(tool_version="2.0"), name="external.xml")
xml = tool_dir.load(preprocess=False)
assert xml.find("inputs") is None
xml = tool_dir.load(preprocess=True)
assert xml.find("inputs") is not None
# Test macros with unnamed yield statements.
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs">
<input name="first_input" />
</expand>
<expand macro="inputs">
<input name="second_input" />
</expand>
<expand macro="inputs">
<input name="third_input" />
</expand>
<macros>
<macro name="inputs">
<expand macro="foo">
<yield />
</expand>
</macro>
<macro name="foo">
<inputs>
<yield />
</inputs>
</macro>
</macros>
</tool>''')
xml = tool_dir.load()
assert xml.findall("inputs")[0].find("input").get("name") == "first_input"
assert xml.findall("inputs")[1].find("input").get("name") == "second_input"
assert xml.findall("inputs")[2].find("input").get("name") == "third_input"
# Test recursive macro applications.
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs">
<input name="first_input" />
<expand macro="second" />
</expand>
<macros>
<macro name="inputs">
<inputs>
<yield />
<input name="third_input" />
</inputs>
</macro>
<macro name="second">
<input name="second_input" />
</macro>
</macros>
</tool>''')
xml = tool_dir.load()
assert xml.find("inputs").findall("input")[0].get("name") == "first_input"
assert xml.find("inputs").findall("input")[1].get("name") == "second_input"
assert xml.find("inputs").findall("input")[2].get("name") == "third_input"
# Test recursive macro applications.
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs">
<input name="first_input" />
<expand macro="second" />
</expand>
<macros>
<macro name="inputs">
<inputs>
<yield />
</inputs>
</macro>
<macro name="second">
<expand macro="second_delegate" />
<input name="third_input" />
</macro>
<macro name="second_delegate">
<input name="second_input" />
</macro>
</macros>
</tool>''')
xml = tool_dir.load()
assert xml.find("inputs").findall("input")[0].get("name") == "first_input"
assert xml.find("inputs").findall("input")[1].get("name") == "second_input"
assert xml.find("inputs").findall("input")[2].get("name") == "third_input"
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool id="issue_647">
<macros>
<macro name="a">
<param name="a1" type="text" value="a1" label="a1"/>
<yield />
</macro>
</macros>
<inputs>
<expand macro="a">
<param name="b" type="text" value="b" label="b" />
</expand>
</inputs>
</tool>''')
xml = tool_dir.load()
assert xml.find("inputs").findall("param")[0].get("name") == "a1"
assert xml.find("inputs").findall("param")[1].get("name") == "b"
# Test <xml> is shortcut for macro type="xml"
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs" />
<macros>
<xml name="inputs">
<inputs />
</xml>
</macros>
</tool>''')
xml = tool_dir.load()
assert xml.find("inputs") is not None
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<command interpreter="python">tool_wrapper.py
#include source=$tool_params
</command>
<macros>
<template name="tool_params">-a 1 -b 2</template>
</macros>
</tool>
''')
xml = tool_dir.load()
params_dict = template_macro_params(xml.getroot())
assert params_dict['tool_params'] == "-a 1 -b 2"
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<macros>
<token name="@CITATION@">The citation.</token>
</macros>
<help>@CITATION@</help>
<another>
<tag />
</another>
</tool>
''')
xml = tool_dir.load()
help_el = xml.find("help")
assert help_el.text == "The citation.", help_el.text
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<macros>
<token name="@TAG_VAL@">The value.</token>
</macros>
<another>
<tag value="@TAG_VAL@" />
</another>
</tool>
''')
xml = tool_dir.load()
tag_el = xml.find("another").find("tag")
value = tag_el.get('value')
assert value == "The value.", value
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<macros>
<token name="@TAG_VAL@"><![CDATA[]]></token>
</macros>
<another>
<tag value="@TAG_VAL@" />
</another>
</tool>
''')
xml = tool_dir.load()
tag_el = xml.find("another").find("tag")
value = tag_el.get('value')
assert value == "", value
# Test macros XML macros with $$ expansions in attributes
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs" bar="hello" />
<expand macro="inputs" bar="my awesome" />
<expand macro="inputs" bar="doggo" />
<macros>
<xml name="inputs" tokens="bar" token_quote="$$">
<inputs type="the type is $$BAR$$" />
</xml>
</macros>
</tool>
''')
xml = tool_dir.load()
input_els = xml.findall("inputs")
assert len(input_els) == 3
assert input_els[0].attrib["type"] == "the type is hello"
assert input_els[1].attrib["type"] == "the type is my awesome"
assert input_els[2].attrib["type"] == "the type is doggo"
# Test macros XML macros with @ expansions in text
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs" foo="hello" />
<expand macro="inputs" foo="world" />
<expand macro="inputs" />
<macros>
<xml name="inputs" token_foo="the_default">
<inputs>@FOO@</inputs>
</xml>
</macros>
</tool>
''')
xml = tool_dir.load()
input_els = xml.findall("inputs")
assert len(input_els) == 3
assert input_els[0].text == "hello"
assert input_els[1].text == "world"
assert input_els[2].text == "the_default"
# Test macros XML macros with @ expansions and recursive
with TestToolDirectory() as tool_dir:
tool_dir.write('''
<tool>
<expand macro="inputs" foo="hello" />
<expand macro="inputs" foo="world" />
<expand macro="inputs" />
<macros>
<xml name="inputs" token_foo="the_default">
<expand macro="real_inputs"><cow>@FOO@</cow></expand>
</xml>
<xml name="real_inputs">
<inputs><yield /></inputs>
</xml>
</macros>
</tool>
''')
xml = tool_dir.load()
input_els = xml.findall("inputs")
assert len(input_els) == 3
print(input_els[0].text)
assert input_els[0].find("cow").text == "hello"
assert input_els[1].find("cow").text == "world"
assert input_els[2].find("cow").text == "the_default"
|
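
The pair above differs only in a comment typo ("recurisve" vs "recursive"); the test itself exercises Galaxy-style XML macro expansion. Below is a minimal, standalone sketch of that idea using `xml.etree.ElementTree`. `expand_macros` is a hypothetical helper, not Galaxy's `load_tool`, and it deliberately ignores `<yield/>`, tokens, and imported macro files.

```python
import copy
import xml.etree.ElementTree as ET

def expand_macros(elem, macros):
    # Naive splice: replace each <expand macro="name"/> child with a deep copy
    # of the children of the matching <macro name="name"> definition.
    i = 0
    while i < len(elem):
        child = elem[i]
        if child.tag == "expand":
            body = copy.deepcopy(macros[child.get("macro")])
            elem.remove(child)
            for offset, node in enumerate(list(body)):
                elem.insert(i + offset, node)
            # do not advance i: spliced-in nodes may themselves contain <expand>
        else:
            expand_macros(child, macros)
            i += 1

tool = ET.fromstring(
    '<tool>'
    '<expand macro="inputs" />'
    '<macros><macro name="inputs"><inputs /></macro></macros>'
    '</tool>'
)
macros = {m.get("name"): m for m in tool.findall("./macros/macro")}
expand_macros(tool, macros)
assert tool.find("inputs") is not None
```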
20,015 |
def find_color_card(rgb_img, threshold_type='adaptgauss', threshvalue=125, blurry=False, background='dark',
record_chip_size="median"):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Algorithm written by Brandon Hurr. Updated and implemented into PlantCV by Haley Schuhl.
Inputs:
rgb_img = Input RGB image data containing a color card.
threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss')
threshvalue = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background either 'dark' or 'light' (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
record_chip_size = Optional str for choosing chip size measurement to be recorded, either "median",
"mean", or None
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param rgb_img: numpy.ndarray
:param threshold: str
:param threshvalue: int
:param blurry: bool
:param background: str
:param record_chip_size: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
import skimage
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = rgb_img.shape
total_pix = float(height * width)
# Minimum and maximum square size based upon 12 MP image
min_area = 1000. / 12000000. * total_pix
max_area = 8000000. / 12000000. * total_pix
# Create gray image for further processing
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold_type.upper() == "OTSU":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold_type.upper() == "NORMAL":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold_type.upper() == "ADAPTGAUSS":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Input threshold=' + str(threshold_type) + ' but should be "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
canny_edges = skimage.feature.canny(threshold)
canny_edges.dtype = 'uint8'
# Compute contours to find the squares of the card
contours, hierarchy = cv2.findContours(canny_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00']))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
mx.append((0))
my.append((0))
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and min_area < marea[index] < max_area:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.1 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
mwhratio.append(wh[0] / wh[1])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) == 4 or len(approx) == 5:
msquarecoords.append(approx)
else: # It's not square
# msquare.append(0)
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
all_contours = {'index': mindex, 'x': mx, 'y': my, 'width': mwidth, 'height': mheight, 'res_ratio': mwhratio,
'area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(all_contours)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['area'] > min_area) & (df['area'] < max_area) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['res_ratio'] < 1.2) & (df['res_ratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card isolate area measurements
filtered_area = df['area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)
# Append sizeprox summary to dataframe
df = df.assign(sizeprox=sizematrix.values)
# Reorder dataframe for better printing
df = df[['index', 'x', 'y', 'width', 'height', 'res_ratio', 'area', 'square', 'child',
'blurriness', 'distprox', 'sizeprox']]
# Loosely filter for size and distance (relative size to median)
minsqwidth = median_sq_width_px * 0.80
maxsqwidth = median_sq_width_px * 1.2
df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
(df['width'] < maxsqwidth)]
# Filter for proximity again to root out stragglers. Find and count up squares that are within given radius,
# more squares = more likelihood of them being the card. Median width of square time 2.5 gives proximity radius
# for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 5
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Filter results for distance proximity to other squares
df = df[(df['distprox'] >= 4)]
# Remove all not numeric values use to_numeric with parameter, errors='coerce' - it replace non numeric to NaNs:
df['x'] = pd.to_numeric(df['x'], errors='coerce')
df['y'] = pd.to_numeric(df['y'], errors='coerce')
# Remove NaN
df = df.dropna()
if df['x'].min() is np.nan or df['y'].min() is np.nan:
fatal_error('No color card found under current parameters')
else:
# Extract the starting coordinate
start_coord = (df['x'].min(), df['y'].min())
# start_coord = (int(df['X'].min()), int(df['Y'].min()))
# Calculate the range
spacingx_short = (df['x'].max() - df['x'].min()) / 3
spacingy_short = (df['y'].max() - df['y'].min()) / 3
spacingx_long = (df['x'].max() - df['x'].min()) / 5
spacingy_long = (df['y'].max() - df['y'].min()) / 5
# Chip spacing since 4x6 card assumed
spacing_short = min(spacingx_short, spacingy_short)
spacing_long = max(spacingx_long, spacingy_long)
# Smaller spacing measurement might have a chip missing
spacing = int(max(spacing_short, spacing_long))
spacing = (spacing, spacing)
if record_chip_size is not None:
if record_chip_size.upper() == "MEDIAN":
chip_size = df.loc[:,"area"].median()
elif record_chip_size.upper() == "MEAN":
chip_size = df.loc[:,"area"].mean()
else:
print(str(record_chip_size) + " Is not a valid entry for record_chip_size." +
" Must be either 'mean', 'median', or None.")
chip_size = None
# Store into global measurements
outputs.add_observation(variable='color_chip_size', trait='size of color card chips identified',
method='plantcv.plantcv.transform.find_color_card', scale='none',
datatype=float, value=chip_size, label=str(record_chip_size))
return df, start_coord, spacing
|
def find_color_card(rgb_img, threshold_type='adaptgauss', threshvalue=125, blurry=False, background='dark',
record_chip_size="median"):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Algorithm written by Brandon Hurr. Updated and implemented into PlantCV by Haley Schuhl.
Inputs:
rgb_img = Input RGB image data containing a color card.
threshold_type = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss')
threshvalue = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background either 'dark' or 'light' (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
record_chip_size = Optional str for choosing chip size measurement to be recorded, either "median",
"mean", or None
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param rgb_img: numpy.ndarray
:param threshold: str
:param threshvalue: int
:param blurry: bool
:param background: str
:param record_chip_size: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
import skimage
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = rgb_img.shape
total_pix = float(height * width)
# Minimum and maximum square size based upon 12 MP image
min_area = 1000. / 12000000. * total_pix
max_area = 8000000. / 12000000. * total_pix
# Create gray image for further processing
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold_type.upper() == "OTSU":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold_type.upper() == "NORMAL":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold_type.upper() == "ADAPTGAUSS":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Input threshold=' + str(threshold_type) + ' but should be "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
canny_edges = skimage.feature.canny(threshold)
canny_edges.dtype = 'uint8'
# Compute contours to find the squares of the card
contours, hierarchy = cv2.findContours(canny_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00']))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
mx.append((0))
my.append((0))
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and min_area < marea[index] < max_area:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.1 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
mwhratio.append(wh[0] / wh[1])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) == 4 or len(approx) == 5:
msquarecoords.append(approx)
else: # It's not square
# msquare.append(0)
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
all_contours = {'index': mindex, 'x': mx, 'y': my, 'width': mwidth, 'height': mheight, 'res_ratio': mwhratio,
'area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(all_contours)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['area'] > min_area) & (df['area'] < max_area) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['res_ratio'] < 1.2) & (df['res_ratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card isolate area measurements
filtered_area = df['area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)
# Append sizeprox summary to dataframe
df = df.assign(sizeprox=sizematrix.values)
# Reorder dataframe for better printing
df = df[['index', 'x', 'y', 'width', 'height', 'res_ratio', 'area', 'square', 'child',
'blurriness', 'distprox', 'sizeprox']]
# Loosely filter for size and distance (relative size to median)
minsqwidth = median_sq_width_px * 0.80
maxsqwidth = median_sq_width_px * 1.2
df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
(df['width'] < maxsqwidth)]
# Filter for proximity again to root out stragglers. Find and count up squares that are within given radius,
# more squares = more likelihood of them being the card. Median width of square time 2.5 gives proximity radius
# for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 5
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Filter results for distance proximity to other squares
df = df[(df['distprox'] >= 4)]
# Remove all not numeric values use to_numeric with parameter, errors='coerce' - it replace non numeric to NaNs:
df['x'] = pd.to_numeric(df['x'], errors='coerce')
df['y'] = pd.to_numeric(df['y'], errors='coerce')
# Remove NaN
df = df.dropna()
if df['x'].min() is np.nan or df['y'].min() is np.nan:
fatal_error('No color card found under current parameters')
else:
# Extract the starting coordinate
start_coord = (df['x'].min(), df['y'].min())
# start_coord = (int(df['X'].min()), int(df['Y'].min()))
# Calculate the range
spacingx_short = (df['x'].max() - df['x'].min()) / 3
spacingy_short = (df['y'].max() - df['y'].min()) / 3
spacingx_long = (df['x'].max() - df['x'].min()) / 5
spacingy_long = (df['y'].max() - df['y'].min()) / 5
# Chip spacing since 4x6 card assumed
spacing_short = min(spacingx_short, spacingy_short)
spacing_long = max(spacingx_long, spacingy_long)
# Smaller spacing measurement might have a chip missing
spacing = int(max(spacing_short, spacing_long))
spacing = (spacing, spacing)
if record_chip_size is not None:
if record_chip_size.upper() == "MEDIAN":
chip_size = df.loc[:,"area"].median()
elif record_chip_size.upper() == "MEAN":
chip_size = df.loc[:,"area"].mean()
else:
print(str(record_chip_size) + " Is not a valid entry for record_chip_size." +
" Must be either 'mean', 'median', or None.")
chip_size = None
# Store into global measurements
outputs.add_observation(variable='color_chip_size', trait='size of color card chips identified',
method='plantcv.plantcv.transform.find_color_card', scale='none',
datatype=float, value=chip_size, label=str(record_chip_size))
return df, start_coord, spacing
|
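
A hedged usage sketch for the function above, following the PlantCV-style API its docstring describes. The image path, chip radius, and the downstream `create_color_card_mask` call are illustrative assumptions, not taken from the source.

```python
import cv2
from plantcv import plantcv as pcv

# Illustrative only: the file name is a placeholder.
rgb_img = cv2.imread("plant_with_color_card.jpg")
df, start_coord, spacing = pcv.transform.find_color_card(
    rgb_img, threshold_type="adaptgauss", background="dark",
    record_chip_size="median"
)
# The docstring points to create_color_card_mask; a 4x6 card is assumed here.
mask = pcv.transform.create_color_card_mask(
    rgb_img, radius=10, start_coord=start_coord, spacing=spacing,
    nrows=6, ncols=4
)
```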
5,017 |
def polar(*args, **kwargs):
"""
Make a polar plot.
call signature::
polar(theta, r, **kwargs)
Multiple *theta*, *r* arguments are supported, with format strings, as in
`plot`.
"""
# If an axis already exists, check if it has a polar projection
if gcf().get_axes():
ax = gca()
if isinstance(gca(), PolarAxes):
return ax
else:
_api.warn_external('Trying to create polar plot on an axis '
'that does not have a polar projection.')
ax = axes(polar=True)
ret = ax.plot(*args, **kwargs)
return ret
|
def polar(*args, **kwargs):
"""
Make a polar plot.
call signature::
polar(theta, r, **kwargs)
Multiple *theta*, *r* arguments are supported, with format strings, as in
`plot`.
"""
# If an axis already exists, check if it has a polar projection
if gcf().get_axes():
ax = gca()
if isinstance(ax, PolarAxes):
return ax
else:
_api.warn_external('Trying to create polar plot on an axis '
'that does not have a polar projection.')
ax = axes(polar=True)
ret = ax.plot(*args, **kwargs)
return ret
|
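
For context, a minimal usage sketch of `pyplot.polar` as documented above (standard Matplotlib; the data values are made up).

```python
import numpy as np
import matplotlib.pyplot as plt

theta = np.linspace(0, 2 * np.pi, 200)
r = 1 + 0.5 * np.sin(3 * theta)    # arbitrary example curve
lines = plt.polar(theta, r, "g-")  # returns a list of Line2D artists
plt.show()
```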
5,799 |
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
# Use the original function object if it is wrapped.
obj = obj.__wrapped__ if hasattr(obj, "__wrapped__") else obj
# SciPy's distributions are instances of *_gen. Point to this
# class since it contains the implementation of all the methods.
if isinstance(obj, (rv_generic, multi_rv_generic)):
obj = obj.__class__
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
startdir = os.path.abspath(os.path.join(dirname(scipy.__file__), '..'))
fn = relpath(fn, start=startdir).replace(os.path.sep, '/')
if fn.startswith('scipy/'):
m = re.match(r'^.*dev0\+([a-f0-9]+)$', scipy.__version__)
if m:
return "https://github.com/scipy/scipy/blob/%s/%s%s" % (
m.group(1), fn, linespec)
elif 'dev' in scipy.__version__:
return "https://github.com/scipy/scipy/blob/main/%s%s" % (
fn, linespec)
else:
return "https://github.com/scipy/scipy/blob/v%s/%s%s" % (
scipy.__version__, fn, linespec)
else:
return None
|
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
# Use the original function object if it is wrapped.
obj = getattr(obj, "__wrapped__", obj)
# SciPy's distributions are instances of *_gen. Point to this
# class since it contains the implementation of all the methods.
if isinstance(obj, (rv_generic, multi_rv_generic)):
obj = obj.__class__
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
startdir = os.path.abspath(os.path.join(dirname(scipy.__file__), '..'))
fn = relpath(fn, start=startdir).replace(os.path.sep, '/')
if fn.startswith('scipy/'):
m = re.match(r'^.*dev0\+([a-f0-9]+)$', scipy.__version__)
if m:
return "https://github.com/scipy/scipy/blob/%s/%s%s" % (
m.group(1), fn, linespec)
elif 'dev' in scipy.__version__:
return "https://github.com/scipy/scipy/blob/main/%s%s" % (
fn, linespec)
else:
return "https://github.com/scipy/scipy/blob/v%s/%s%s" % (
scipy.__version__, fn, linespec)
else:
return None
|
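
The only change in this pair is collapsing the `hasattr` ternary into `getattr` with a default. A small self-contained check of that equivalence; the `decorate`/`target` names are hypothetical.

```python
import functools

def decorate(f):
    @functools.wraps(f)  # sets inner.__wrapped__ = f
    def inner(*args, **kwargs):
        return f(*args, **kwargs)
    return inner

@decorate
def target():
    pass

# Both spellings unwrap decorated callables and fall back to the object itself.
assert (target.__wrapped__ if hasattr(target, "__wrapped__") else target) \
    is getattr(target, "__wrapped__", target)
assert getattr(object(), "__wrapped__", "fallback") == "fallback"
```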
45,547 |
def _exception_leads_to_http_5xx(ex):
# type: (Union[Exception, falcon.HTTPError, falcon.http_status.HTTPStatus]) -> bool
is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or "").startswith(
"5"
)
is_unhandled_error = not isinstance(
ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)
)
return is_server_error or is_unhandled_error
|
def _exception_leads_to_http_5xx(ex):
# type: (BaseException) -> bool
is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or "").startswith(
"5"
)
is_unhandled_error = not isinstance(
ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)
)
return is_server_error or is_unhandled_error
|
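
Only the type comment changed in this pair (broadened to `BaseException`). An illustrative check of the helper's behaviour, assuming Falcon is installed; the specific exceptions are examples, not taken from the source.

```python
import falcon

# 5xx Falcon errors and unhandled exceptions count; 4xx Falcon errors do not.
assert _exception_leads_to_http_5xx(falcon.HTTPError(falcon.HTTP_502))
assert not _exception_leads_to_http_5xx(falcon.HTTPError(falcon.HTTP_404))
assert _exception_leads_to_http_5xx(ValueError("unhandled"))
```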
14,637 |
def generate_learning_curve_plots(experiment_name,
output_dir,
learning_curve_tsv_file):
"""
Generate the learning curve plots given the TSV output
file from a learning curve experiment.
Parameters
----------
experiment_name : str
The name of the experiment.
output_dir : str
Path to the output directory for the plots.
learning_curve_tsv_file : str
The path to the learning curve TSV file.
"""
# use pandas to read in the TSV file into a data frame
# and massage it from wide to long format for plotting
df = pd.read_csv(learning_curve_tsv_file, sep='\t')
num_learners = len(df['learner_name'].unique())
num_metrics = len(df['metric'].unique())
df_melted = pd.melt(df, id_vars=[c for c in df.columns
if c not in ['train_score_mean', 'test_score_mean']])
# make sure the "variable" column is cateogrical since it will be
# mapped to hue levels in the learning curve below
df_melted["variable"] = df_melted["variable"].astype("category")
# if there are any training sizes greater than 1000,
# then we should probably rotate the tick labels
# since otherwise the labels are not clearly rendered
rotate_labels = np.any([size >= 1000 for size in df['training_set_size'].unique()])
# set up and draw the actual learning curve figures, one for
# each of the featuresets
for fs_name, df_fs in df_melted.groupby('featureset_name'):
fig = plt.figure()
fig.set_size_inches(2.5 * num_learners, 2.5 * num_metrics)
# compute ylimits for this feature set for each objective
with sns.axes_style('whitegrid', {"grid.linestyle": ':',
"xtick.major.size": 3.0}):
g = sns.FacetGrid(df_fs, row="metric", col="learner_name",
height=2.5, aspect=1, margin_titles=True,
despine=True, sharex=False,
sharey=False, legend_out=False)
train_color, test_color = sns.color_palette(palette="Set1", n_colors=2)
g = g.map_dataframe(sns.pointplot, x="training_set_size",
y="value", hue="variable", scale=.5,
errorbar=None,
palette={"train_score_mean": train_color,
"test_score_mean": test_color})
ylimits = _compute_ylimits_for_featureset(df_fs, g.row_names)
for ax in g.axes.flat:
plt.setp(ax.texts, text="")
g = (g.set_titles(row_template='', col_template='{col_name}')
.set_axis_labels('Training Examples', 'Score'))
if rotate_labels:
g = g.set_xticklabels(rotation=60)
for i, row_name in enumerate(g.row_names):
for j, col_name in enumerate(g.col_names):
ax = g.axes[i][j]
ax.set(ylim=ylimits[row_name])
df_ax_train = df_fs[(df_fs['learner_name'] == col_name) &
(df_fs['metric'] == row_name) &
(df_fs['variable'] == 'train_score_mean')]
df_ax_test = df_fs[(df_fs['learner_name'] == col_name) &
(df_fs['metric'] == row_name) &
(df_fs['variable'] == 'test_score_mean')]
ax.fill_between(list(range(len(df_ax_train))),
df_ax_train['value'] - df_ax_train['train_score_std'],
df_ax_train['value'] + df_ax_train['train_score_std'],
alpha=0.1,
color=train_color)
ax.fill_between(list(range(len(df_ax_test))),
df_ax_test['value'] - df_ax_test['test_score_std'],
df_ax_test['value'] + df_ax_test['test_score_std'],
alpha=0.1,
color=test_color)
if j == 0:
ax.set_ylabel(row_name)
if i == 0:
# set up the legend handles for this plot
plot_handles = [matplotlib.lines.Line2D([],
[],
color=c,
label=l,
linestyle='-')
for c, l in zip([train_color, test_color],
['Training', 'Cross-validation'])]
ax.legend(handles=plot_handles,
loc=4,
fancybox=True,
fontsize='x-small',
ncol=1,
frameon=True)
g.fig.tight_layout(w_pad=1)
plt.savefig(join(output_dir, f'{experiment_name}_{fs_name}.png'),
dpi=300)
# explicitly close figure to save memory
plt.close(fig)
|
def generate_learning_curve_plots(experiment_name,
output_dir,
learning_curve_tsv_file):
"""
Generate the learning curve plots given the TSV output
file from a learning curve experiment.
Parameters
----------
experiment_name : str
The name of the experiment.
output_dir : str
Path to the output directory for the plots.
learning_curve_tsv_file : str
The path to the learning curve TSV file.
"""
# use pandas to read in the TSV file into a data frame
# and massage it from wide to long format for plotting
df = pd.read_csv(learning_curve_tsv_file, sep='\t')
num_learners = len(df['learner_name'].unique())
num_metrics = len(df['metric'].unique())
df_melted = pd.melt(df, id_vars=[c for c in df.columns
if c not in ['train_score_mean', 'test_score_mean']])
# make sure the "variable" column is categorical since it will be
# mapped to hue levels in the learning curve below
df_melted["variable"] = df_melted["variable"].astype("category")
# if there are any training sizes greater than 1000,
# then we should probably rotate the tick labels
# since otherwise the labels are not clearly rendered
rotate_labels = np.any([size >= 1000 for size in df['training_set_size'].unique()])
# set up and draw the actual learning curve figures, one for
# each of the featuresets
for fs_name, df_fs in df_melted.groupby('featureset_name'):
fig = plt.figure()
fig.set_size_inches(2.5 * num_learners, 2.5 * num_metrics)
# compute ylimits for this feature set for each objective
with sns.axes_style('whitegrid', {"grid.linestyle": ':',
"xtick.major.size": 3.0}):
g = sns.FacetGrid(df_fs, row="metric", col="learner_name",
height=2.5, aspect=1, margin_titles=True,
despine=True, sharex=False,
sharey=False, legend_out=False)
train_color, test_color = sns.color_palette(palette="Set1", n_colors=2)
g = g.map_dataframe(sns.pointplot, x="training_set_size",
y="value", hue="variable", scale=.5,
errorbar=None,
palette={"train_score_mean": train_color,
"test_score_mean": test_color})
ylimits = _compute_ylimits_for_featureset(df_fs, g.row_names)
for ax in g.axes.flat:
plt.setp(ax.texts, text="")
g = (g.set_titles(row_template='', col_template='{col_name}')
.set_axis_labels('Training Examples', 'Score'))
if rotate_labels:
g = g.set_xticklabels(rotation=60)
for i, row_name in enumerate(g.row_names):
for j, col_name in enumerate(g.col_names):
ax = g.axes[i][j]
ax.set(ylim=ylimits[row_name])
df_ax_train = df_fs[(df_fs['learner_name'] == col_name) &
(df_fs['metric'] == row_name) &
(df_fs['variable'] == 'train_score_mean')]
df_ax_test = df_fs[(df_fs['learner_name'] == col_name) &
(df_fs['metric'] == row_name) &
(df_fs['variable'] == 'test_score_mean')]
ax.fill_between(list(range(len(df_ax_train))),
df_ax_train['value'] - df_ax_train['train_score_std'],
df_ax_train['value'] + df_ax_train['train_score_std'],
alpha=0.1,
color=train_color)
ax.fill_between(list(range(len(df_ax_test))),
df_ax_test['value'] - df_ax_test['test_score_std'],
df_ax_test['value'] + df_ax_test['test_score_std'],
alpha=0.1,
color=test_color)
if j == 0:
ax.set_ylabel(row_name)
if i == 0:
# set up the legend handles for this plot
plot_handles = [matplotlib.lines.Line2D([],
[],
color=c,
label=l,
linestyle='-')
for c, l in zip([train_color, test_color],
['Training', 'Cross-validation'])]
ax.legend(handles=plot_handles,
loc=4,
fancybox=True,
fontsize='x-small',
ncol=1,
frameon=True)
g.fig.tight_layout(w_pad=1)
plt.savefig(join(output_dir, f'{experiment_name}_{fs_name}.png'),
dpi=300)
# explicitly close figure to save memory
plt.close(fig)
|
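
A toy illustration of the wide-to-long melt the function above relies on; the column names follow the TSV layout implied by the code, and the values are made up.

```python
import pandas as pd

df = pd.DataFrame({
    "featureset_name": ["fs1", "fs1"],
    "learner_name": ["SVC", "SVC"],
    "metric": ["accuracy", "accuracy"],
    "training_set_size": [100, 500],
    "train_score_mean": [0.91, 0.92],
    "test_score_mean": [0.71, 0.80],
})
id_vars = [c for c in df.columns
           if c not in ["train_score_mean", "test_score_mean"]]
long_df = pd.melt(df, id_vars=id_vars)  # adds "variable"/"value" columns
long_df["variable"] = long_df["variable"].astype("category")
print(long_df[["training_set_size", "variable", "value"]])
```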
5,891 |
def get_supported(
version=None, # type: Optional[str]
platform=None, # type: Optional[str]
impl=None, # type: Optional[str]
abi=None # type: Optional[str]
):
# type: (...) -> List[Pep425Tag]
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a string versions, of the form "33" or "32",
or None. The version will be assumed to support our ABI.
:param platform: specify the exact platform you want valid
tags for, or None. If None, use the local system platform.
:param impl: specify the exact implementation you want valid
tags for, or None. If None, use the local interpreter impl.
:param abi: specify the exact abi you want valid
tags for, or None. If None, use the local interpreter abi.
"""
supported = []
# Versions must be given with respect to the preference
if version is None:
version_info = get_impl_version_info()
versions = get_all_minor_versions_as_strings(version_info)
else:
versions = [version]
impl = impl or get_abbr_impl()
abis = [] # type: List[str]
abi = abi or get_abi_tag()
if abi:
abis[0:0] = [abi]
abi3s = set() # type: Set[str]
for suffix in get_extension_suffixes():
if suffix.startswith('.abi'):
abi3s.add(suffix.split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
arch = platform or get_platform()
arch_prefix, arch_sep, arch_suffix = arch.partition('_')
if arch.startswith('macosx'):
# support macosx-10.6-intel on macosx-10.9-x86_64
match = _osx_arch_pat.match(arch)
if match:
name, major, minor, actual_arch = match.groups()
tpl = '{}_{}_%i_%s'.format(name, major)
arches = []
for m in reversed(range(int(minor) + 1)):
for a in get_darwin_arches(int(major), m, actual_arch):
arches.append(tpl % (m, a))
else:
# arch pattern didn't match (?!)
arches = [arch]
elif arch_prefix == 'manylinux2014':
arches = [arch]
# manylinux1/manylinux2010 wheels run on most manylinux2014 systems
# with the exception of wheels depending on ncurses. PEP 599 states
# manylinux1/manylinux2010 wheels should be considered
# manylinux2014 wheels:
# https://www.python.org/dev/peps/pep-0599/#backwards-compatibility-with-manylinux2010-wheels
if arch_suffix in {'i686', 'x86_64'}:
arches.append('manylinux2010' + arch_sep + arch_suffix)
arches.append('manylinux1' + arch_sep + arch_suffix)
elif arch_prefix == 'manylinux2010':
# manylinux1 wheels run on most manylinux2010 systems with the
# exception of wheels depending on ncurses. PEP 571 states
# manylinux1 wheels should be considered manylinux2010 wheels:
# https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels
arches = [arch, 'manylinux1' + arch_sep + arch_suffix]
elif platform is None:
arches = []
if is_manylinux2014_compatible():
arches.append('manylinux2014' + arch_sep + arch_suffix)
if is_manylinux2010_compatible():
arches.append('manylinux2010' + arch_sep + arch_suffix)
if is_manylinux1_compatible():
arches.append('manylinux1' + arch_sep + arch_suffix)
arches.append(arch)
else:
arches = [arch]
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in arches:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# abi3 modules compatible with older version of Python
for version in versions[1:]:
# abi3 was introduced in Python 3.2
if version in {'31', '30'}:
break
for abi in abi3s: # empty set if not Python 3
for arch in arches:
supported.append(("%s%s" % (impl, version), abi, arch))
# Has binaries, does not use the Python API:
for arch in arches:
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, but requires our implementation:
supported.append(('%s%s' % (impl, versions[0]), 'none', 'any'))
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
|
def get_supported(
version=None, # type: Optional[str]
platform=None, # type: Optional[str]
impl=None, # type: Optional[str]
abi=None # type: Optional[str]
):
# type: (...) -> List[Pep425Tag]
"""Return a list of supported tags for each version specified in
`versions`.
:param version: a string versions, of the form "33" or "32",
or None. The version will be assumed to support our ABI.
:param platform: specify the exact platform you want valid
tags for, or None. If None, use the local system platform.
:param impl: specify the exact implementation you want valid
tags for, or None. If None, use the local interpreter impl.
:param abi: specify the exact abi you want valid
tags for, or None. If None, use the local interpreter abi.
"""
supported = []
# Versions must be given with respect to the preference
if version is None:
version_info = get_impl_version_info()
versions = get_all_minor_versions_as_strings(version_info)
else:
versions = [version]
impl = impl or get_abbr_impl()
abis = [] # type: List[str]
abi = abi or get_abi_tag()
if abi:
abis[0:0] = [abi]
abi3s = set() # type: Set[str]
for suffix in get_extension_suffixes():
if suffix.startswith('.abi'):
abi3s.add(suffix.split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
arch = platform or get_platform()
arch_prefix, arch_sep, arch_suffix = arch.partition('_')
if arch.startswith('macosx'):
# support macosx-10.6-intel on macosx-10.9-x86_64
match = _osx_arch_pat.match(arch)
if match:
name, major, minor, actual_arch = match.groups()
tpl = '{}_{}_%i_%s'.format(name, major)
arches = []
for m in reversed(range(int(minor) + 1)):
for a in get_darwin_arches(int(major), m, actual_arch):
arches.append(tpl % (m, a))
else:
# arch pattern didn't match (?!)
arches = [arch]
elif arch_prefix == 'manylinux2014':
arches = [arch]
# manylinux1/manylinux2010 wheels run on most manylinux2014 systems
# with the exception of wheels depending on ncurses. PEP 599 states
# manylinux1/manylinux2010 wheels should be considered
# manylinux2014 wheels:
# https://www.python.org/dev/peps/pep-0599/#backwards-compatibility-with-manylinux2010-wheels
if arch_suffix in {'i686', 'x86_64'}:
arches.append('manylinux2010' + arch_sep + arch_suffix)
arches.append('manylinux1' + arch_sep + arch_suffix)
elif arch_prefix == 'manylinux2010':
# manylinux1 wheels run on most manylinux2010 systems with the
# exception of wheels depending on ncurses. PEP 571 states
# manylinux1 wheels should be considered manylinux2010 wheels:
# https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels
arches = [arch, 'manylinux1' + arch_sep + arch_suffix]
elif platform is None:
arches = []
if is_manylinux2014_compatible():
arches.append('manylinux2014' + arch_sep + arch_suffix)
if is_manylinux2010_compatible():
arches.append('manylinux2010' + arch_sep + arch_suffix)
if is_manylinux1_compatible():
arches.append('manylinux1' + arch_sep + arch_suffix)
arches.append(arch)
else:
arches = [arch]
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in arches:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# abi3 modules compatible with older version of Python
for version in versions[1:]:
# abi3 was introduced in Python 3.2
if version in {'31', '30'}:
break
for abi in abi3s: # empty set if not Python 3
for arch in arches:
supported.append(("%s%s" % (impl, version), abi, arch))
# Has binaries, does not use the Python API:
for arch in arches:
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, but requires our implementation:
supported.append(('%s%s' % (impl, versions[0]), 'none', 'any'))
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
|
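
A hedged example call for the function above (pip's internal pep425tags-style helper); the argument values and the exact ordering of returned tags are illustrative.

```python
# Illustrative call with explicit arguments (values chosen for the example).
tags = get_supported(version="37",
                     platform="manylinux2010_x86_64",
                     impl="cp",
                     abi="cp37m")
# The most specific tag comes first, e.g. ('cp37', 'cp37m', 'manylinux2010_x86_64'),
# followed by manylinux1 fallbacks and generic tags such as ('py3', 'none', 'any').
print(tags[:3])
```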
7,227 |
def difference_of_gaussians(image, sigma1, sigma2=None, *,
mode='nearest', cval=0, multichannel=False,
truncate=4.0):
"""Multi-dimensional band-pass filter using the Difference of Gaussians
method.
Parameters
----------
image : ndarray
Input array to filter.
sigma1 : scalar or sequence of scalars
Standard deviation(s) for the Gaussian kernel with the smaller sigmas
across all axes. The standard deviations are given for each axis as a
sequence, or as a single number, in which case the single number is
used as the standard deviation value for all axes.
sigma2 : scalar or sequence of scalars, optional (default is None)
Standard deviation(s) for the Gaussian kernel with the larger sigmas
across all axes. The standard deviations are given for each axis as a
sequence, or as a single number, in which case the single number is
used as the standard deviation value for all axes. If None is given
(default), sigmas for all axes are calculated as 1.6 * sigma1.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'nearest'.
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
multichannel : bool, optional (default: False)
Whether the last axis of the image is to be interpreted as multiple
channels. If True, each channel is filtered separately (channels are
not mixed together).
truncate : float, optional (default is 4.0)
Truncate the filter at this many standard deviations.
Returns
-------
filtered_image : ndarray
the filtered array
Notes
-----
    This function will subtract an array filtered with a Gaussian kernel
    with sigmas given by ``sigma2`` from an array filtered with a Gaussian
    kernel with sigmas provided by ``sigma1``. The values for ``sigma2`` must
    always be greater than or equal to the corresponding values in ``sigma1``,
    or a ``ValueError`` will be raised.
    When ``sigma2`` is None, the values for ``sigma2`` will be calculated
as 1.6x the corresponding values in ``sigma1``. This approximates the
inverted Laplacian of Guassian, commonly used in edge and blob detection.
Input image is converted according to the conventions of ``img_as_float``.
Except for sigma values, all parameters are used for both filters.
Examples
--------
Apply a simple Difference of Gaussians filter to a color image:
>>> from skimage.data import astronaut
>>> from skimage.filters import difference_of_gaussians
>>> image = astronaut()
>>> filtered_image = difference_of_gaussians(image, 2, 10,
... multichannel=True)
Apply a Laplacian of Gaussian filter as approximated by the Difference
of Gaussians filter:
>>> filtered_image = difference_of_gaussians(image, 2, multichannel=True)
Apply a Difference of Gaussians filter to a grayscale image using different
sigma values for each axis:
>>> from skimage.data import camera
>>> image = camera()
>>> filtered_image = difference_of_gaussians(image, (2,5), (3,20))
"""
image = img_as_float(image)
sigma1 = np.array(sigma1, dtype='float', ndmin=1)
if sigma2 is None:
sigma2 = sigma1 * 1.6
else:
sigma2 = np.array(sigma2, dtype='float', ndmin=1)
if multichannel is True:
spatial_dims = image.ndim - 1
else:
spatial_dims = image.ndim
if len(sigma1) != 1 and len(sigma1) != spatial_dims:
raise ValueError('sigma1 must have length equal to number of spatial'
' dimensions of input')
if len(sigma2) != 1 and len(sigma2) != spatial_dims:
raise ValueError('sigma2 must have length equal to number of spatial'
' dimensions of input')
sigma1 = sigma1 * np.ones(spatial_dims)
sigma2 = sigma2 * np.ones(spatial_dims)
if any(sigma2 < sigma1):
raise ValueError('sigma2 must be equal to or larger than sigma1 for'
' all axes')
im1 = gaussian(image, sigma1, mode=mode, cval=cval,
multichannel=multichannel, truncate=truncate)
im2 = gaussian(image, sigma2, mode=mode, cval=cval,
multichannel=multichannel, truncate=truncate)
return im1 - im2
|
def difference_of_gaussians(image, sigma1, sigma2=None, *,
mode='nearest', cval=0, multichannel=False,
truncate=4.0):
"""Multi-dimensional band-pass filter using the Difference of Gaussians
method.
Parameters
----------
image : ndarray
Input array to filter.
sigma1 : scalar or sequence of scalars
Standard deviation(s) for the Gaussian kernel with the smaller sigmas
across all axes. The standard deviations are given for each axis as a
sequence, or as a single number, in which case the single number is
used as the standard deviation value for all axes.
sigma2 : scalar or sequence of scalars, optional (default is None)
Standard deviation(s) for the Gaussian kernel with the larger sigmas
across all axes. The standard deviations are given for each axis as a
sequence, or as a single number, in which case the single number is
used as the standard deviation value for all axes. If None is given
(default), sigmas for all axes are calculated as 1.6 * sigma1.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'nearest'.
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
multichannel : bool, optional (default: False)
Whether the last axis of the image is to be interpreted as multiple
channels. If True, each channel is filtered separately (channels are
not mixed together).
truncate : float, optional (default is 4.0)
Truncate the filter at this many standard deviations.
Returns
-------
filtered_image : ndarray
the filtered array
Notes
-----
    This function will subtract an array filtered with a Gaussian kernel
    with sigmas given by ``sigma2`` from an array filtered with a Gaussian
    kernel with sigmas provided by ``sigma1``. The values for ``sigma2`` must
    always be greater than or equal to the corresponding values in ``sigma1``,
    or a ``ValueError`` will be raised.
    When ``sigma2`` is None, the values for ``sigma2`` will be calculated
as 1.6x the corresponding values in ``sigma1``. This approximates the
inverted Laplacian of Gaussian, commonly used in edge and blob detection.
Input image is converted according to the conventions of ``img_as_float``.
Except for sigma values, all parameters are used for both filters.
Examples
--------
Apply a simple Difference of Gaussians filter to a color image:
>>> from skimage.data import astronaut
>>> from skimage.filters import difference_of_gaussians
>>> image = astronaut()
>>> filtered_image = difference_of_gaussians(image, 2, 10,
... multichannel=True)
Apply a Laplacian of Gaussian filter as approximated by the Difference
of Gaussians filter:
>>> filtered_image = difference_of_gaussians(image, 2, multichannel=True)
Apply a Difference of Gaussians filter to a grayscale image using different
sigma values for each axis:
>>> from skimage.data import camera
>>> image = camera()
>>> filtered_image = difference_of_gaussians(image, (2,5), (3,20))
"""
image = img_as_float(image)
sigma1 = np.array(sigma1, dtype='float', ndmin=1)
if sigma2 is None:
sigma2 = sigma1 * 1.6
else:
sigma2 = np.array(sigma2, dtype='float', ndmin=1)
if multichannel is True:
spatial_dims = image.ndim - 1
else:
spatial_dims = image.ndim
if len(sigma1) != 1 and len(sigma1) != spatial_dims:
raise ValueError('sigma1 must have length equal to number of spatial'
' dimensions of input')
if len(sigma2) != 1 and len(sigma2) != spatial_dims:
raise ValueError('sigma2 must have length equal to number of spatial'
' dimensions of input')
sigma1 = sigma1 * np.ones(spatial_dims)
sigma2 = sigma2 * np.ones(spatial_dims)
if any(sigma2 < sigma1):
raise ValueError('sigma2 must be equal to or larger than sigma1 for'
' all axes')
im1 = gaussian(image, sigma1, mode=mode, cval=cval,
multichannel=multichannel, truncate=truncate)
im2 = gaussian(image, sigma2, mode=mode, cval=cval,
multichannel=multichannel, truncate=truncate)
return im1 - im2
|
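The pair above differs only in a docstring spelling fix; the underlying operation is a band-pass filter built from two Gaussian blurs. A minimal sketch of the principle using scipy.ndimage directly, assuming SciPy is available (an illustration of the idea, not skimage's implementation):

import numpy as np
from scipy.ndimage import gaussian_filter

rng = np.random.default_rng(0)
image = rng.random((64, 64))
sigma1 = 2.0
sigma2 = 1.6 * sigma1           # same default ratio as the function above
low_cut = gaussian_filter(image, sigma1, mode='nearest')
high_cut = gaussian_filter(image, sigma2, mode='nearest')
band_pass = low_cut - high_cut  # keeps structure between the two scales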
8,594 |
def substring(geom, start_dist, end_dist, normalized=False):
"""Return a line segment between specified distances along a LineString
Negative distance values are taken as measured in the reverse
direction from the end of the geometry. Out-of-range index
values are handled by clamping them to the valid range of values.
If the start distance equals the end distance, a Point is returned.
If the start distance is actually beyond the end distance, then the
reversed substring is returned such that the start distance is
at the first coordinate.
Parameters
----------
geom : LineString
The geometry to get a substring of.
start_dist : float
The distance along `goem` of the start of the substring.
end_dist : float
The distance along `geom` of the end of the substring.
normalized : bool, False
Whether the distance parameters are interpreted as a
fraction of the geometry's length.
Returns
-------
Union[Point, LineString]
The substring between `start_dist` and `end_dist` or a Point
if they are at the same location.
Raises
------
AssertionError
If `geom` is not a LineString.
Examples
--------
>>> from shapely.geometry import LineString
>>> from shapely.ops import substring
>>> ls = LineString((i, 0) for i in range(6))
>>> ls.wkt
'LINESTRING (0 0, 1 0, 2 0, 3 0, 4 0, 5 0)'
>>> substring(ls, start_dist=1, end_dist=3).wkt
'LINESTRING (1 0, 2 0, 3 0)'
>>> substring(ls, start_dist=3, end_dist=1).wkt
'LINESTRING (3 0, 2 0, 1 0)'
>>> substring(ls, start_dist=1, end_dist=-3).wkt
'LINESTRING (1 0, 2 0)'
>>> substring(ls, start_dist=0.2, end_dist=-0.6, normalized=True).wkt
'LINESTRING (1 0, 2 0)'
Returning a `Point` when `start_dist` and `end_dist` are at the
same location.
>>> substring(ls, 2.5, -2.5).wkt
'POINT (2.5 0)'
"""
assert(isinstance(geom, LineString))
# Filter out cases in which to return a point
if start_dist == end_dist:
return geom.interpolate(start_dist, normalized)
elif not normalized and start_dist >= geom.length and end_dist >= geom.length:
return geom.interpolate(geom.length, normalized)
elif not normalized and -start_dist >= geom.length and -end_dist >= geom.length:
return geom.interpolate(0, normalized)
elif normalized and start_dist >= 1 and end_dist >= 1:
return geom.interpolate(1, normalized)
elif normalized and -start_dist >= 1 and -end_dist >= 1:
return geom.interpolate(0, normalized)
if normalized:
start_dist *= geom.length
end_dist *= geom.length
# Filter out cases where distances meet at a middle point from opposite ends.
if start_dist < 0 < end_dist and abs(start_dist) + end_dist == geom.length:
return geom.interpolate(end_dist)
elif end_dist < 0 < start_dist and abs(end_dist) + start_dist == geom.length:
return geom.interpolate(start_dist)
start_point = geom.interpolate(start_dist)
end_point = geom.interpolate(end_dist)
if start_dist < 0:
start_dist = geom.length + start_dist # Values may still be negative,
if end_dist < 0: # but only in the out-of-range
end_dist = geom.length + end_dist # sense, not the wrap-around sense.
reverse = start_dist > end_dist
if reverse:
start_dist, end_dist = end_dist, start_dist
if start_dist < 0:
start_dist = 0 # to avoid duplicating the first vertex
if reverse:
vertex_list = [(end_point.x, end_point.y)]
else:
vertex_list = [(start_point.x, start_point.y)]
coords = list(geom.coords)
current_distance = 0
for p1, p2 in zip(coords, coords[1:]):
if start_dist < current_distance < end_dist:
vertex_list.append(p1)
elif current_distance >= end_dist:
break
current_distance += ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) ** 0.5
if reverse:
vertex_list.append((start_point.x, start_point.y))
# reverse direction result
vertex_list = reversed(vertex_list)
else:
vertex_list.append((end_point.x, end_point.y))
return LineString(vertex_list)
|
def substring(geom, start_dist, end_dist, normalized=False):
"""Return a line segment between specified distances along a LineString
Negative distance values are taken as measured in the reverse
direction from the end of the geometry. Out-of-range index
values are handled by clamping them to the valid range of values.
If the start distance equals the end distance, a Point is returned.
If the start distance is actually beyond the end distance, then the
reversed substring is returned such that the start distance is
at the first coordinate.
Parameters
----------
geom : LineString
The geometry to get a substring of.
start_dist : float
The distance along `geom` of the start of the substring.
end_dist : float
The distance along `geom` of the end of the substring.
normalized : bool, False
Whether the distance parameters are interpreted as a
fraction of the geometry's length.
Returns
-------
Union[Point, LineString]
The substring between `start_dist` and `end_dist` or a Point
if they are at the same location.
Raises
------
AssertionError
If `geom` is not a LineString.
Examples
--------
>>> from shapely.geometry import LineString
>>> from shapely.ops import substring
>>> ls = LineString((i, 0) for i in range(6))
>>> ls.wkt
'LINESTRING (0 0, 1 0, 2 0, 3 0, 4 0, 5 0)'
>>> substring(ls, start_dist=1, end_dist=3).wkt
'LINESTRING (1 0, 2 0, 3 0)'
>>> substring(ls, start_dist=3, end_dist=1).wkt
'LINESTRING (3 0, 2 0, 1 0)'
>>> substring(ls, start_dist=1, end_dist=-3).wkt
'LINESTRING (1 0, 2 0)'
>>> substring(ls, start_dist=0.2, end_dist=-0.6, normalized=True).wkt
'LINESTRING (1 0, 2 0)'
Returning a `Point` when `start_dist` and `end_dist` are at the
same location.
>>> substring(ls, 2.5, -2.5).wkt
'POINT (2.5 0)'
"""
assert(isinstance(geom, LineString))
# Filter out cases in which to return a point
if start_dist == end_dist:
return geom.interpolate(start_dist, normalized)
elif not normalized and start_dist >= geom.length and end_dist >= geom.length:
return geom.interpolate(geom.length, normalized)
elif not normalized and -start_dist >= geom.length and -end_dist >= geom.length:
return geom.interpolate(0, normalized)
elif normalized and start_dist >= 1 and end_dist >= 1:
return geom.interpolate(1, normalized)
elif normalized and -start_dist >= 1 and -end_dist >= 1:
return geom.interpolate(0, normalized)
if normalized:
start_dist *= geom.length
end_dist *= geom.length
# Filter out cases where distances meet at a middle point from opposite ends.
if start_dist < 0 < end_dist and abs(start_dist) + end_dist == geom.length:
return geom.interpolate(end_dist)
elif end_dist < 0 < start_dist and abs(end_dist) + start_dist == geom.length:
return geom.interpolate(start_dist)
start_point = geom.interpolate(start_dist)
end_point = geom.interpolate(end_dist)
if start_dist < 0:
start_dist = geom.length + start_dist # Values may still be negative,
if end_dist < 0: # but only in the out-of-range
end_dist = geom.length + end_dist # sense, not the wrap-around sense.
reverse = start_dist > end_dist
if reverse:
start_dist, end_dist = end_dist, start_dist
if start_dist < 0:
start_dist = 0 # to avoid duplicating the first vertex
if reverse:
vertex_list = [(end_point.x, end_point.y)]
else:
vertex_list = [(start_point.x, start_point.y)]
coords = list(geom.coords)
current_distance = 0
for p1, p2 in zip(coords, coords[1:]):
if start_dist < current_distance < end_dist:
vertex_list.append(p1)
elif current_distance >= end_dist:
break
current_distance += ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) ** 0.5
if reverse:
vertex_list.append((start_point.x, start_point.y))
# reverse direction result
vertex_list = reversed(vertex_list)
else:
vertex_list.append((end_point.x, end_point.y))
return LineString(vertex_list)
|
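The distance handling in substring() above folds negative distances back from the end of the line and swaps the endpoints when the start lies beyond the end. A hypothetical helper isolating just that normalisation step (names are mine, not Shapely's):

def normalise_distances(start_dist, end_dist, length):
    # Negative values are measured from the end of the geometry.
    if start_dist < 0:
        start_dist = length + start_dist
    if end_dist < 0:
        end_dist = length + end_dist
    # A start beyond the end flags the result for reversal.
    reverse = start_dist > end_dist
    if reverse:
        start_dist, end_dist = end_dist, start_dist
    return max(start_dist, 0), end_dist, reverse

assert normalise_distances(3, 1, 5.0) == (1, 3, True)
assert normalise_distances(1, -3, 5.0) == (1, 2, False)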
8,554 |
def ipv6_address(addr: str) -> str:
"""
Validate an IPv6 address.
:param addr: ipv6 address
:returns: The ipv6 address.
"""
if not isinstance(addr, str):
raise CX("Invalid input, addr must be a string")
else:
addr = addr.strip()
if addr == "":
return addr
if not netaddr.valid_ipv6(addr):
raise CX("Invalid IPv6 address format (%s)" % addr)
return addr
|
def ipv6_address(addr: str) -> str:
"""
Validate an IPv6 address.
:param addr: IPv6 address
:returns: The ipv6 address.
"""
if not isinstance(addr, str):
raise CX("Invalid input, addr must be a string")
else:
addr = addr.strip()
if addr == "":
return addr
if not netaddr.valid_ipv6(addr):
raise CX("Invalid IPv6 address format (%s)" % addr)
return addr
|
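For reference, the same empty-string-and-format check can be sketched with the standard library's ipaddress module; this is an illustration only, not the netaddr-based implementation above, and corner cases (zone IDs, prefixes) may differ between the two libraries:

import ipaddress

def is_valid_ipv6(addr: str) -> bool:
    addr = addr.strip()
    if addr == "":
        return True          # the validator above accepts the empty string
    try:
        ipaddress.IPv6Address(addr)
        return True
    except ValueError:
        return False

assert is_valid_ipv6("::1")
assert not is_valid_ipv6("2001:::1")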
31,041 |
def get_dlp_api_call(url):
""" Makes a HTTPS Get call on the DLP API"""
global ACCESS_TOKEN
count = 0
while count < MAX_ATTEMPTS:
res = requests.get(url=url, headers={'Authorization': "Bearer " + ACCESS_TOKEN})
if res.status_code != 403:
break
new_token = refreshtoken(ACCESS_TOKEN, REFRESH_TOKEN)
if new_token:
ACCESS_TOKEN = new_token
count += 1
if res.status_code < 200 or res.status_code >= 300:
raise Exception("Request to {} failed with status code {}".format(url, res.status_code))
result_json = {} if res.status_code == 204 else res.json()
return result_json, res.status_code
|
def get_dlp_api_call(url):
""" Makes a HTTPS Get call on the DLP API"""
global ACCESS_TOKEN
count = 0
while count < MAX_ATTEMPTS:
res = requests.get(url=url, headers={'Authorization': "Bearer " + ACCESS_TOKEN})
if res.status_code != 403:
break
new_token = refreshtoken(ACCESS_TOKEN, REFRESH_TOKEN)
if new_token:
ACCESS_TOKEN = new_token
count += 1
if res.status_code < 200 or res.status_code >= 300:
raise DemistoException("Request to {} failed with status code {}".format(url, res.status_code))
result_json = {} if res.status_code == 204 else res.json()
return result_json, res.status_code
|
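Both versions above retry a GET when the API answers 403, refreshing the bearer token between attempts. A generic sketch of that pattern, with refresh_token_fn as a hypothetical callable standing in for refreshtoken():

import requests

def get_with_refresh(url, access_token, refresh_token_fn, max_attempts=3):
    # Retry while the server rejects the token; refresh it between attempts.
    for _ in range(max_attempts):
        res = requests.get(url, headers={'Authorization': 'Bearer ' + access_token})
        if res.status_code != 403:
            break
        new_token = refresh_token_fn()
        if new_token:
            access_token = new_token
    if not 200 <= res.status_code < 300:
        raise RuntimeError('Request to {} failed with status code {}'.format(
            url, res.status_code))
    return ({} if res.status_code == 204 else res.json()), access_token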
5,833 |
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore', over='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# This can happen when attempting to sum things which are not
# numbers (e.g. as in the function `mode`). Try an alternative method:
try:
contains_nan = np.any(np.isnan(a.ravel()))
except TypeError:
# Don't know what to do. Fall back to omitting nan values and
# issue a warning.
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly "
"checked for nan values. nan values "
"will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return contains_nan, nan_policy
|
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore', over='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# This can happen when attempting to sum things which are not
# numbers (e.g. as in the function `mode`). Try an alternative method:
try:
contains_nan = np.any(np.isnan(a))
except TypeError:
# Don't know what to do. Fall back to omitting nan values and
# issue a warning.
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly "
"checked for nan values. nan values "
"will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return contains_nan, nan_policy
|
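The np.isnan(np.sum(a)) check above works because NaN propagates through summation, so a single reduction detects it without materialising a full boolean mask. A quick illustration:

import numpy as np

a = np.array([1.0, 2.0, np.nan])
assert bool(np.isnan(np.sum(a))) is True
b = np.array([1.0, 2.0, 3.0])
assert bool(np.isnan(np.sum(b))) is False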
59,146 |
def harmony_timeseries(data: AnnData, tp: str):
"""Harmony time series for data visualization with augmented affinity matrix
at discrete time points [Nowotschin18i]_.
Harmony time series is a framework for data visualization, trajectory
detection and interpretation for scRNA-seq data measured at discrete
time points. Harmony constructs an augmented affinity matrix by augmenting
the kNN graph affinity matrix with mutually nearest neighbors between
successive time points. This augmented affinity matrix forms the basis for
    generating a force-directed layout for visualization and also serves as input
for computing the diffusion operator which can be used for trajectory
detection using **Palantir**.
More about **Palantir** can be found here:
https://github.com/dpeerlab/Palantir.
.. note::
More information and bug reports `here
<https://github.com/dpeerlab/Harmony>`__.
Parameters
----------
data
Annotated data matrix of shape n_obs `×` n_vars. Rows correspond to
cells and columns to genes. Rows represent two or more time points,
where replicates of the same time point are consecutive in order.
tp
key name of observation annotation `.obs` representing time points
Returns
-------
Updates `.uns` with `timepoint_connections`
Example
-------
>>> import scanpy as sc
>>> import scanpy.external as sce
**Load** `AnnData`
A sample with real data is available `here
<https://github.com/dpeerlab/Harmony/tree/master/data>`_.
Random data sets of three time points with two replicates each:
>>> adata_ref = sc.datasets.pbmc3k()
>>> start = [596, 615, 1682, 1663, 1409, 1432]
>>> adatas = [adata_ref[i : i + 1000] for i in start]
>>> sample_names = [
"sa1_Rep1",
"sa1_Rep2",
"sa2_Rep1",
"sa2_Rep2",
"sa3_Rep1",
"sa3_Rep2",
]
>>> timepoints = [i.split("_")[0] for i in sample_names]
>>> for ad, sn, tp in zip(adatas, sample_names, timepoints):
ad.obs["time_points"] = tp
ad.obs["sample_name"] = sn
>>> adata = adatas[0].concatenate(*adatas[1:], join="outer")
Normalize and filter for highly expressed genes
>>> sc.pp.normalize_total(adata, target_sum=10000)
>>> sc.pp.log1p(adata)
>>> sc.pp.highly_variable_genes(adata, n_top_genes=1000, subset=True)
Run harmony_timeseries
>>> d = sce.tl.harmony_timeseries(data=adata, tp="time_points")
**Harmony augmented affinity matrix**
>>> aug_aff, aff = d.harmony_timeseries.core.augmented_affinity_matrix(
data_df=adata.to_df(),
timepoints=adata.obs["time_points"],
timepoint_connections=adata.uns["timepoint_connections"],
)
**Visualization using force directed layouts**
>>> layout = d.harmony_timeseries.plot.force_directed_layout(
affinity_matrix=aug_aff, cell_names=adata.obs.index
)
Use any of Scanpy or Harmony methods for plotting. Example:
>>> d.harmony_timeseries.plot.plot_timepoints(
layout=layout, timepoints=adata.obs.time_points
)
For further demonstration of Harmony visualizations please follow this
notebook
`Harmony_sample_notebook.ipynb
<https://github.com/dpeerlab/Harmony/blob/master/notebooks/
Harmony_sample_notebook.ipynb>`_.
It provides a comprehensive guide to draw *gene expression trends*, amongst
other things.
"""
logg.info("Harmony augmented affinity matrix")
class _wrapper_cls(object):
"""/
A wrapper class to instantiate a new object that wraps `Harmony` as an
attribute reference attached to the class, together with other attribute
references. The class uses instance variables, to preprocess and
generate data using the embedded harmony package.
harmony accepts as input AnnData: Cells x Genes.
Methods used are:
- instantiation initiation
- instance function to embed harmony
- processing of input data
"""
def __init__(
self, adata: AnnData, tp: str, func=None,
):
"""/
Parameters
----------
adata
Annotated data matrix of shape n_obs `×` n_vars. Rows correspond
to cells and columns to genes. Rows represent two or more time
points, where replicates of the same time point are consecutive
in order.
tp
key name of observation annotation `.obs` representing time
points
func
function wrapper to import harmony (not to be used)
"""
# instantiate variables
self.func = func
timepoint_connections = pd.DataFrame(columns=[0, 1])
index = 0
timepoints = adata.obs[tp].unique().tolist()
for i in range(len(timepoints) - 1):
timepoint_connections.loc[index, :] = timepoints[i : i + 2]
index += 1
adata.uns["timepoint_connections"] = timepoint_connections
# load harmony_timeseries
self.__call__()
logg.info("harmony_timeseries loaded ...")
def __call__(self):
"""/
Call for function to import harmony_timeseries and instantiate it
as a class attribute
"""
self.harmony_timeseries = self.func()
def wrapper_cls(df, tpoints, func=None):
"""/
Class wrapper to pass a function to the class alongside positional
argument
"""
if func:
return _wrapper_cls(func)
else:
def wrapper(f):
return _wrapper_cls(df, tpoints, f)
return wrapper
# import Harmony and wrap it in a function passed to the wrapper class this
# method allows passing positional argument of data to `_wrapper_cls`
@wrapper_cls(data, tp)
def _run():
import importlib
try:
import palantir
except ImportError:
raise ImportError(
" please install palantir: \n\n\t"
"git clone git://github.com/dpeerlab/Palantir.git\n\t"
"cd Palantir\n\t"
"sudo -H pip3 install .\n"
)
try:
harmony = importlib.import_module("harmony")
except ImportError:
raise ImportError(
"\nplease install harmony: \n\n\t"
"git clone git://github.com/dpeerlab/Harmony.git\n\t"
"cd Harmony\n\t"
"sudo -H pip3 install .\n"
)
return harmony
return _run
|
def harmony_timeseries(data: AnnData, tp: str):
"""Harmony time series for data visualization with augmented affinity matrix
at discrete time points [Nowotschin18i]_.
Harmony time series is a framework for data visualization, trajectory
detection and interpretation for scRNA-seq data measured at discrete
time points. Harmony constructs an augmented affinity matrix by augmenting
the kNN graph affinity matrix with mutually nearest neighbors between
successive time points. This augmented affinity matrix forms the basis for
    generating a force-directed layout for visualization and also serves as input
for computing the diffusion operator which can be used for trajectory
detection using Palantir_.
.. _Palantir: https://github.com/dpeerlab/Palantir
More about **Palantir** can be found here:
https://github.com/dpeerlab/Palantir.
.. note::
More information and bug reports `here
<https://github.com/dpeerlab/Harmony>`__.
Parameters
----------
data
Annotated data matrix of shape n_obs `×` n_vars. Rows correspond to
cells and columns to genes. Rows represent two or more time points,
where replicates of the same time point are consecutive in order.
tp
key name of observation annotation `.obs` representing time points
Returns
-------
Updates `.uns` with `timepoint_connections`
Example
-------
>>> import scanpy as sc
>>> import scanpy.external as sce
**Load** `AnnData`
A sample with real data is available `here
<https://github.com/dpeerlab/Harmony/tree/master/data>`_.
Random data sets of three time points with two replicates each:
>>> adata_ref = sc.datasets.pbmc3k()
>>> start = [596, 615, 1682, 1663, 1409, 1432]
>>> adatas = [adata_ref[i : i + 1000] for i in start]
>>> sample_names = [
"sa1_Rep1",
"sa1_Rep2",
"sa2_Rep1",
"sa2_Rep2",
"sa3_Rep1",
"sa3_Rep2",
]
>>> timepoints = [i.split("_")[0] for i in sample_names]
>>> for ad, sn, tp in zip(adatas, sample_names, timepoints):
ad.obs["time_points"] = tp
ad.obs["sample_name"] = sn
>>> adata = adatas[0].concatenate(*adatas[1:], join="outer")
Normalize and filter for highly expressed genes
>>> sc.pp.normalize_total(adata, target_sum=10000)
>>> sc.pp.log1p(adata)
>>> sc.pp.highly_variable_genes(adata, n_top_genes=1000, subset=True)
Run harmony_timeseries
>>> d = sce.tl.harmony_timeseries(data=adata, tp="time_points")
**Harmony augmented affinity matrix**
>>> aug_aff, aff = d.harmony_timeseries.core.augmented_affinity_matrix(
data_df=adata.to_df(),
timepoints=adata.obs["time_points"],
timepoint_connections=adata.uns["timepoint_connections"],
)
**Visualization using force directed layouts**
>>> layout = d.harmony_timeseries.plot.force_directed_layout(
affinity_matrix=aug_aff, cell_names=adata.obs.index
)
Use any of Scanpy or Harmony methods for plotting. Example:
>>> d.harmony_timeseries.plot.plot_timepoints(
layout=layout, timepoints=adata.obs.time_points
)
For further demonstration of Harmony visualizations please follow this
notebook
`Harmony_sample_notebook.ipynb
<https://github.com/dpeerlab/Harmony/blob/master/notebooks/
Harmony_sample_notebook.ipynb>`_.
It provides a comprehensive guide to draw *gene expression trends*, amongst
other things.
"""
logg.info("Harmony augmented affinity matrix")
class _wrapper_cls(object):
"""/
A wrapper class to instantiate a new object that wraps `Harmony` as an
attribute reference attached to the class, together with other attribute
references. The class uses instance variables, to preprocess and
generate data using the embedded harmony package.
harmony accepts as input AnnData: Cells x Genes.
Methods used are:
- instantiation initiation
- instance function to embed harmony
- processing of input data
"""
def __init__(
self, adata: AnnData, tp: str, func=None,
):
"""/
Parameters
----------
adata
Annotated data matrix of shape n_obs `×` n_vars. Rows correspond
to cells and columns to genes. Rows represent two or more time
points, where replicates of the same time point are consecutive
in order.
tp
key name of observation annotation `.obs` representing time
points
func
function wrapper to import harmony (not to be used)
"""
# instantiate variables
self.func = func
timepoint_connections = pd.DataFrame(columns=[0, 1])
index = 0
timepoints = adata.obs[tp].unique().tolist()
for i in range(len(timepoints) - 1):
timepoint_connections.loc[index, :] = timepoints[i : i + 2]
index += 1
adata.uns["timepoint_connections"] = timepoint_connections
# load harmony_timeseries
self.__call__()
logg.info("harmony_timeseries loaded ...")
def __call__(self):
"""/
Call for function to import harmony_timeseries and instantiate it
as a class attribute
"""
self.harmony_timeseries = self.func()
def wrapper_cls(df, tpoints, func=None):
"""/
Class wrapper to pass a function to the class alongside positional
argument
"""
if func:
return _wrapper_cls(func)
else:
def wrapper(f):
return _wrapper_cls(df, tpoints, f)
return wrapper
# import Harmony and wrap it in a function passed to the wrapper class this
# method allows passing positional argument of data to `_wrapper_cls`
@wrapper_cls(data, tp)
def _run():
import importlib
try:
import palantir
except ImportError:
raise ImportError(
" please install palantir: \n\n\t"
"git clone git://github.com/dpeerlab/Palantir.git\n\t"
"cd Palantir\n\t"
"sudo -H pip3 install .\n"
)
try:
harmony = importlib.import_module("harmony")
except ImportError:
raise ImportError(
"\nplease install harmony: \n\n\t"
"git clone git://github.com/dpeerlab/Harmony.git\n\t"
"cd Harmony\n\t"
"sudo -H pip3 install .\n"
)
return harmony
return _run
|
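The __init__ above pairs consecutive time points into a two-column timepoint_connections table. A small standalone sketch of that construction with made-up time-point labels:

import pandas as pd

timepoints = ["sa1", "sa2", "sa3"]
timepoint_connections = pd.DataFrame(columns=[0, 1])
for index, pair in enumerate(zip(timepoints, timepoints[1:])):
    timepoint_connections.loc[index, :] = list(pair)
# Result:
#      0    1
# 0  sa1  sa2
# 1  sa2  sa3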
513 |
def activate_new_user(
username, password, created_by, created_via, first_name=None, last_name=None,
is_domain_admin=True, domain=None, ip=None, atypical_user=False
):
now = datetime.utcnow()
new_user = WebUser.create(
domain,
username,
password,
created_by,
created_via,
is_admin=is_domain_admin,
can_skip_domain=(not domain),
)
new_user.first_name = first_name
new_user.last_name = last_name
new_user.email = username
new_user.subscribed_to_commcare_users = False
new_user.eula.signed = True
new_user.eula.date = now
new_user.eula.type = 'End User License Agreement'
if ip:
new_user.eula.user_ip = ip
new_user.is_staff = False # Can't log in to admin site
new_user.is_active = True
new_user.is_superuser = False
new_user.last_login = now
new_user.date_joined = now
new_user.last_password_set = now
new_user.atypical_user = atypical_user
new_user.save()
return new_user
|
def activate_new_user(
username, password, created_by, created_via, first_name=None, last_name=None,
is_domain_admin=True, domain=None, ip=None, atypical_user=False
):
now = datetime.utcnow()
new_user = WebUser.create(
domain,
username,
password,
created_by,
created_via,
is_admin=is_domain_admin,
can_skip_domain=(domain is None),
)
new_user.first_name = first_name
new_user.last_name = last_name
new_user.email = username
new_user.subscribed_to_commcare_users = False
new_user.eula.signed = True
new_user.eula.date = now
new_user.eula.type = 'End User License Agreement'
if ip:
new_user.eula.user_ip = ip
new_user.is_staff = False # Can't log in to admin site
new_user.is_active = True
new_user.is_superuser = False
new_user.last_login = now
new_user.date_joined = now
new_user.last_password_set = now
new_user.atypical_user = atypical_user
new_user.save()
return new_user
|
32,076 |
def main():
try:
demisto_params = demisto.params()
command = demisto.command()
params = {
'api_url': demisto_params['url'].rstrip('/'),
'use_ssl': not demisto_params.get('insecure', False),
'threshold': int(demisto_params.get('threshold', 1)),
'create_relationships': bool(demisto_params.get('create_relationships', True)),
'max_num_of_relationships': int(demisto_params.get('max_num_of_relationships', 1)) if int(
demisto_params.get('max_num_of_relationships', 1)) < 1000 else 1000,
}
reliability = demisto_params.get('integrationReliability', DBotScoreReliability.C)
if DBotScoreReliability.is_valid_type(reliability):
params['reliability'] = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
            raise Exception('Please provide a valid value for the Source Reliability parameter.')
# Remove proxy if not set to true in params
handle_proxy()
if command == 'test-module':
# This is the call made when pressing the integration test button.
test_module(**params)
demisto.results('ok')
elif command == 'url':
return_results(results=url_command(**params))
elif command == 'domain':
return_results(results=domain_command(**params))
elif command == 'file':
return_results(results=file_command(**params))
elif command == 'urlhaus-download-sample':
urlhaus_download_sample_command(**params)
# Log exceptions
except Exception as exc:
return_error(f'Failed to execute command "{command}".\nError: {exc}', error=exc)
|
def main():
try:
demisto_params = demisto.params()
command = demisto.command()
params = {
'api_url': demisto_params['url'].rstrip('/'),
'use_ssl': not demisto_params.get('insecure', False),
'threshold': int(demisto_params.get('threshold', 1)),
'create_relationships': bool(demisto_params.get('create_relationships', True)),
'max_num_of_relationships': min(1000, int(demisto_params.get('max_num_of_relationships', 10))),
}
reliability = demisto_params.get('integrationReliability', DBotScoreReliability.C)
if DBotScoreReliability.is_valid_type(reliability):
params['reliability'] = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
            raise Exception('Please provide a valid value for the Source Reliability parameter.')
# Remove proxy if not set to true in params
handle_proxy()
if command == 'test-module':
# This is the call made when pressing the integration test button.
test_module(**params)
demisto.results('ok')
elif command == 'url':
return_results(results=url_command(**params))
elif command == 'domain':
return_results(results=domain_command(**params))
elif command == 'file':
return_results(results=file_command(**params))
elif command == 'urlhaus-download-sample':
urlhaus_download_sample_command(**params)
# Log exceptions
except Exception as exc:
return_error(f'Failed to execute command "{command}".\nError: {exc}', error=exc)
|
32,911 |
def _default_span_processors_factory(
trace_filters, # type: List[TraceFilter]
trace_writer, # type: TraceWriter
partial_flush_enabled, # type: bool
partial_flush_min_spans, # type: int
appsec_enabled, # type: bool
):
# type: (...) -> List[SpanProcessor]
"""Construct the default list of span processors to use."""
trace_processors = [] # type: List[TraceProcessor]
trace_processors += [TraceTagsProcessor()]
trace_processors += [TraceSamplingProcessor()]
trace_processors += [TraceTopLevelSpanProcessor()]
trace_processors += trace_filters
span_processors = [] # type: List[SpanProcessor]
if appsec_enabled:
try:
from .appsec.processor import AppSecSpanProcessor
appsec_span_processor = AppSecSpanProcessor()
span_processors.append(appsec_span_processor)
except Exception as e:
# DDAS-001-01
log.error(
"[DDAS-001-01] "
"AppSec could not start because of an unexpected error. No security activities will be collected. "
"Please contact support at https://docs.datadoghq.com/help/ for help. Error details: \n%s",
repr(e),
)
if config._raise:
raise
span_processors.append(
SpanAggregator(
partial_flush_enabled=partial_flush_enabled,
partial_flush_min_spans=partial_flush_min_spans,
trace_processors=trace_processors,
writer=trace_writer,
)
)
return span_processors
|
def _default_span_processors_factory(
trace_filters, # type: List[TraceFilter]
trace_writer, # type: TraceWriter
partial_flush_enabled, # type: bool
partial_flush_min_spans, # type: int
appsec_enabled, # type: bool
):
# type: (...) -> List[SpanProcessor]
"""Construct the default list of span processors to use."""
trace_processors = [] # type: List[TraceProcessor]
trace_processors += [TraceTagsProcessor()]
trace_processors += [TraceSamplingProcessor()]
trace_processors += [TraceTopLevelSpanProcessor()]
trace_processors += trace_filters
span_processors = [] # type: List[SpanProcessor]
if self._appsec_enabled:
try:
from .appsec.processor import AppSecSpanProcessor
appsec_span_processor = AppSecSpanProcessor()
span_processors.append(appsec_span_processor)
except Exception as e:
# DDAS-001-01
log.error(
"[DDAS-001-01] "
"AppSec could not start because of an unexpected error. No security activities will be collected. "
"Please contact support at https://docs.datadoghq.com/help/ for help. Error details: \n%s",
repr(e),
)
if config._raise:
raise
span_processors.append(
SpanAggregator(
partial_flush_enabled=partial_flush_enabled,
partial_flush_min_spans=partial_flush_min_spans,
trace_processors=trace_processors,
writer=trace_writer,
)
)
return span_processors
|
14,844 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the IO expander devices."""
global _PORT_VALUE
global _I2C_ADDR
global _BUS
invert_logic = config.get(CONF_INVERT_LOGIC)
binary_sensors = []
pins = config.get("pins")
bits = config.get(CONF_BITS)
_I2C_ADDR = config.get(CONF_I2CADDR)
# Make 8-bits (can be 2- or 4-bits, but should always pack in a 8-bit msg)
while bits % 8:
bits += 1
# Increase array size
_PORT_VALUE *= int(bits / 8)
# Set up I2C bus connectivity
_BUS = SMBus(config.get(CONF_I2CBUS))
    # Write 1 to all pins to prepare them for reading
msg = i2c_msg.write(_I2C_ADDR, _PORT_VALUE)
if _BUS:
_BUS.i2c_rdwr(msg)
else:
_LOGGER.error("I2C bus %d not available!!", config.get(CONF_I2CBUS))
for pin_num, pin_name in pins.items():
binary_sensors.append(Pi4ioe5v9BinarySensor(pin_name, pin_num, invert_logic))
add_entities(binary_sensors, True)
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the IO expander devices."""
global _PORT_VALUE
global _I2C_ADDR
global _BUS
invert_logic = config.get(CONF_INVERT_LOGIC)
binary_sensors = []
pins = config[CONF_PINS]
bits = config.get(CONF_BITS)
_I2C_ADDR = config.get(CONF_I2CADDR)
# Make 8-bits (can be 2- or 4-bits, but should always pack in a 8-bit msg)
while bits % 8:
bits += 1
# Increase array size
_PORT_VALUE *= int(bits / 8)
# Set up I2C bus connectivity
_BUS = SMBus(config.get(CONF_I2CBUS))
    # Write 1 to all pins to prepare them for reading
msg = i2c_msg.write(_I2C_ADDR, _PORT_VALUE)
if _BUS:
_BUS.i2c_rdwr(msg)
else:
_LOGGER.error("I2C bus %d not available!!", config.get(CONF_I2CBUS))
for pin_num, pin_name in pins.items():
binary_sensors.append(Pi4ioe5v9BinarySensor(pin_name, pin_num, invert_logic))
add_entities(binary_sensors, True)
|
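The while bits % 8 loop above rounds the configured bit count up to a whole number of bytes before sizing the port buffer. The same rounding in closed form, shown only as an equivalence check:

def round_up_to_bytes(bits):
    # Equivalent to: while bits % 8: bits += 1
    return -(-bits // 8) * 8

assert [round_up_to_bytes(b) for b in (2, 4, 8, 9, 16)] == [8, 8, 8, 16, 16]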
20,425 |
def _patch_legacy_helpers(app_folder):
files_to_patch = []
files_to_patch.extend(glob.glob("%s/scripts/*" % app_folder))
files_to_patch.extend(glob.glob("%s/scripts/.*" % app_folder))
stuff_to_replace = {
# Replace
# sudo yunohost app initdb $db_user -p $db_pwd
# by
# ynh_mysql_setup_db --db_user=$db_user --db_name=$db_user --db_pwd=$db_pwd
"yunohost app initdb": {
"pattern": r"(sudo )?yunohost app initdb \"?(\$\{?\w+\}?)\"?\s+-p\s\"?(\$\{?\w+\}?)\"?",
"replace": r"ynh_mysql_setup_db --db_user=\2 --db_name=\2 --db_pwd=\3",
"important": True,
},
# Replace
# sudo yunohost app checkport whaterver
# by
# ynh_port_available whatever
"yunohost app checkport": {
"pattern": r"(sudo )?yunohost app checkport",
"replace": r"ynh_port_available",
"important": True,
},
# We can't migrate easily port-available
# .. but at the time of writing this code, only two non-working apps are using it.
"yunohost tools port-available": {"important": True},
# Replace
# yunohost app checkurl "${domain}${path_url}" -a "${app}"
# by
# ynh_webpath_register --app=${app} --domain=${domain} --path_url=${path_url}
"yunohost app checkurl": {
"pattern": r"(sudo )?yunohost app checkurl \"?(\$\{?\w+\}?)\/?(\$\{?\w+\}?)\"?\s+-a\s\"?(\$\{?\w+\}?)\"?",
"replace": r"ynh_webpath_register --app=\4 --domain=\2 --path_url=\3",
"important": True,
},
# Remove
# Automatic diagnosis data from YunoHost
# __PRE_TAG1__$(yunohost tools diagnosis | ...)__PRE_TAG2__"
#
"yunohost tools diagnosis": {
"pattern": r"(Automatic diagnosis data from YunoHost( *\n)*)? *(__\w+__)? *\$\(yunohost tools diagnosis.*\)(__\w+__)?",
"replace": r"",
"important": False,
},
# Old $1, $2 in backup/restore scripts...
"app=$2": {
"only_for": ["scripts/backup", "scripts/restore"],
"pattern": r"app=\$2",
"replace": r"app=$YNH_APP_INSTANCE_NAME",
"important": True,
},
# Old $1, $2 in backup/restore scripts...
"backup_dir=$1": {
"only_for": ["scripts/backup", "scripts/restore"],
"pattern": r"backup_dir=\$1",
"replace": r"backup_dir=.",
"important": True,
},
# Old $1, $2 in backup/restore scripts...
"restore_dir=$1": {
"only_for": ["scripts/restore"],
"pattern": r"restore_dir=\$1",
"replace": r"restore_dir=.",
"important": True,
},
# Old $1, $2 in install scripts...
# We ain't patching that shit because it ain't trivial to patch all args...
"domain=$1": {"only_for": ["scripts/install"], "important": True},
}
for helper, infos in stuff_to_replace.items():
infos["pattern"] = (
re.compile(infos["pattern"]) if infos.get("pattern") else None
)
infos["replace"] = infos.get("replace")
for filename in files_to_patch:
# Ignore non-regular files
if not os.path.isfile(filename):
continue
try:
content = read_file(filename)
except Exception:
continue
replaced_stuff = False
show_warning = False
for helper, infos in stuff_to_replace.items():
# Ignore if not relevant for this file
if infos.get("only_for") and not any(
filename.endswith(f) for f in infos["only_for"]
):
continue
# If helper is used, attempt to patch the file
if helper in content and infos["pattern"]:
content = infos["pattern"].sub(infos["replace"], content)
replaced_stuff = True
if infos["important"]:
show_warning = True
# If the helper is *still* in the content, it means that we
# couldn't patch the deprecated helper in the previous lines. In
# that case, abort the install or whichever step is performed
if helper in content and infos["important"]:
raise YunohostError(
"This app is likely pretty old and uses deprecated / outdated helpers that can't be migrated easily. It can't be installed anymore.",
raw_msg=True,
)
if replaced_stuff:
            # Check that the app does load the helper
            # If it doesn't, add the instruction ourselves (making sure it's after the #!/bin/bash if it's there...)
if filename.split("/")[-1] in [
"install",
"remove",
"upgrade",
"backup",
"restore",
]:
source_helpers = "source /usr/share/yunohost/helpers"
if source_helpers not in content:
                    content = content.replace("#!/bin/bash", "#!/bin/bash\n" + source_helpers)
if source_helpers not in content:
content = source_helpers + "\n" + content
# Actually write the new content in the file
write_to_file(filename, content)
if show_warning:
# And complain about those damn deprecated helpers
logger.error(
r"/!\ Packagers ! This app uses a very old deprecated helpers ... Yunohost automatically patched the helpers to use the new recommended practice, but please do consider fixing the upstream code right now ..."
)
|
def _patch_legacy_helpers(app_folder):
files_to_patch = []
files_to_patch.extend(glob.glob("%s/scripts/*" % app_folder))
files_to_patch.extend(glob.glob("%s/scripts/.*" % app_folder))
stuff_to_replace = {
# Replace
# sudo yunohost app initdb $db_user -p $db_pwd
# by
# ynh_mysql_setup_db --db_user=$db_user --db_name=$db_user --db_pwd=$db_pwd
"yunohost app initdb": {
"pattern": r"(sudo )?yunohost app initdb \"?(\$\{?\w+\}?)\"?\s+-p\s\"?(\$\{?\w+\}?)\"?",
"replace": r"ynh_mysql_setup_db --db_user=\2 --db_name=\2 --db_pwd=\3",
"important": True,
},
# Replace
# sudo yunohost app checkport whaterver
# by
# ynh_port_available whatever
"yunohost app checkport": {
"pattern": r"(sudo )?yunohost app checkport",
"replace": r"ynh_port_available",
"important": True,
},
# We can't migrate easily port-available
# .. but at the time of writing this code, only two non-working apps are using it.
"yunohost tools port-available": {"important": True},
# Replace
# yunohost app checkurl "${domain}${path_url}" -a "${app}"
# by
# ynh_webpath_register --app=${app} --domain=${domain} --path_url=${path_url}
"yunohost app checkurl": {
"pattern": r"(sudo )?yunohost app checkurl \"?(\$\{?\w+\}?)\/?(\$\{?\w+\}?)\"?\s+-a\s\"?(\$\{?\w+\}?)\"?",
"replace": r"ynh_webpath_register --app=\4 --domain=\2 --path_url=\3",
"important": True,
},
# Remove
# Automatic diagnosis data from YunoHost
# __PRE_TAG1__$(yunohost tools diagnosis | ...)__PRE_TAG2__"
#
"yunohost tools diagnosis": {
"pattern": r"(Automatic diagnosis data from YunoHost( *\n)*)? *(__\w+__)? *\$\(yunohost tools diagnosis.*\)(__\w+__)?",
"replace": r"",
"important": False,
},
# Old $1, $2 in backup/restore scripts...
"app=$2": {
"only_for": ["scripts/backup", "scripts/restore"],
"pattern": r"app=\$2",
"replace": r"app=$YNH_APP_INSTANCE_NAME",
"important": True,
},
# Old $1, $2 in backup/restore scripts...
"backup_dir=$1": {
"only_for": ["scripts/backup", "scripts/restore"],
"pattern": r"backup_dir=\$1",
"replace": r"backup_dir=.",
"important": True,
},
# Old $1, $2 in backup/restore scripts...
"restore_dir=$1": {
"only_for": ["scripts/restore"],
"pattern": r"restore_dir=\$1",
"replace": r"restore_dir=.",
"important": True,
},
# Old $1, $2 in install scripts...
# We ain't patching that shit because it ain't trivial to patch all args...
"domain=$1": {"only_for": ["scripts/install"], "important": True},
}
for helper, infos in stuff_to_replace.items():
infos["pattern"] = (
re.compile(infos["pattern"]) if infos.get("pattern") else None
)
infos["replace"] = infos.get("replace")
for filename in files_to_patch:
# Ignore non-regular files
if not os.path.isfile(filename):
continue
try:
content = read_file(filename)
except MoulinetteError:
continue
replaced_stuff = False
show_warning = False
for helper, infos in stuff_to_replace.items():
# Ignore if not relevant for this file
if infos.get("only_for") and not any(
filename.endswith(f) for f in infos["only_for"]
):
continue
# If helper is used, attempt to patch the file
if helper in content and infos["pattern"]:
content = infos["pattern"].sub(infos["replace"], content)
replaced_stuff = True
if infos["important"]:
show_warning = True
# If the helper is *still* in the content, it means that we
# couldn't patch the deprecated helper in the previous lines. In
# that case, abort the install or whichever step is performed
if helper in content and infos["important"]:
raise YunohostError(
"This app is likely pretty old and uses deprecated / outdated helpers that can't be migrated easily. It can't be installed anymore.",
raw_msg=True,
)
if replaced_stuff:
            # Check that the app does load the helper
            # If it doesn't, add the instruction ourselves (making sure it's after the #!/bin/bash if it's there...)
if filename.split("/")[-1] in [
"install",
"remove",
"upgrade",
"backup",
"restore",
]:
source_helpers = "source /usr/share/yunohost/helpers"
if source_helpers not in content:
                    content = content.replace("#!/bin/bash", "#!/bin/bash\n" + source_helpers)
if source_helpers not in content:
content = source_helpers + "\n" + content
# Actually write the new content in the file
write_to_file(filename, content)
if show_warning:
# And complain about those damn deprecated helpers
logger.error(
r"/!\ Packagers ! This app uses a very old deprecated helpers ... Yunohost automatically patched the helpers to use the new recommended practice, but please do consider fixing the upstream code right now ..."
)
|
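Each entry in stuff_to_replace above is a compiled regex plus a replacement. A worked example of one of them, the deprecated checkport call, applied to a sample script line:

import re

pattern = re.compile(r"(sudo )?yunohost app checkport")
line = "sudo yunohost app checkport $port"
assert pattern.sub("ynh_port_available", line) == "ynh_port_available $port"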
45,658 |
def layout():
return html.Div(id='oncoprint-body', children=[
dash_bio.OncoPrint(
id='oncoprint-chart',
height=550,
data=[]
),
html.Div(id='oncoprint-control-tabs', children=[
dcc.Tabs(
id='oncoprint-tabs',
children=[
dcc.Tab(
label='About',
value='what-is',
children=html.Div(className='oncoprint-tab', children=[
html.H4(
"What is OncoPrint?"
),
html.P(
"""
The OncoPrint component is used to view multiple genetic
alteration events through an interactive and zoomable
heatmap. It is a React/Dash port of the popular
oncoPrint() function from the BioConductor R
package. Under the hood, the rendering is done using
Plotly.js built upon D3. Plotly's interactivity allows
the user to bind clicks and hovers to genetic events,
allowing the user to create complex bioinformatic apps
or workflows that rely on crossfiltering.
"""
),
html.P(
"""
Read more about the component here:
https://github.com/plotly/react-oncoprint
"""
)
])
),
dcc.Tab(
label='Data',
value='data',
children=html.Div(className='oncoprint-tab', children=[
html.Div([
html.Div(
className='oncoprint-option-name',
children='Select dataset'
),
dcc.Dropdown(
id='oncoprint-dropdown',
className='oncoprint-select',
options=[
{
'label': '{}.json'.format(ds),
'value': ds
}
for ds in DATASETS
],
value='cBioPortalData',
),
]),
html.Hr(
className='oncoprint-separator'
),
html.Div([
html.H4('Hover, click, or event data'),
html.Div(
id='oncoprint-events'
),
])
])
),
dcc.Tab(
label='View',
value='view',
children=html.Div(className='oncoprint-tab', children=[
html.H4('Layout'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Overview'
),
daq.ToggleSwitch(
id='oncoprint-show-overview',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Legend'
),
daq.ToggleSwitch(
id='oncoprint-show-legend',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Padding'
),
dcc.Slider(
className='oncoprint-slider',
id='oncoprint-padding-input',
value=0.05,
min=0,
max=0.1,
step=0.01,
marks={
'0': '0',
'0.02': '0.02',
'0.04': '0.04',
'0.06': '0.06',
'0.08': '0.08',
'0.1': '0.1',
},
),
html.Br(),
html.Div(
'Adjust the padding (as percentage) '
'between two tracks.'
),
],
),
html.Hr(className='oncoprint-separator'),
html.Div([
html.H4('Colors'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Track color'
),
html.P(
'Change the default background '
'color for the tracks.'
),
daq.ColorPicker(
id='oncoprint-tracks-color',
value={'hex': '#AAAAAA'}
),
],
),
html.Hr(className='oncoprint-separator'),
html.H6("Mutation colors"),
html.P(
"Select a mutation type and a color "
"to customize its look."
),
html.Div(children=[
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation type'
),
dcc.Dropdown(
id='oncoprint-colorscale-mutation-dropdown',
options=[
{'label': mut_type, 'value': mut_type}
for mut_type in COLORSCALE_MUTATIONS_OPT
],
value=COLORSCALE_MUTATIONS_OPT[0],
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation color'
),
daq.ColorPicker(
id='oncoprint-mutation-color',
value={'hex': COLORSCALE_COLORS_OPT[0]}
)
],
),
])
])
])
)
]
)
]),
dcc.Store(id='oncoprint-store'),
]),
|
def layout():
return html.Div(id='oncoprint-body', children=[
dash_bio.OncoPrint(
id='oncoprint-chart',
height=550,
data=[]
),
html.Div(id='oncoprint-control-tabs', children=[
dcc.Tabs(
id='oncoprint-tabs',
children=[
dcc.Tab(
label='About',
value='what-is',
children=html.Div(className='oncoprint-tab', children=[
html.H4(
"What is OncoPrint?"
),
html.P(
"""
The OncoPrint component is used to view multiple genetic
alteration events through an interactive and zoomable
heatmap. It is a React/Dash port of the popular
oncoPrint() function from the BioConductor R
package. Under the hood, the rendering is done using
Plotly.js built upon D3. Plotly's interactivity allows
you to bind clicks and hovers to genetic events,
allowing the user to create complex bioinformatic apps
or workflows that rely on crossfiltering.
"""
),
html.P(
"""
Read more about the component here:
https://github.com/plotly/react-oncoprint
"""
)
])
),
dcc.Tab(
label='Data',
value='data',
children=html.Div(className='oncoprint-tab', children=[
html.Div([
html.Div(
className='oncoprint-option-name',
children='Select dataset'
),
dcc.Dropdown(
id='oncoprint-dropdown',
className='oncoprint-select',
options=[
{
'label': '{}.json'.format(ds),
'value': ds
}
for ds in DATASETS
],
value='cBioPortalData',
),
]),
html.Hr(
className='oncoprint-separator'
),
html.Div([
html.H4('Hover, click, or event data'),
html.Div(
id='oncoprint-events'
),
])
])
),
dcc.Tab(
label='View',
value='view',
children=html.Div(className='oncoprint-tab', children=[
html.H4('Layout'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Overview'
),
daq.ToggleSwitch(
id='oncoprint-show-overview',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Legend'
),
daq.ToggleSwitch(
id='oncoprint-show-legend',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Padding'
),
dcc.Slider(
className='oncoprint-slider',
id='oncoprint-padding-input',
value=0.05,
min=0,
max=0.1,
step=0.01,
marks={
'0': '0',
'0.02': '0.02',
'0.04': '0.04',
'0.06': '0.06',
'0.08': '0.08',
'0.1': '0.1',
},
),
html.Br(),
html.Div(
'Adjust the padding (as percentage) '
'between two tracks.'
),
],
),
html.Hr(className='oncoprint-separator'),
html.Div([
html.H4('Colors'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Track color'
),
html.P(
'Change the default background '
'color for the tracks.'
),
daq.ColorPicker(
id='oncoprint-tracks-color',
value={'hex': '#AAAAAA'}
),
],
),
html.Hr(className='oncoprint-separator'),
html.H6("Mutation colors"),
html.P(
"Select a mutation type and a color "
"to customize its look."
),
html.Div(children=[
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation type'
),
dcc.Dropdown(
id='oncoprint-colorscale-mutation-dropdown',
options=[
{'label': mut_type, 'value': mut_type}
for mut_type in COLORSCALE_MUTATIONS_OPT
],
value=COLORSCALE_MUTATIONS_OPT[0],
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation color'
),
daq.ColorPicker(
id='oncoprint-mutation-color',
value={'hex': COLORSCALE_COLORS_OPT[0]}
)
],
),
])
])
])
)
]
)
]),
dcc.Store(id='oncoprint-store'),
]),
|
31,817 |
def main():
try:
incident_ids = get_incident_ids()
last_incident_occurred = get_last_incident_occurred(incident_ids)
if last_incident_occurred:
html_readable_output = f"<div style='text-align:center; font-size:17px; padding: 15px;'>" \
f"Last Incident Occurred</br> <div style='font-size:24px;'> " \
f"{last_incident_occurred} </div></div>"
else:
html_readable_output = "<div style='text-align:center; font-size:17px; padding: 15px;'>" \
"Last Incident Occurred</br> <div style='font-size:20px;'> " \
"No last incident occurred found. </div></div>"
demisto.results({
'ContentsFormat': formats['html'],
'Type': entryTypes['note'],
'Contents': html_readable_output
})
except Exception as err:
return_error(str(err))
|
def main():
try:
incident_ids = get_incident_ids()
last_incident_occurred = get_last_incident_occurred(incident_ids)
if last_incident_occurred:
html_readable_output = f"<div style='text-align:center; font-size:17px; padding: 15px;'>" \
f"Last Incident Occurred</br> <div style='font-size:24px;'> " \
f"{last_incident_occurred} </div></div>"
else:
html_readable_output = "<div style='text-align:center; font-size:17px; padding: 15px;'>" \
"Last Incident Occurred</br> <div style='font-size:20px;'> " \
"N/A</div></div>"
demisto.results({
'ContentsFormat': formats['html'],
'Type': entryTypes['note'],
'Contents': html_readable_output
})
except Exception as err:
return_error(str(err))
|
31,938 |
def checkpoint_add_objects_batch_command(client: Client, object_type: str, ipaddress, name):
context_data = {}
readable_output = ''
ipaddress = argToList(ipaddress, ',')
name = argToList(name, ',')
add_list = []
for ip, n in zip(ipaddress, name):
tmp_dict = {'name': n, 'ip-address': ip}
add_list.append(tmp_dict)
result = current_result = client.add_objects_batch(object_type, add_list)
if result:
context_data = {'task-id': result.get('task-id')}
readable_output = tableToMarkdown('CheckPoint data for add-objects-batch command:',
context_data)
command_results = CommandResults(
outputs_prefix='CheckPoint.add_objects_batch',
outputs_key_field='uid',
readable_output=readable_output,
outputs=context_data,
raw_response=result
)
return command_results
|
def checkpoint_add_objects_batch_command(client: Client, object_type: str, ipaddress, name):
context_data = {}
readable_output = ''
ipaddress = argToList(ipaddress, ',')
name = argToList(name, ',')
add_list = []
for ip, n in zip(ipaddress, name):
tmp_dict = {'name': n, 'ip-address': ip}
add_list.append(tmp_dict)
result = current_result = client.add_objects_batch(object_type, add_list)
if result:
context_data = {'task-id': result.get('task-id')}
readable_output = tableToMarkdown('CheckPoint data for add-objects-batch command:',
context_data)
command_results = CommandResults(
outputs_prefix='CheckPoint.AddObjectBatch',
outputs_key_field='uid',
readable_output=readable_output,
outputs=context_data,
raw_response=result
)
return command_results
|
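A note on how the batch payload above is assembled: the comma-separated name and ipaddress arguments are split into lists and paired with zip(), which silently truncates to the shorter list. A minimal standalone sketch (no Demisto imports; the split helper below merely stands in for argToList):

def to_list(value, separator=','):
    """Split a comma-separated string into a list of stripped, non-empty items."""
    if isinstance(value, list):
        return value
    return [item.strip() for item in value.split(separator) if item.strip()]

def build_add_list(names, ipaddresses):
    """Pair each name with its IP address; zip() silently drops unmatched trailing items."""
    return [{'name': n, 'ip-address': ip} for n, ip in zip(to_list(names), to_list(ipaddresses))]

print(build_add_list('host-a, host-b', '10.0.0.1, 10.0.0.2, 10.0.0.3'))
# [{'name': 'host-a', 'ip-address': '10.0.0.1'}, {'name': 'host-b', 'ip-address': '10.0.0.2'}]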
38,441 |
def set_iterate(data: Dict, iterate: Optional[Dict] = None) -> Dict:
"""Initialize or update an iterate dictionary.
Same as set_state for subfield pp.ITERATE
Also checks whether pp.STATE field is set, and adds it if not, see set_state.
"""
    if pp.STATE not in data:
set_state(data)
if iterate is None:
iterate = {}
if pp.ITERATE in data[pp.STATE]:
data[pp.STATE][pp.ITERATE].update(iterate)
else:
data[pp.STATE][pp.ITERATE] = iterate
return data
|
def set_iterate(data: Dict, iterate: Optional[Dict] = None) -> Dict:
"""Initialize or update an iterate dictionary.
Same as set_state for subfield pp.ITERATE
Also checks whether pp.STATE field is set, and adds it if not, see set_state.
"""
    if pp.STATE not in data:
set_state(data)
iterate = iterate or {}
if pp.ITERATE in data[pp.STATE]:
data[pp.STATE][pp.ITERATE].update(iterate)
else:
data[pp.STATE][pp.ITERATE] = iterate
return data
|
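The modified version collapses the explicit None check into `iterate or {}`. For this function the observable result is the same, but the `or` form also replaces any falsy argument, such as an empty dict supplied by the caller, with a brand-new dict. A small demo of the difference:

# Minimal demo of the two defaulting styles used above.
def default_explicit(iterate=None):
    if iterate is None:
        iterate = {}
    return iterate

def default_or(iterate=None):
    return iterate or {}

caller_dict = {}
print(default_explicit(caller_dict) is caller_dict)  # True  -> caller's dict is kept
print(default_or(caller_dict) is caller_dict)        # False -> replaced by a new dict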
6,818 |
def get_permission_query_condition(user):
if not user: user = frappe.session.user
if user == "Administrator":
return None
from frappe.utils.user import UserPermissions
user = UserPermissions(user)
if "System Manager" in user.roles:
return None
reports = [ '"%s"'%report for report in user.get_all_reports().keys() ]
return """`tabPrepared Report`.ref_report_doctype in ({reports})"""\
.format(reports=','.join(reports))
|
def get_permission_query_condition(user):
if not user: user = frappe.session.user
if user == "Administrator":
return None
from frappe.utils.user import UserPermissions
user = UserPermissions(user)
if "System Manager" in user.roles:
return None
reports = [frappe.db.escape(report) for report in user.get_all_reports().keys()]
return """`tabPrepared Report`.ref_report_doctype in ({reports})"""\
.format(reports=','.join(reports))
|
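The switch to frappe.db.escape matters because the report names are interpolated into a SQL IN (...) clause, so they must be quoted and escaped rather than wrapped in '"%s"'. A minimal sketch of the idea, using a stand-in escape helper (an assumption for illustration, not frappe's actual implementation):

def escape(value):
    """Stand-in escaper: quote the value and double any embedded single quotes."""
    return "'" + str(value).replace("\\", "\\\\").replace("'", "''") + "'"

reports = ["Sales Register", "O'Brien's Report"]
in_clause = "`tabPrepared Report`.ref_report_doctype in ({})".format(
    ",".join(escape(r) for r in reports))
print(in_clause)
# `tabPrepared Report`.ref_report_doctype in ('Sales Register','O''Brien''s Report')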
19,845 |
def parse_query_string(query_string, operator=None, zero_terms=MATCH_NONE):
"""
This takes a query string typed in by a user and extracts the following:
- Quoted terms (for phrase search)
- Filters
For example, the following query:
`hello "this is a phrase" live:true` would be parsed into:
filters: {'live': 'true'}
tokens: [('hello', False), ('this is a phrase', True)]
"""
filters, query_string = separate_filters_from_query(query_string)
is_phrase = False
tokens = []
for part in query_string.split('"'):
part = part.strip()
if part:
if is_phrase:
tokens.append(Phrase(part))
else:
tokens.append(PlainText(part, operator=operator or PlainText.DEFAULT_OPERATOR))
is_phrase = not is_phrase
if tokens:
if operator == 'or':
search_query = OR(tokens)
else:
search_query = AND(tokens)
else:
search_query = zero_terms
return filters, search_query
|
def parse_query_string(query_string, operator=None, zero_terms=MATCH_NONE):
"""
This takes a query string typed in by a user and extracts the following:
- Quoted terms (for phrase search)
- Filters
For example, the following query:
`hello "this is a phrase" live:true` would be parsed into:
filters: {'live': 'true'}
tokens: And([PlainText('hello'), Phrase('this is a phrase')])
"""
filters, query_string = separate_filters_from_query(query_string)
is_phrase = False
tokens = []
for part in query_string.split('"'):
part = part.strip()
if part:
if is_phrase:
tokens.append(Phrase(part))
else:
tokens.append(PlainText(part, operator=operator or PlainText.DEFAULT_OPERATOR))
is_phrase = not is_phrase
if tokens:
if operator == 'or':
search_query = OR(tokens)
else:
search_query = AND(tokens)
else:
search_query = zero_terms
return filters, search_query
|
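The tokenising step alternates between plain text and quoted phrases by splitting on the double-quote character. A standalone sketch of just that step, with plain tuples standing in for the PlainText and Phrase classes used above:

def tokenize(query_string):
    """Split on double quotes; parts at odd positions are quoted phrases."""
    tokens = []
    is_phrase = False
    for part in query_string.split('"'):
        part = part.strip()
        if part:
            tokens.append(('phrase' if is_phrase else 'plain', part))
        is_phrase = not is_phrase
    return tokens

print(tokenize('hello "this is a phrase" world'))
# [('plain', 'hello'), ('phrase', 'this is a phrase'), ('plain', 'world')]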
42,060 |
def run(args: argparse.Namespace) -> None:
kurobako_cmd = os.path.join(args.path_to_kurobako, "kurobako")
subprocess.run(f"{kurobako_cmd} --version", shell=True)
if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)):
raise ValueError(f"Data directory {args.data_dir} cannot be found.")
os.makedirs(args.out_dir, exist_ok=True)
study_json_fn = os.path.join(args.out_dir, "studies.json")
subprocess.check_call(f"echo >| {study_json_fn}", shell=True)
solvers_filename = os.path.join(args.out_dir, "solvers.json")
subprocess.check_call(f"echo >| {solvers_filename}", shell=True)
problems_filename = os.path.join(args.out_dir, "problems.json")
subprocess.check_call(f"echo >| {problems_filename}", shell=True)
# Create ZDT problems
cmd = f"{kurobako_cmd} problem-suite zdt | tee -a {problems_filename}"
subprocess.run(cmd, shell=True)
# Create NAS bench problem(C) (for Multi-Objective Settings).
dataset = os.path.join(args.data_dir, "nasbench_full.bin")
cmd = (
f'{kurobako_cmd} problem nasbench "{dataset}"'
f"--encoding C --metrics accuracy params | tee -a {problems_filename}"
)
subprocess.run(cmd, shell=True)
# Create solvers.
sampler_list = args.sampler_list.split()
sampler_kwargs_list = args.sampler_kwargs_list.split()
if len(sampler_list) != len(sampler_kwargs_list):
raise ValueError(
"The number of samplers does not match the given keyword arguments. \n"
f"sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."
)
for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list):
name = f"{args.name_prefix}_{sampler}"
python_command = f"mo_runner.py {sampler} {sampler_kwargs}"
cmd = (
f"{kurobako_cmd} solver --name {name} command python {python_command}"
f"| tee -a {solvers_filename}"
)
subprocess.run(cmd, shell=True)
# Create study.
cmd = (
f"{kurobako_cmd} studies --budget 1000 "
f"--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "
f"--repeats {args.n_runs} --seed {args.seed} "
f"> {study_json_fn}"
)
subprocess.run(cmd, shell=True)
result_filename = os.path.join(args.out_dir, "results.json")
cmd = (
f"cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} "
f"> {result_filename}"
)
subprocess.run(cmd, shell=True)
# Report
report_filename = os.path.join(args.out_dir, "report.md")
cmd = f"cat {result_filename} | {kurobako_cmd} report > {report_filename}"
subprocess.run(cmd, shell=True)
# Plot pareto-front.
problem_names = ["NASBench", "ZDT1", "ZDT2", "ZDT3", "ZDT4", "ZDT5", "ZDT6"]
for problem_name in problem_names:
cmd = (
f"cat {result_filename} | grep {problem_name} | "
f"{kurobako_cmd} plot pareto-front -o {args.out_dir}"
)
subprocess.run(cmd, shell=True)
|
def run(args: argparse.Namespace) -> None:
kurobako_cmd = os.path.join(args.path_to_kurobako, "kurobako")
subprocess.run(f"{kurobako_cmd} --version", shell=True)
if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)):
raise ValueError(f"Data directory {args.data_dir} cannot be found.")
os.makedirs(args.out_dir, exist_ok=True)
    study_json_filename = os.path.join(args.out_dir, "studies.json")
    subprocess.check_call(f"echo >| {study_json_filename}", shell=True)
solvers_filename = os.path.join(args.out_dir, "solvers.json")
subprocess.check_call(f"echo >| {solvers_filename}", shell=True)
problems_filename = os.path.join(args.out_dir, "problems.json")
subprocess.check_call(f"echo >| {problems_filename}", shell=True)
# Create ZDT problems
cmd = f"{kurobako_cmd} problem-suite zdt | tee -a {problems_filename}"
subprocess.run(cmd, shell=True)
# Create NAS bench problem(C) (for Multi-Objective Settings).
dataset = os.path.join(args.data_dir, "nasbench_full.bin")
cmd = (
f'{kurobako_cmd} problem nasbench "{dataset}"'
f"--encoding C --metrics accuracy params | tee -a {problems_filename}"
)
subprocess.run(cmd, shell=True)
# Create solvers.
sampler_list = args.sampler_list.split()
sampler_kwargs_list = args.sampler_kwargs_list.split()
if len(sampler_list) != len(sampler_kwargs_list):
raise ValueError(
"The number of samplers does not match the given keyword arguments. \n"
f"sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."
)
for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list):
name = f"{args.name_prefix}_{sampler}"
python_command = f"mo_runner.py {sampler} {sampler_kwargs}"
cmd = (
f"{kurobako_cmd} solver --name {name} command python {python_command}"
f"| tee -a {solvers_filename}"
)
subprocess.run(cmd, shell=True)
# Create study.
cmd = (
f"{kurobako_cmd} studies --budget 1000 "
f"--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "
f"--repeats {args.n_runs} --seed {args.seed} "
f"> {study_json_filename}"
)
subprocess.run(cmd, shell=True)
result_filename = os.path.join(args.out_dir, "results.json")
cmd = (
f"cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} "
f"> {result_filename}"
)
subprocess.run(cmd, shell=True)
# Report
report_filename = os.path.join(args.out_dir, "report.md")
cmd = f"cat {result_filename} | {kurobako_cmd} report > {report_filename}"
subprocess.run(cmd, shell=True)
# Plot pareto-front.
problem_names = ["NASBench", "ZDT1", "ZDT2", "ZDT3", "ZDT4", "ZDT5", "ZDT6"]
for problem_name in problem_names:
cmd = (
f"cat {result_filename} | grep {problem_name} | "
f"{kurobako_cmd} plot pareto-front -o {args.out_dir}"
)
subprocess.run(cmd, shell=True)
|
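run() assumes an argparse namespace carrying the attributes it reads (path_to_kurobako, data_dir, out_dir, sampler_list, and so on). A hedged sketch of a parser that would supply them; the option names mirror the attributes used above, while the defaults are assumptions for illustration only:

import argparse

def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(description="Run kurobako multi-objective benchmarks.")
    parser.add_argument("--path-to-kurobako", dest="path_to_kurobako", default="")
    parser.add_argument("--data-dir", dest="data_dir", default="./data")
    parser.add_argument("--out-dir", dest="out_dir", default="./out")
    parser.add_argument("--name-prefix", dest="name_prefix", default="bench")
    parser.add_argument("--sampler-list", dest="sampler_list", default="TPESampler NSGAIISampler")
    parser.add_argument("--sampler-kwargs-list", dest="sampler_kwargs_list", default="{} {}")
    parser.add_argument("--n-runs", dest="n_runs", type=int, default=10)
    parser.add_argument("--n-jobs", dest="n_jobs", type=int, default=1)
    parser.add_argument("--seed", dest="seed", type=int, default=0)
    return parser

# Example: run(build_parser().parse_args())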
10,738 |
def main():
parser = argparse.ArgumentParser(description=program_description)
parser.add_argument(
'package', metavar='package', type=str,
help='Package to inspect',
)
parser.add_argument(
'--format', dest='format', default='html',
help='Output format; i.e. "html", "rst"',
)
parser.add_argument(
'--file', dest='file', default='inspector_output',
help='Output filename. Defaults to "inspector_output"',
)
args = parser.parse_args()
package_name = args.package
output_format = args.format
filename = args.file
write_listings(package_name, filename, output_format)
|
def main():
parser = argparse.ArgumentParser(description=program_description)
parser.add_argument(
'package', metavar='package', type=str,
help='Package to inspect',
)
parser.add_argument(
'--format', dest='format', default='html',
help='Output format; i.e. "html", "rst"',
)
parser.add_argument(
'--file', dest='file', default='inspector_output',
help='Output filename. Defaults to "inspector_output.<format>"',
)
args = parser.parse_args()
package_name = args.package
output_format = args.format
filename = args.file
write_listings(package_name, filename, output_format)
|
29,414 |
def get_current_picture(camera_config, width, height):
from motioneye import mjpgclient
jpg = mjpgclient.get_jpg(camera_config['@id'])
if jpg is None:
return None
if width is height is None:
return jpg # no server-side resize needed
sio = io.BytesIO(jpg) if isinstance(jpg, bytes) else io.StringIO(jpg)
image = Image.open(sio)
if width and width < 1: # given as percent
width = int(width * image.size[0])
if height and height < 1: # given as percent
height = int(height * image.size[1])
width = width and int(width) or image.size[0]
height = height and int(height) or image.size[1]
webcam_resolution = camera_config['@webcam_resolution']
max_width = image.size[0] * webcam_resolution / 100
max_height = image.size[1] * webcam_resolution / 100
width = min(max_width, width)
height = min(max_height, height)
if width >= image.size[0] and height >= image.size[1]:
return jpg # no enlarging of the picture on the server side
image.thumbnail((width, height), Image.CUBIC)
sio = io.StringIO()
image.save(sio, format='JPEG')
return sio.getvalue()
|
def get_current_picture(camera_config, width, height):
from motioneye import mjpgclient
jpg = mjpgclient.get_jpg(camera_config['@id'])
if jpg is None:
return None
if width is height is None:
return jpg # no server-side resize needed
sio = io.BytesIO(jpg)
image = Image.open(sio)
if width and width < 1: # given as percent
width = int(width * image.size[0])
if height and height < 1: # given as percent
height = int(height * image.size[1])
width = width and int(width) or image.size[0]
height = height and int(height) or image.size[1]
webcam_resolution = camera_config['@webcam_resolution']
max_width = image.size[0] * webcam_resolution / 100
max_height = image.size[1] * webcam_resolution / 100
width = min(max_width, width)
height = min(max_height, height)
if width >= image.size[0] and height >= image.size[1]:
return jpg # no enlarging of the picture on the server side
image.thumbnail((width, height), Image.CUBIC)
    sio = io.BytesIO()
image.save(sio, format='JPEG')
return sio.getvalue()
|
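Two sizing conventions drive the resize logic above: a requested width or height below 1 is treated as a fraction of the source size, and @webcam_resolution caps the result as a percentage of the source dimensions. A small arithmetic demo of those rules in isolation:

def resolve_dimension(requested, source, resolution_percent):
    """Resolve one requested dimension against a source size and a percentage cap."""
    if requested and requested < 1:          # given as a fraction of the source size
        requested = int(requested * source)
    requested = requested and int(requested) or source
    return min(source * resolution_percent / 100, requested)

print(resolve_dimension(0.5, 1280, 100))   # 640   -> half of the source width
print(resolve_dimension(None, 1280, 75))   # 960.0 -> capped at 75% of the source width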
32,572 |
def update_remote_system_command(args, params, service, auth_token, mapper):
""" Pushes changes in XSOAR incident into the corresponding notable event in Splunk Server.
Args:
args (dict): Demisto args
params (dict): Demisto params
service (splunklib.client.Service): Splunk service object
        auth_token (str): The authentication token to use
Returns:
notable_id (str): The notable id
"""
parsed_args = UpdateRemoteSystemArgs(args)
delta = parsed_args.delta
notable_id = parsed_args.remote_incident_id
if parsed_args.incident_changed and delta:
demisto.debug('Got the following delta keys {} to update incident corresponding to notable '
'{}'.format(str(list(delta.keys())), notable_id))
changed_data = {field: None for field in OUTGOING_MIRRORED_FIELDS}
for field in delta:
if field == 'owner':
new_owner = mapper.get_splunk_user_by_xsoar(delta["owner"]) if mapper.should_map else None
if new_owner:
changed_data['owner'] = new_owner
elif field in OUTGOING_MIRRORED_FIELDS:
changed_data[field] = delta[field]
# Close notable if relevant
if parsed_args.inc_status == IncidentStatus.DONE and params.get('close_notable'):
demisto.debug('Closing notable {}'.format(notable_id))
changed_data['status'] = '5' # type: ignore
if any(changed_data.values()):
demisto.debug('Sending update request to Splunk for notable {}, data: {}'.format(notable_id, changed_data))
base_url = 'https://' + params['host'] + ':' + params['port'] + '/'
try:
session_key = get_auth_session_key(service) if not auth_token else None
response_info = update_notable_events(
baseurl=base_url, comment=changed_data['comment'], status=changed_data['status'],
urgency=changed_data['urgency'], owner=changed_data['owner'], eventIDs=[notable_id],
disposition=changed_data['disposition'], auth_token=auth_token, sessionKey=session_key
)
if 'success' not in response_info or not response_info['success']:
demisto.error('Failed updating notable {}: {}'.format(notable_id, str(response_info)))
else:
demisto.debug('update-remote-system for notable {}: {}'.format(notable_id,
response_info.get('message')))
except Exception as e:
demisto.error('Error in Splunk outgoing mirror for incident corresponding to notable {}. '
'Error message: {}'.format(notable_id, str(e)))
else:
demisto.debug("Didn't find changed data to update incident corresponding to notable {}".format(notable_id))
else:
demisto.debug('Incident corresponding to notable {} was not changed.'.format(notable_id))
return notable_id
|
def update_remote_system_command(args, params, service, auth_token, mapper):
""" Pushes changes in XSOAR incident into the corresponding notable event in Splunk Server.
Args:
args (dict): Demisto args
params (dict): Demisto params
service (splunklib.client.Service): Splunk service object
        auth_token (str): The authentication token to use
Returns:
notable_id (str): The notable id
"""
parsed_args = UpdateRemoteSystemArgs(args)
delta = parsed_args.delta
notable_id = parsed_args.remote_incident_id
if parsed_args.incident_changed and delta:
demisto.debug('Got the following delta keys {} to update incident corresponding to notable '
'{}'.format(str(list(delta.keys())), notable_id))
changed_data = {field: None for field in OUTGOING_MIRRORED_FIELDS}
for field in delta:
if field == 'owner':
new_owner = mapper.get_splunk_user_by_xsoar(delta["owner"]) if mapper.should_map else None
if new_owner:
changed_data['owner'] = new_owner
elif field in OUTGOING_MIRRORED_FIELDS:
changed_data[field] = delta[field]
# Close notable if relevant
if parsed_args.inc_status == IncidentStatus.DONE and params.get('close_notable'):
demisto.debug('Closing notable {}'.format(notable_id))
changed_data['status'] = '5' # type: ignore
if any(changed_data.values()):
demisto.debug('Sending update request to Splunk for notable {}, data: {}'.format(notable_id, changed_data))
base_url = 'https://' + params['host'] + ':' + params['port'] + '/'
try:
session_key = get_auth_session_key(service) if not auth_token else None
response_info = update_notable_events(
baseurl=base_url, comment=changed_data['comment'], status=changed_data['status'],
urgency=changed_data['urgency'], owner=changed_data['owner'], eventIDs=[notable_id],
disposition=changed_data.get('disposition'), auth_token=auth_token, sessionKey=session_key
)
if 'success' not in response_info or not response_info['success']:
demisto.error('Failed updating notable {}: {}'.format(notable_id, str(response_info)))
else:
demisto.debug('update-remote-system for notable {}: {}'.format(notable_id,
response_info.get('message')))
except Exception as e:
demisto.error('Error in Splunk outgoing mirror for incident corresponding to notable {}. '
'Error message: {}'.format(notable_id, str(e)))
else:
demisto.debug("Didn't find changed data to update incident corresponding to notable {}".format(notable_id))
else:
demisto.debug('Incident corresponding to notable {} was not changed.'.format(notable_id))
return notable_id
|
3,183 |
def send_webhooks(installation, event, **kwargs):
try:
servicehook = ServiceHook.objects.get(
organization_id=installation.organization_id,
actor_id=installation.id,
)
except ServiceHook.DoesNotExist:
return
if event not in servicehook.events:
return
# The service hook applies to all projects if there are no
# ServiceHookProject records. Otherwise want to check if we
# should send the webhook or not.
projects = servicehook.get_projects()
if not projects:
resource, action = event.split('.')
kwargs['resource'] = resource
kwargs['action'] = action
kwargs['install'] = installation
request_data = AppPlatformEvent(**kwargs)
safe_urlopen(
url=servicehook.sentry_app.webhook_url,
data=request_data.body,
headers=request_data.headers,
timeout=5,
)
|
def send_webhooks(installation, event, **kwargs):
try:
servicehook = ServiceHook.objects.get(
organization_id=installation.organization_id,
actor_id=installation.id,
)
except ServiceHook.DoesNotExist:
return
if event not in servicehook.events:
return
# The service hook applies to all projects if there are no
# ServiceHookProject records. Otherwise we want to check if we
# should send the webhook or not.
projects = servicehook.get_projects()
if not projects:
resource, action = event.split('.')
kwargs['resource'] = resource
kwargs['action'] = action
kwargs['install'] = installation
request_data = AppPlatformEvent(**kwargs)
safe_urlopen(
url=servicehook.sentry_app.webhook_url,
data=request_data.body,
headers=request_data.headers,
timeout=5,
)
|
43,589 |
def read_structure(filepath, outpath="."):
r"""Reads the molecular structure from a file and creates a list containing the
symbol and Cartesian coordinates of the atomic species. The xyz format is supported out of
the box. If Open Babel is installed, any format recognized by Open Babel is
also supported. Additionally, the new file ``structure.xyz``, containing the
geometry of the molecule, is created in a directory with path given by 'outpath'.
**Example usage:**
>>> read_structure('h2_ref.xyz')
[['H', (0.0, 0.0, -0.35)], ['H', (0.0, 0.0, 0.35)]]
Args:
filepath (str): name of the molecular structure file in the working directory
or the full path to the file if it is located in a different folder
outpath (str): path to the output directory
Returns:
list: for each atomic species, a list containing the symbol and the Cartesian coordinates
"""
obabel_error_message = (
"Open Babel converter not found:\n"
"Try: 'sudo apt install openbabel' "
"or download it from http://openbabel.org/wiki/Main_Page \n"
"and make sure you add it to the PATH environment variable."
)
extension = filepath.split(".")[-1].strip().lower()
file_in = filepath.strip()
file_out = os.path.join(outpath, "structure.xyz")
if extension != "xyz":
if not _exec_exists("obabel"):
raise TypeError(obabel_error_message)
try:
subprocess.run(["obabel", "-i" + extension, file_in, "-oxyz", "-O", file_out],
check=True)
except subprocess.CalledProcessError as e:
raise RuntimeError(
"Open Babel error. See the following Open Babel "
"output for details:\n\n {}\n{}".format(e.stdout, e.stderr)
)
else:
copyfile(file_in, file_out)
geometry = []
with open(file_out) as f:
for line in f.readlines()[2:]:
species, x, y, z = line.split()
geometry.append([species, (float(x), float(y), float(z))])
return geometry
|
def read_structure(filepath, outpath="."):
r"""Reads the molecular structure from a file and creates a list containing the
symbol and Cartesian coordinates of the atomic species. The xyz format is supported out of
the box. If Open Babel is installed, any format recognized by Open Babel is
also supported. Additionally, the new file ``structure.xyz``, containing the
geometry of the molecule, is created in a directory with path given by ``outpath``.
**Example usage:**
>>> read_structure('h2_ref.xyz')
[['H', (0.0, 0.0, -0.35)], ['H', (0.0, 0.0, 0.35)]]
Args:
filepath (str): name of the molecular structure file in the working directory
or the full path to the file if it is located in a different folder
outpath (str): path to the output directory
Returns:
list: for each atomic species, a list containing the symbol and the Cartesian coordinates
"""
obabel_error_message = (
"Open Babel converter not found:\n"
"Try: 'sudo apt install openbabel' "
"or download it from http://openbabel.org/wiki/Main_Page \n"
"and make sure you add it to the PATH environment variable."
)
extension = filepath.split(".")[-1].strip().lower()
file_in = filepath.strip()
file_out = os.path.join(outpath, "structure.xyz")
if extension != "xyz":
if not _exec_exists("obabel"):
raise TypeError(obabel_error_message)
try:
subprocess.run(["obabel", "-i" + extension, file_in, "-oxyz", "-O", file_out],
check=True)
except subprocess.CalledProcessError as e:
raise RuntimeError(
"Open Babel error. See the following Open Babel "
"output for details:\n\n {}\n{}".format(e.stdout, e.stderr)
)
else:
copyfile(file_in, file_out)
geometry = []
with open(file_out) as f:
for line in f.readlines()[2:]:
species, x, y, z = line.split()
geometry.append([species, (float(x), float(y), float(z))])
return geometry
|
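read_structure() depends on an _exec_exists() helper that is not shown in the snippet. A minimal stand-in (an assumption, not the project's actual implementation) based on the standard library:

import shutil

def _exec_exists(name):
    """Return True if an executable called `name` is found on the PATH."""
    return shutil.which(name) is not None

print(_exec_exists("obabel"))  # False unless Open Babel is installed and on the PATH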
4,686 |
def calculate_rms(expected_image, actual_image):
"Calculate the per-pixel errors, then compute the root mean square error."
if expected_image.shape != actual_image.shape:
raise _imageComparisonFailure(
"_image sizes do not match expected size: {} "
"actual size {}".format(expected_image.shape, actual_image.shape))
# Convert to float to avoid overflowing finite integer types.
return np.sqrt(((expected_image - actual_image).astype(float) ** 2).mean())
|
def calculate_rms(expected_image, actual_image):
"Calculate the per-pixel errors, then compute the root mean square error."
if expected_image.shape != actual_image.shape:
raise _imageComparisonFailure(
"Image sizes do not match expected size: {} "
"actual size {}".format(expected_image.shape, actual_image.shape))
# Convert to float to avoid overflowing finite integer types.
return np.sqrt(((expected_image - actual_image).astype(float) ** 2).mean())
|
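A worked example of the RMS formula on two tiny arrays; the inputs are already float here, since subtracting unsigned-integer images before the astype(float) cast would wrap around:

import numpy as np

expected = np.array([[10.0, 20.0], [30.0, 40.0]])
actual = np.array([[11.0, 20.0], [29.0, 40.0]])
# per-pixel errors are (-1, 0, 1, 0), so RMS = sqrt((1 + 0 + 1 + 0) / 4) ~= 0.7071
rms = np.sqrt(((expected - actual).astype(float) ** 2).mean())
print(round(rms, 4))  # 0.7071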
33,317 |
def now():
""" Let's standardize how we do "now". """
return datetime.now(timezone.utc)
|
def now():
""" Now now() is a standardized function to obtain "now" when you need it. """
return datetime.now(timezone.utc)
|
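A quick check of what standardizing on datetime.now(timezone.utc) buys: the result is timezone-aware, whereas datetime.utcnow() returns a naive value:

from datetime import datetime, timezone

aware = datetime.now(timezone.utc)
naive = datetime.utcnow()
print(aware.tzinfo)   # UTC
print(naive.tzinfo)   # None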
57,909 |
def redlock_list_scans():
"""
List DevOps Scans
"""
group_by = demisto.args().get('group_by', 'scanId')
page_size = demisto.args().get('page_size', 25)
page_number = demisto.args().get('page_number', 1)
sort = demisto.args().get('sort', None)
filter_type = demisto.args().get('filter_type', 'relative')
filter_time_amount = demisto.args().get('filter_time_amount', 1)
filter_time_unit = demisto.args().get('filter_time_unit', 'day')
filter_user = demisto.args().get('filter_user', None)
filter_status = demisto.args().get('filter_status', None)
filter_asset_type = demisto.args().get('filter_asset_type', None)
filter_asset_name = demisto.args().get('filter_asset_name', None)
filter_start_time = demisto.args().get('filter_start_time', None)
filter_end_time = demisto.args().get('filter_end_time', None)
list_filter = {
'groupBy': group_by,
'page[size]': page_size,
'page[number]': page_number,
'filter[timeType]': filter_type
}
if sort:
list_filter['sort'] = sort
if filter_type == 'relative':
if filter_time_unit and filter_time_amount:
list_filter['filter[timeUnit]'] = filter_time_unit
list_filter['filter[timeAmount]'] = filter_time_amount
else:
return_error('You must specify a filter_time_unit and filter_time_amount with relative type filter')
elif filter_type == 'to_now':
if filter_start_time:
list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format="%m/%d/%Y %H:%M:%S")
else:
return_error('You must specify filter_start_time with to_now type filter')
elif filter_type == 'absolute':
if filter_start_time and filter_end_time:
list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format="%m/%d/%Y %H:%M:%S")
list_filter['filter[endTime]'] = convert_date_to_unix(filter_end_time, date_format="%m/%d/%Y %H:%M:%S")
else:
return_error('You must specify a filter_start_time and filter_end_time with absolute type filter')
if filter_user:
list_filter['filter[user]'] = filter_user
if filter_status:
list_filter['filter[status]'] = filter_status
if filter_asset_type:
list_filter['filter[assetType]'] = filter_asset_type
if filter_asset_name:
list_filter['filter[assetName]'] = filter_asset_name
response = req('GET', 'iac/v2/scans', param_data=list_filter, data={})
if (
not response
or 'data' not in response
or not isinstance(response['data'], list)
):
demisto.results('No results found')
else:
items = response['data']
readable_output = []
for item in items:
readable_output.append({
"ID": item.get('id'),
"Name": item.get('attributes')['name'],
"Type": item.get('attributes')['type'],
"Scan Time": item.get('attributes')['scanTime'],
"User": item.get('attributes')['user']
})
md = tableToMarkdown("Scans List:", readable_output)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': items,
'EntryContext': {'Redlock.Scans(val.id == obj.id)': items},
'HumanReadable': md
})
|
def redlock_list_scans():
"""
List DevOps Scans
"""
args = demisto.args()
group_by = args.get('group_by', 'scanId')
page_size = args.get('page_size', 25)
page_number = args.get('page_number', 1)
sort = args.get('sort', None)
filter_type = args.get('filter_type', 'relative')
filter_time_amount = args.get('filter_time_amount', 1)
filter_time_unit = args.get('filter_time_unit', 'day')
filter_user = args.get('filter_user', None)
filter_status = args.get('filter_status', None)
filter_asset_type = args.get('filter_asset_type', None)
filter_asset_name = args.get('filter_asset_name', None)
filter_start_time = args.get('filter_start_time', None)
filter_end_time = args.get('filter_end_time', None)
list_filter = {
'groupBy': group_by,
'page[size]': page_size,
'page[number]': page_number,
'filter[timeType]': filter_type
}
if sort:
list_filter['sort'] = sort
if filter_type == 'relative':
if filter_time_unit and filter_time_amount:
list_filter['filter[timeUnit]'] = filter_time_unit
list_filter['filter[timeAmount]'] = filter_time_amount
else:
return_error('You must specify a filter_time_unit and filter_time_amount with relative type filter')
elif filter_type == 'to_now':
if filter_start_time:
list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format="%m/%d/%Y %H:%M:%S")
else:
return_error('You must specify filter_start_time with to_now type filter')
elif filter_type == 'absolute':
if filter_start_time and filter_end_time:
list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format="%m/%d/%Y %H:%M:%S")
list_filter['filter[endTime]'] = convert_date_to_unix(filter_end_time, date_format="%m/%d/%Y %H:%M:%S")
else:
return_error('You must specify a filter_start_time and filter_end_time with absolute type filter')
if filter_user:
list_filter['filter[user]'] = filter_user
if filter_status:
list_filter['filter[status]'] = filter_status
if filter_asset_type:
list_filter['filter[assetType]'] = filter_asset_type
if filter_asset_name:
list_filter['filter[assetName]'] = filter_asset_name
response = req('GET', 'iac/v2/scans', param_data=list_filter, data={})
if (
not response
or 'data' not in response
or not isinstance(response['data'], list)
):
demisto.results('No results found')
else:
items = response['data']
readable_output = []
for item in items:
readable_output.append({
"ID": item.get('id'),
"Name": item.get('attributes')['name'],
"Type": item.get('attributes')['type'],
"Scan Time": item.get('attributes')['scanTime'],
"User": item.get('attributes')['user']
})
md = tableToMarkdown("Scans List:", readable_output)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': items,
'EntryContext': {'Redlock.Scans(val.id == obj.id)': items},
'HumanReadable': md
})
|
10,591 |
def _doclink(url):
# assume that if it is relative, it is for docsite, ignore rest
if not url.startswith("http"):
url = get_versioned_doclink(url)
return url
|
def _doclink(url):
# assume that if it is relative, it is for docsite, ignore rest
if not url.startswith("http") and not url.startswith(".."):
url = get_versioned_doclink(url)
return url
|
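A small demo of which URLs the added ".." guard leaves untouched. get_versioned_doclink() is project-specific, so a stand-in is used here purely for illustration:

def get_versioned_doclink(url):
    return "https://docs.example.org/en/latest/" + url  # stand-in only

def _doclink(url):
    # assume that if it is relative, it is for docsite, ignore rest
    if not url.startswith("http") and not url.startswith(".."):
        url = get_versioned_doclink(url)
    return url

print(_doclink("admin_guide.html"))       # rewritten to the docsite
print(_doclink("../relative/page.html"))  # left untouched by the ".." guard
print(_doclink("https://example.com/x"))  # absolute URLs are left untouched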
5,410 |
def test_prepend():
"""
Test to ensure that some text appears at the beginning of a file.
"""
name = "/tmp/etc/motd"
if salt.utils.platform.is_windows():
name = "c:\\tmp\\etc\\motd"
assert not os.path.exists(os.path.split(name)[0])
source = ["salt://motd/hr-messages.tmpl"]
sources = ["salt://motd/devops-messages.tmpl"]
text = ["Trust no one unless you have eaten much salt with him."]
ret = {"name": name, "result": False, "comment": "", "changes": {}}
comt = "Must provide name to file.prepend"
ret.update({"comment": comt, "name": ""})
assert filestate.prepend("") == ret
comt = "source and sources are mutually exclusive"
ret.update({"comment": comt, "name": name})
assert filestate.prepend(name, source=source, sources=sources) == ret
mock_t = MagicMock(return_value=True)
mock_f = MagicMock(return_value=False)
with patch.dict(
filestate.__salt__,
{
"file.directory_exists": mock_f,
"file.makedirs": mock_t,
"file.stats": mock_f,
"cp.get_template": mock_f,
"file.search": mock_f,
"file.prepend": mock_t,
},
):
comt = "The following files will be changed:\n/tmp/etc:" " directory - new\n"
changes = {"/tmp/etc": {"directory": "new"}}
if salt.utils.platform.is_windows():
comt = 'The directory "c:\\tmp\\etc" will be changed'
changes = {"c:\\tmp\\etc": {"directory": "new"}}
ret.update({"comment": comt, "name": name, "changes": changes})
assert filestate.prepend(name, makedirs=True) == ret
with patch.object(os.path, "isabs", mock_f):
comt = "Specified file {} is not an absolute path".format(name)
ret.update({"comment": comt, "changes": {}})
assert filestate.prepend(name) == ret
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "exists", mock_t):
comt = "Failed to load template file {}".format(source)
ret.update({"comment": comt, "name": source, "data": []})
assert filestate.prepend(name, source=source) == ret
ret.pop("data", None)
ret.update({"name": name})
with patch.object(
salt.utils.files, "fopen", MagicMock(mock_open(read_data=""))
):
with patch.dict(filestate.__utils__, {"files.is_text": mock_f}):
with patch.dict(filestate.__opts__, {"test": True}):
change = {"diff": "Replace binary file"}
comt = "File {} is set to be updated".format(name)
ret.update(
{"comment": comt, "result": None, "changes": change}
)
assert filestate.prepend(name, text=text) == ret
with patch.dict(filestate.__opts__, {"test": False}):
comt = "Prepended 1 lines"
ret.update({"comment": comt, "result": True, "changes": {}})
assert filestate.prepend(name, text=text) == ret
|
def test_prepend():
"""
Test to ensure that some text appears at the beginning of a file.
"""
name = "/tmp/etc/motd"
if salt.utils.platform.is_windows():
name = "c:\\tmp\\etc\\motd"
assert not os.path.exists(os.path.split(name)[0])
source = ["salt://motd/hr-messages.tmpl"]
sources = ["salt://motd/devops-messages.tmpl"]
text = ["Trust no one unless you have eaten much salt with him."]
ret = {"name": name, "result": False, "comment": "", "changes": {}}
comt = "Must provide name to file.prepend"
ret.update({"comment": comt, "name": ""})
assert filestate.prepend("") == ret
comt = "source and sources are mutually exclusive"
ret.update({"comment": comt, "name": name})
assert filestate.prepend(name, source=source, sources=sources) == ret
mock_t = MagicMock(return_value=True)
mock_f = MagicMock(return_value=False)
with patch.dict(
filestate.__salt__,
{
"file.directory_exists": mock_f,
"file.makedirs": mock_t,
"file.stats": mock_f,
"cp.get_template": mock_f,
"file.search": mock_f,
"file.prepend": mock_t,
},
):
comt = "The following files will be changed:\n/tmp/etc: directory - new\n"
changes = {"/tmp/etc": {"directory": "new"}}
if salt.utils.platform.is_windows():
comt = 'The directory "c:\\tmp\\etc" will be changed'
changes = {"c:\\tmp\\etc": {"directory": "new"}}
ret.update({"comment": comt, "name": name, "changes": changes})
assert filestate.prepend(name, makedirs=True) == ret
with patch.object(os.path, "isabs", mock_f):
comt = "Specified file {} is not an absolute path".format(name)
ret.update({"comment": comt, "changes": {}})
assert filestate.prepend(name) == ret
with patch.object(os.path, "isabs", mock_t):
with patch.object(os.path, "exists", mock_t):
comt = "Failed to load template file {}".format(source)
ret.update({"comment": comt, "name": source, "data": []})
assert filestate.prepend(name, source=source) == ret
ret.pop("data", None)
ret.update({"name": name})
with patch.object(
salt.utils.files, "fopen", MagicMock(mock_open(read_data=""))
):
with patch.dict(filestate.__utils__, {"files.is_text": mock_f}):
with patch.dict(filestate.__opts__, {"test": True}):
change = {"diff": "Replace binary file"}
comt = "File {} is set to be updated".format(name)
ret.update(
{"comment": comt, "result": None, "changes": change}
)
assert filestate.prepend(name, text=text) == ret
with patch.dict(filestate.__opts__, {"test": False}):
comt = "Prepended 1 lines"
ret.update({"comment": comt, "result": True, "changes": {}})
assert filestate.prepend(name, text=text) == ret
|
42,385 |
def main():
#Load Lightning DataModule
dat = metabrick(num_workers=0)
dat.setup('fit') #allows for input / output features to be configured in the model
#Load Lightning Module
model = surv_model(lr=1e-3, in_features=dat.in_dims, out_features=dat.out_dims)
trainer = pl.Trainer(gpus = 0, num_sanity_val_steps = 0, max_epochs = 20, fast_dev_run = False)
#Train model
trainer.fit(model,dat)
#Load model from best checkpoint & freeze
print('Running in Evaluation Mode...')
model.freeze()
#Setup test data (prepared from lightning module)
dat.setup('test')
#Predict survival on testing dataset
output = model(dat.x_test)
surv = logistic_hazard.output2surv(output)
surv_df = pd.DataFrame(surv.numpy().transpose(), dat.labtrans.cuts)
ev = EvalSurv(surv_df, dat.df_test.duration.values, dat.df_test.event.values)
#Print evaluation metrics
print(f"Concordance: {ev.concordance_td()}")
|
def main():
#Load Lightning DataModule
dat = metabrick(num_workers=0)
dat.setup('fit') #allows for input / output features to be configured in the model
#Load Lightning Module
model = surv_model(lr=1e-3, in_features=dat.in_dims, out_features=dat.out_dims)
trainer = pl.Trainer(gpus=0, num_sanity_val_steps=0, max_epochs=20, fast_dev_run=False)
#Train model
trainer.fit(model,dat)
#Load model from best checkpoint & freeze
print('Running in Evaluation Mode...')
model.freeze()
#Setup test data (prepared from lightning module)
dat.setup('test')
#Predict survival on testing dataset
output = model(dat.x_test)
surv = logistic_hazard.output2surv(output)
surv_df = pd.DataFrame(surv.numpy().transpose(), dat.labtrans.cuts)
ev = EvalSurv(surv_df, dat.df_test.duration.values, dat.df_test.event.values)
#Print evaluation metrics
print(f"Concordance: {ev.concordance_td()}")
|
12,994 |
def test_order_query_with_filter_channels_without_many_channel(
orders_query_with_filter,
staff_api_client,
permission_manage_orders,
orders,
channel_USD,
channel_PLN,
other_channel_USD,
):
# given
Order.objects.create(channel=other_channel_USD)
channel_usd_id = graphene.Node.to_global_id("Channel", channel_USD.pk)
channel_pln_id = graphene.Node.to_global_id("Channel", channel_PLN.pk)
variables = {"filter": {"channels": [channel_pln_id, channel_usd_id]}}
# when
response = staff_api_client.post_graphql(
orders_query_with_filter, variables, permissions=(permission_manage_orders,)
)
# then
content = get_graphql_content(response)
orders = content["data"]["orders"]["edges"]
assert len(orders) == 5
assert Order.objects.non_draft().count() == 6
|
def test_order_query_with_filter_channels_with_many_channel(
orders_query_with_filter,
staff_api_client,
permission_manage_orders,
orders,
channel_USD,
channel_PLN,
other_channel_USD,
):
# given
Order.objects.create(channel=other_channel_USD)
channel_usd_id = graphene.Node.to_global_id("Channel", channel_USD.pk)
channel_pln_id = graphene.Node.to_global_id("Channel", channel_PLN.pk)
variables = {"filter": {"channels": [channel_pln_id, channel_usd_id]}}
# when
response = staff_api_client.post_graphql(
orders_query_with_filter, variables, permissions=(permission_manage_orders,)
)
# then
content = get_graphql_content(response)
orders = content["data"]["orders"]["edges"]
assert len(orders) == 5
assert Order.objects.non_draft().count() == 6
|
13,574 |
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
        - `'LM'`: select eigenvalues with largest |w[i]|
        - `'SM'`: select eigenvalues with smallest |w[i]|
        - `'LR'`: select eigenvalues with largest Re(w[i])
        - `'SR'`: select eigenvalues with smallest Re(w[i])
        - `'LI'`: select eigenvalues with largest Im(w[i])
        - `'SI'`: select eigenvalues with smallest Im(w[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
        - `'LM'`: select eigenvalues with largest |w[i]|
        - `'SM'`: select eigenvalues with smallest |w[i]|
        - `'LR'`: select eigenvalues with largest Re(w[i])
        - `'SR'`: select eigenvalues with smallest Re(w[i])
        - `'LI'`: select eigenvalues with largest Im(w[i])
        - `'SI'`: select eigenvalues with smallest Im(w[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(ew.real)
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
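The `which` switch above only changes the key handed to np.argsort. A tiny NumPy-only demo of the six orderings, independent of the |Operator| machinery:

import numpy as np

ew = np.array([2 + 1j, -3 + 0j, 0.5 - 2j])
orderings = {
    'LM': np.argsort(-np.abs(ew)),       # largest magnitude first
    'SM': np.argsort(np.abs(ew)),        # smallest magnitude first
    'LR': np.argsort(-ew.real),          # largest real part first
    'SR': np.argsort(ew.real),           # smallest real part first
    'LI': np.argsort(-np.abs(ew.imag)),  # largest |imaginary part| first
    'SI': np.argsort(np.abs(ew.imag)),   # smallest |imaginary part| first
}
for which, idx in orderings.items():
    print(which, ew[idx])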
4,782 |
def test_hist_stepfilled_geometry():
bins = [0, 1, 2, 3]
data = [0, 0, 1, 1, 1, 2]
_, _, (polygon, ) = plt.hist(data,
bins=bins,
histtype='stepfilled')
xy = [[0, 0], [0, 2], [1, 2], [1, 3], [2, 3], [2, 1], [3, 1],
[3, 0], [2, 0], [2, 0], [1, 0], [1, 0], [0, 0]]
assert (polygon.get_xy() == xy).all()
|
def test_hist_stepfilled_geometry():
bins = [0, 1, 2, 3]
data = [0, 0, 1, 1, 1, 2]
_, _, (polygon, ) = plt.hist(data,
bins=bins,
histtype='stepfilled')
xy = [[0, 0], [0, 2], [1, 2], [1, 3], [2, 3], [2, 1], [3, 1],
[3, 0], [2, 0], [2, 0], [1, 0], [1, 0], [0, 0]]
assert (polygon.get_xy() == xy).all()
|
9,849 |
def member_normalize(member_spec):
''' Transforms the member module arguments into a valid WAPI struct
This function will transform the arguments into a structure that
is a valid WAPI structure in the format of:
{
key: <value>,
}
It will remove any arguments that are set to None since WAPI will error on
that condition.
The remainder of the value validation is performed by WAPI
Some parameters in ib_spec are passed as a list in order to pass the validation for elements.
In this function, they are converted to dictionary.
'''
member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting',
'pre_provisioning', 'network_setting', 'v6_network_setting',
'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting',
'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting']
    for key in list(member_spec):  # iterate over a copy of the keys since entries may be deleted below
if key in member_elements and not(member_spec[key] is None):
member_spec[key] = member_spec[key][0]
if isinstance(member_spec[key], dict):
member_spec[key] = member_normalize(member_spec[key])
elif isinstance(member_spec[key], list):
for x in member_spec[key]:
if isinstance(x, dict):
x = member_normalize(x)
elif member_spec[key] is None:
del member_spec[key]
return member_spec
|
def member_normalize(member_spec):
''' Transforms the member module arguments into a valid WAPI struct
This function will transform the arguments into a structure that
is a valid WAPI structure in the format of:
{
key: <value>,
}
It will remove any arguments that are set to None since WAPI will error on
that condition.
The remainder of the value validation is performed by WAPI
Some parameters in ib_spec are passed as a list in order to pass the validation for elements.
In this function, they are converted to dictionary.
'''
member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting',
'pre_provisioning', 'network_setting', 'v6_network_setting',
'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting',
'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting']
    for key in list(member_spec):  # iterate over a copy of the keys since entries may be deleted below
if key in member_elements and member_spec[key] is not None:
member_spec[key] = member_spec[key][0]
if isinstance(member_spec[key], dict):
member_spec[key] = member_normalize(member_spec[key])
elif isinstance(member_spec[key], list):
for x in member_spec[key]:
if isinstance(x, dict):
x = member_normalize(x)
elif member_spec[key] is None:
del member_spec[key]
return member_spec
|
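An illustrative before/after of the normalization (the field names below are made up for the example): single-element lists for the known member sub-settings are unwrapped into plain dicts, and None-valued keys are dropped, recursively:

module_args = {
    'host_name': 'member01.example.com',
    'vip_setting': [{'address': '10.0.0.5', 'subnet_mask': '255.255.255.0', 'gateway': None}],
    'lan2_port_setting': None,
}
# After member_normalize(module_args) the structure is expected to look like:
# {
#     'host_name': 'member01.example.com',
#     'vip_setting': {'address': '10.0.0.5', 'subnet_mask': '255.255.255.0'},
# }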
52,975 |
def boolbox(msg="Shall I continue?", title=" ",
choices=("[T]rue", "[F]alse"), image=None,
default_choice='[T]rue', cancel_choice='[F]alse'):
"""
    The ``boolbox()`` (boolean box) displays two buttons. Returns ``True``
    if the first button is chosen. Otherwise returns ``False``.
import easygui
message = "What do they say?"
title = "Romantic Question"
if easygui.boolbox(message, title, ["They love me", "They love me not"]):
easygui.msgbox('You should send them flowers.')
else:
easygui.msgbox('It was not meant to be.')
:param str msg: The message shown in the center of the dialog window.
:param str title: The window title text.
:param list choices: A list or tuple of strings for the buttons' text.
:param str image: The filename of an image to display in the dialog window.
:param str default_choice: The text of the default selected button.
:param str cancel_choice: If the user presses the 'X' close, which button
should be pressed
:return: `True` if first button pressed or dialog is cancelled, `False`
if second button is pressed.
"""
if len(choices) != 2:
raise AssertionError(
'boolbox() takes exactly 2 choices! Consider using indexbox() instead.'
)
reply = buttonbox(msg=msg,
title=title,
choices=choices,
image=image,
default_choice=default_choice,
cancel_choice=cancel_choice)
if reply == choices[0]:
return True # The first button (True) was selected.
elif reply == choices[1]:
return False # The second button (False) was selected.
elif reply is None:
return None # The window was closed.
assert False, "The user selected an unexpected response."
|
def boolbox(msg="Shall I continue?", title=" ",
choices=("[T]rue", "[F]alse"), image=None,
default_choice='[T]rue', cancel_choice='[F]alse'):
"""
    The ``boolbox()`` (boolean box) displays two buttons. Returns ``True``
    if the first button is chosen. Otherwise returns ``False``.
import easygui
message = "What do they say?"
title = "Romantic Question"
if easygui.boolbox(message, title, ["They love me", "They love me not"]):
easygui.msgbox('You should send them flowers.')
else:
easygui.msgbox('It was not meant to be.')
:param str msg: The message shown in the center of the dialog window.
:param str title: The window title text.
:param list choices: A list or tuple of strings for the buttons' text.
:param str image: The filename of an image to display in the dialog window.
:param str default_choice: The text of the default selected button.
:param str cancel_choice: If the user presses the 'X' close, which button
should be pressed
:return: ``True`` if first button pressed or dialog is cancelled, ``False``
if second button is pressed.
"""
if len(choices) != 2:
raise AssertionError(
'boolbox() takes exactly 2 choices! Consider using indexbox() instead.'
)
reply = buttonbox(msg=msg,
title=title,
choices=choices,
image=image,
default_choice=default_choice,
cancel_choice=cancel_choice)
if reply == choices[0]:
return True # The first button (True) was selected.
elif reply == choices[1]:
return False # The second button (False) was selected.
elif reply is None:
return None # The window was closed.
assert False, "The user selected an unexpected response."
|
19,882 |
def main():
"""
Build the icons using Icomoon. Also optimize the svgs.
"""
runner = None
try:
args = arg_getters.get_selenium_runner_args()
new_icons = get_icons_for_building(args.icomoon_json_path, args.devicon_json_path, args.token)
if len(new_icons) == 0:
sys.exit("No files need to be uploaded. Ending script...")
print(f"There are {len(new_icons)} icons to be build. Here are they:", *new_icons, sep = "\n")
print("Begin optimizing files...")
optimize_svgs(new_icons, args.icons_folder_path)
print("Updating the icomoon json...")
update_icomoon_json(new_icons, args.icomoon_json_path)
print("Start the building icons process...")
icon_svgs = filehandler.get_svgs_paths(
new_icons, args.icons_folder_path, icon_versions_only=True)
zip_name = "devicon-v1.0.zip"
zip_path = Path(args.download_path, zip_name)
screenshot_folder = filehandler.create_screenshot_folder("./")
runner = BuildSeleniumRunner(args.download_path,
args.geckodriver_path, args.headless)
runner.build_icons(args.icomoon_json_path, zip_path,
icon_svgs, screenshot_folder)
filehandler.extract_files(str(zip_path), args.download_path)
filehandler.rename_extracted_files(args.download_path)
print("Creating the release message by querying the GitHub API...")
get_release_message(args.token)
print("Closing the issues with the label of `in-develop`.")
issues = api_handler.get_issues_by_labels(args.token, ["in-develop"])
issue_nums = [issue_num["number"] for issue_num in issues]
api_handler.close_issues(args.token, issue_nums)
print("Task completed.")
except TimeoutException as e:
util.exit_with_err("Selenium Time Out Error: \n" + str(e))
except Exception as e:
util.exit_with_err(e)
finally:
if runner is not None:
runner.close()
|
def main():
"""
Build the icons using Icomoon. Also optimize the svgs.
"""
runner = None
try:
args = arg_getters.get_selenium_runner_args()
new_icons = get_icons_for_building(args.icomoon_json_path, args.devicon_json_path, args.token)
if len(new_icons) == 0:
sys.exit("No files need to be uploaded. Ending script...")
print(f"There are {len(new_icons)} icons to be build. Here are they:", *new_icons, sep = "\n")
print("Begin optimizing files...")
optimize_svgs(new_icons, args.icons_folder_path)
print("Updating the icomoon json...")
update_icomoon_json(new_icons, args.icomoon_json_path)
print("Start the building icons process...")
icon_svgs = filehandler.get_svgs_paths(
new_icons, args.icons_folder_path, icon_versions_only=True)
zip_name = "devicon-v1.0.zip"
zip_path = Path(args.download_path, zip_name)
screenshot_folder = filehandler.create_screenshot_folder("./")
runner = BuildSeleniumRunner(args.download_path,
args.geckodriver_path, args.headless)
runner.build_icons(args.icomoon_json_path, zip_path,
icon_svgs, screenshot_folder)
filehandler.extract_files(str(zip_path), args.download_path)
filehandler.rename_extracted_files(args.download_path)
print("Creating the release message by querying the GitHub API...")
get_release_message(args.token)
print("Closing issues with the `in-develop` label.")
issues = api_handler.get_issues_by_labels(args.token, ["in-develop"])
issue_nums = [issue_num["number"] for issue_num in issues]
api_handler.close_issues(args.token, issue_nums)
print("Task completed.")
except TimeoutException as e:
util.exit_with_err("Selenium Time Out Error: \n" + str(e))
except Exception as e:
util.exit_with_err(e)
finally:
if runner is not None:
runner.close()
|
46,540 |
def build_spec(version: str, source_files: List[str]) -> str:
all_spescs = [get_spec(spec) for spec in source_files]
spec_object = all_spescs[0]
for value in all_spescs[1:]:
spec_object = combine_spec_objects(spec_object, value)
dependency_order_spec(spec_object)
return objects_to_spec(spec_object, version_imports[version], version)
|
def build_spec(version: str, source_files: List[str]) -> str:
all_specs = [get_spec(spec) for spec in source_files]
    spec_object = all_specs[0]
    for value in all_specs[1:]:
spec_object = combine_spec_objects(spec_object, value)
dependency_order_spec(spec_object)
return objects_to_spec(spec_object, version_imports[version], version)
|
10,854 |
def run(manual_args=None):
args = parse_args(manual_args)
configure_logger(args.debug)
args.text = [p.decode('utf-8') if util.PY2 and not isinstance(p, unicode) else p for p in args.text]
if args.version:
version_str = "{0} version {1} (Python {3})".format(jrnl.__title__, jrnl.__version__, sys.version)
print(util.py2encode(version_str))
sys.exit(0)
try:
config = install.load_or_install_jrnl()
except UserAbort as err:
util.prompt("\n{}".format(err))
sys.exit(1)
if args.ls:
util.prnt(list_journals(config))
sys.exit(0)
log.debug('Using configuration "%s"', config)
original_config = config.copy()
# If the first textual argument points to a journal file,
# use this!
journal_name = args.text[0] if (args.text and args.text[0] in config['journals']) else 'default'
    if journal_name != 'default':
args.text = args.text[1:]
elif "default" not in config['journals']:
util.prompt("No default journal configured.")
util.prompt(list_journals(config))
sys.exit(1)
config = util.scope_config(config, journal_name)
# If the first remaining argument looks like e.g. '-3', interpret that as a limiter
if not args.limit and args.text and args.text[0].startswith("-"):
try:
args.limit = int(args.text[0].lstrip("-"))
args.text = args.text[1:]
except:
pass
log.debug('Using journal "%s"', journal_name)
mode_compose, mode_export, mode_import = guess_mode(args, config)
# How to quit writing?
if "win32" in sys.platform:
_exit_multiline_code = "on a blank line, press Ctrl+Z and then Enter"
else:
_exit_multiline_code = "press Ctrl+D"
if mode_compose and not args.text:
if not sys.stdin.isatty():
# Piping data into jrnl
raw = util.py23_read()
elif config['editor']:
template = ""
if config['template']:
try:
template = open(config['template']).read()
except:
util.prompt("[Could not read template at '']".format(config['template']))
sys.exit(1)
raw = util.get_text_from_editor(config, template)
else:
try:
raw = util.py23_read("[Compose Entry; " + _exit_multiline_code + " to finish writing]\n")
except KeyboardInterrupt:
util.prompt("[Entry NOT saved to journal.]")
sys.exit(0)
if raw:
args.text = [raw]
else:
mode_compose = False
# This is where we finally open the journal!
try:
journal = Journal.open_journal(journal_name, config)
except KeyboardInterrupt:
util.prompt("[Interrupted while opening journal]".format(journal_name))
sys.exit(1)
# Import mode
if mode_import:
plugins.get_importer(args.import_).import_(journal, args.input)
# Writing mode
elif mode_compose:
raw = " ".join(args.text).strip()
if util.PY2 and type(raw) is not unicode:
raw = raw.decode(sys.getfilesystemencoding())
log.debug('Appending raw line "%s" to journal "%s"', raw, journal_name)
journal.new_entry(raw)
util.prompt("[Entry added to {0} journal]".format(journal_name))
journal.write()
if not mode_compose:
old_entries = journal.entries
if args.on_date:
args.start_date = args.end_date = args.on_date
journal.filter(tags=args.text,
start_date=args.start_date, end_date=args.end_date,
strict=args.strict,
short=args.short,
starred=args.starred,
exclude=args.excluded)
journal.limit(args.limit)
# Reading mode
if not mode_compose and not mode_export and not mode_import:
print(util.py2encode(journal.pprint()))
# Various export modes
elif args.short:
print(util.py2encode(journal.pprint(short=True)))
elif args.tags:
print(util.py2encode(plugins.get_exporter("tags").export(journal)))
elif args.export is not False:
exporter = plugins.get_exporter(args.export)
print(exporter.export(journal, args.output))
elif args.encrypt is not False:
encrypt(journal, filename=args.encrypt)
# Not encrypting to a separate file: update config!
if not args.encrypt:
update_config(original_config, {"encrypt": True}, journal_name, force_local=True)
install.save_config(original_config)
elif args.decrypt is not False:
decrypt(journal, filename=args.decrypt)
# Not decrypting to a separate file: update config!
if not args.decrypt:
update_config(original_config, {"encrypt": False}, journal_name, force_local=True)
install.save_config(original_config)
elif args.edit:
if not config['editor']:
util.prompt("[{1}ERROR{2}: You need to specify an editor in {0} to use the --edit function.]".format(install.CONFIG_FILE_PATH, ERROR_COLOR, RESET_COLOR))
sys.exit(1)
other_entries = [e for e in old_entries if e not in journal.entries]
# Edit
old_num_entries = len(journal)
edited = util.get_text_from_editor(config, journal.editable_str())
journal.parse_editable_str(edited)
num_deleted = old_num_entries - len(journal)
num_edited = len([e for e in journal.entries if e.modified])
prompts = []
if num_deleted:
prompts.append("{0} {1} deleted".format(num_deleted, "entry" if num_deleted == 1 else "entries"))
if num_edited:
            prompts.append("{0} {1} modified".format(num_edited, "entry" if num_edited == 1 else "entries"))
if prompts:
util.prompt("[{0}]".format(", ".join(prompts).capitalize()))
journal.entries += other_entries
journal.sort()
journal.write()
|
def run(manual_args=None):
args = parse_args(manual_args)
configure_logger(args.debug)
args.text = [p.decode('utf-8') if util.PY2 and not isinstance(p, unicode) else p for p in args.text]
if args.version:
version_str = "{0} version {1} (Python {2})".format(jrnl.__title__, jrnl.__version__, sys.version)
print(util.py2encode(version_str))
sys.exit(0)
try:
config = install.load_or_install_jrnl()
except UserAbort as err:
util.prompt("\n{}".format(err))
sys.exit(1)
if args.ls:
util.prnt(list_journals(config))
sys.exit(0)
log.debug('Using configuration "%s"', config)
original_config = config.copy()
# If the first textual argument points to a journal file,
# use this!
journal_name = args.text[0] if (args.text and args.text[0] in config['journals']) else 'default'
    if journal_name != 'default':
args.text = args.text[1:]
elif "default" not in config['journals']:
util.prompt("No default journal configured.")
util.prompt(list_journals(config))
sys.exit(1)
config = util.scope_config(config, journal_name)
# If the first remaining argument looks like e.g. '-3', interpret that as a limiter
if not args.limit and args.text and args.text[0].startswith("-"):
try:
args.limit = int(args.text[0].lstrip("-"))
args.text = args.text[1:]
except:
pass
log.debug('Using journal "%s"', journal_name)
mode_compose, mode_export, mode_import = guess_mode(args, config)
# How to quit writing?
if "win32" in sys.platform:
_exit_multiline_code = "on a blank line, press Ctrl+Z and then Enter"
else:
_exit_multiline_code = "press Ctrl+D"
if mode_compose and not args.text:
if not sys.stdin.isatty():
# Piping data into jrnl
raw = util.py23_read()
elif config['editor']:
template = ""
if config['template']:
try:
template = open(config['template']).read()
except:
                    util.prompt("[Could not read template at '{}']".format(config['template']))
sys.exit(1)
raw = util.get_text_from_editor(config, template)
else:
try:
raw = util.py23_read("[Compose Entry; " + _exit_multiline_code + " to finish writing]\n")
except KeyboardInterrupt:
util.prompt("[Entry NOT saved to journal.]")
sys.exit(0)
if raw:
args.text = [raw]
else:
mode_compose = False
# This is where we finally open the journal!
try:
journal = Journal.open_journal(journal_name, config)
except KeyboardInterrupt:
        util.prompt("[Interrupted while opening journal {0}]".format(journal_name))
sys.exit(1)
# Import mode
if mode_import:
plugins.get_importer(args.import_).import_(journal, args.input)
# Writing mode
elif mode_compose:
raw = " ".join(args.text).strip()
if util.PY2 and type(raw) is not unicode:
raw = raw.decode(sys.getfilesystemencoding())
log.debug('Appending raw line "%s" to journal "%s"', raw, journal_name)
journal.new_entry(raw)
util.prompt("[Entry added to {0} journal]".format(journal_name))
journal.write()
if not mode_compose:
old_entries = journal.entries
if args.on_date:
args.start_date = args.end_date = args.on_date
journal.filter(tags=args.text,
start_date=args.start_date, end_date=args.end_date,
strict=args.strict,
short=args.short,
starred=args.starred,
exclude=args.excluded)
journal.limit(args.limit)
# Reading mode
if not mode_compose and not mode_export and not mode_import:
print(util.py2encode(journal.pprint()))
# Various export modes
elif args.short:
print(util.py2encode(journal.pprint(short=True)))
elif args.tags:
print(util.py2encode(plugins.get_exporter("tags").export(journal)))
elif args.export is not False:
exporter = plugins.get_exporter(args.export)
print(exporter.export(journal, args.output))
elif args.encrypt is not False:
encrypt(journal, filename=args.encrypt)
# Not encrypting to a separate file: update config!
if not args.encrypt:
update_config(original_config, {"encrypt": True}, journal_name, force_local=True)
install.save_config(original_config)
elif args.decrypt is not False:
decrypt(journal, filename=args.decrypt)
# Not decrypting to a separate file: update config!
if not args.decrypt:
update_config(original_config, {"encrypt": False}, journal_name, force_local=True)
install.save_config(original_config)
elif args.edit:
if not config['editor']:
util.prompt("[{1}ERROR{2}: You need to specify an editor in {0} to use the --edit function.]".format(install.CONFIG_FILE_PATH, ERROR_COLOR, RESET_COLOR))
sys.exit(1)
other_entries = [e for e in old_entries if e not in journal.entries]
# Edit
old_num_entries = len(journal)
edited = util.get_text_from_editor(config, journal.editable_str())
journal.parse_editable_str(edited)
num_deleted = old_num_entries - len(journal)
num_edited = len([e for e in journal.entries if e.modified])
prompts = []
if num_deleted:
prompts.append("{0} {1} deleted".format(num_deleted, "entry" if num_deleted == 1 else "entries"))
if num_edited:
            prompts.append("{0} {1} modified".format(num_edited, "entry" if num_edited == 1 else "entries"))
if prompts:
util.prompt("[{0}]".format(", ".join(prompts).capitalize()))
journal.entries += other_entries
journal.sort()
journal.write()
|
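The leading-dash shortcut above (treating a first argument like '-3' as an entry limit) can be isolated into a small helper; a minimal sketch, independent of jrnl's argument object:

def pop_leading_limit(words):
    """If the first word looks like '-3', return (3, remaining words)."""
    if words and words[0].startswith("-"):
        try:
            return int(words[0].lstrip("-")), words[1:]
        except ValueError:
            pass
    return None, words

print(pop_leading_limit(["-3", "@work"]))   # (3, ['@work'])
print(pop_leading_limit(["@work"]))         # (None, ['@work'])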
35,650 |
def wide_resnet101_2(weights: Optional[WideResNet101_2Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
if "pretrained" in kwargs:
warnings.warn("The argument pretrained is deprecated, please use weights instead.")
weights = WideResNet101_2Weights.ImageNet1K_RefV1 if kwargs.pop("pretrained") else None
weights = WideResNet101_2Weights.verify(weights)
kwargs["width_per_group"] = 64 * 2
return _resnet(BasicBlock, [3, 4, 23, 3], weights, progress, **kwargs)
|
def wide_resnet101_2(weights: Optional[WideResNet101_2Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
if "pretrained" in kwargs:
warnings.warn("The argument pretrained is deprecated, please use weights instead.")
weights = WideResNet101_2Weights.ImageNet1K_RefV1 if kwargs.pop("pretrained") else None
weights = WideResNet101_2Weights.verify(weights)
kwargs["width_per_group"] = 64 * 2
return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
|
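The only architectural change in the wide variant is doubling width_per_group (64 * 2 = 128), which widens the inner 3x3 convolution of each bottleneck block. A small arithmetic sketch of that scaling, assuming the torchvision-style width formula int(planes * (base_width / 64.0)) * groups:

def bottleneck_inner_width(planes, base_width=64, groups=1):
    # assumed torchvision-style scaling of the 3x3 conv width
    return int(planes * (base_width / 64.0)) * groups

for planes in (64, 128, 256, 512):
    print(planes,
          bottleneck_inner_width(planes, base_width=64),    # standard ResNet
          bottleneck_inner_width(planes, base_width=128))   # wide variant: doubled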
42,893 |
def about():
"""About box for Strawberry Fields.
Prints the installed version numbers for SF and its dependencies,
and some system info. Please include this information in bug reports.
"""
import sys
import platform
import os
import numpy
import scipy
# a QuTiP-style infobox
print('\nStrawberry Fields: a Python library for continuous variable quantum circuits.')
print('Copyright 2019 Xanadu Quantum Technologies Inc.\n')
print('Python version: {}.{}.{}'.format(*sys.version_info[0:3]))
print('Platform info: {}'.format(platform.platform()))
print('Installation path: {}'.format(os.path.dirname(__file__)))
print('Strawberry Fields version: {}'.format(__version__))
print('Numpy version: {}'.format(numpy.__version__))
print('Scipy version: {}'.format(scipy.__version__))
try:
import tensorflow
tf_version = tensorflow.__version__
except ModuleNotFoundError:
tf_version = None
print('TensorFlow version: {}'.format(tf_version))
|
def about():
"""About box for Strawberry Fields.
Prints the installed version numbers for SF and its dependencies,
and some system info. Please include this information in bug reports.
"""
import sys
import platform
import os
import numpy
import scipy
# a QuTiP-style infobox
print('\nStrawberry Fields: a Python library for continuous-variable quantum circuits.')
print('Copyright 2019 Xanadu Quantum Technologies Inc.\n')
print('Python version: {}.{}.{}'.format(*sys.version_info[0:3]))
print('Platform info: {}'.format(platform.platform()))
print('Installation path: {}'.format(os.path.dirname(__file__)))
print('Strawberry Fields version: {}'.format(__version__))
print('Numpy version: {}'.format(numpy.__version__))
print('Scipy version: {}'.format(scipy.__version__))
try:
import tensorflow
tf_version = tensorflow.__version__
except ModuleNotFoundError:
tf_version = None
print('TensorFlow version: {}'.format(tf_version))
|
36,197 |
def generate_callout_field():
value = ' '.join(fake.words(nb=10))
return generate_field('callout', value)
|
def generate_callout_field():
value = Faker('sentence', nb_words=10)
return generate_field('callout', value)
|
22,768 |
def generate_csr(privkey: util.Key, names: Set[str], path: str,
must_staple: bool = False, strict_permissions: bool = True) -> util.CSR:
"""Initialize a CSR with the given private key.
:param privkey: Key to include in the CSR
:type privkey: :class:`certbot.util.Key`
:param set names: `str` names to include in the CSR
:param str path: Certificate save directory.
:param boolean must_staple: If true, include the TLS Feature extension "OCSP Must Staple"
:param boolean strict_permissions: If true, the CSR file will be saved with strict
permissions (POSIX mode 0600).
:returns: CSR
:rtype: :class:`certbot.util.CSR`
"""
csr_pem = acme_crypto_util.make_csr(
privkey.pem, names, must_staple=must_staple)
# Save CSR
util.make_or_verify_dir(path, 0o755, strict_permissions)
csr_f, csr_filename = util.unique_file(
os.path.join(path, "csr-certbot.pem"), 0o644, "wb")
with csr_f:
csr_f.write(csr_pem)
logger.debug("Creating CSR: %s", csr_filename)
return util.CSR(csr_filename, csr_pem, "pem")
|
def generate_csr(privkey: util.Key, names: Set[str], path: str,
must_staple: bool = False, strict_permissions: bool = True) -> util.CSR:
"""Initialize a CSR with the given private key.
:param privkey: Key to include in the CSR
:type privkey: :class:`certbot.util.Key`
:param set names: `str` names to include in the CSR
:param str path: Certificate save directory.
:param boolean must_staple: If true, include the TLS Feature extension "OCSP Must Staple"
:param bool strict_permissions: If true, the CSR file will be saved with strict
permissions (POSIX mode 0600).
:returns: CSR
:rtype: :class:`certbot.util.CSR`
"""
csr_pem = acme_crypto_util.make_csr(
privkey.pem, names, must_staple=must_staple)
# Save CSR
util.make_or_verify_dir(path, 0o755, strict_permissions)
csr_f, csr_filename = util.unique_file(
os.path.join(path, "csr-certbot.pem"), 0o644, "wb")
with csr_f:
csr_f.write(csr_pem)
logger.debug("Creating CSR: %s", csr_filename)
return util.CSR(csr_filename, csr_pem, "pem")
|
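generate_csr delegates the actual CSR construction to acme_crypto_util.make_csr. For illustration only, a roughly equivalent standalone sketch using the cryptography package (not certbot's helper; names list and common name are illustrative, and a recent cryptography version is assumed):

from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
names = ["example.com", "www.example.com"]
csr = (
    x509.CertificateSigningRequestBuilder()
    .subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, names[0])]))
    .add_extension(x509.SubjectAlternativeName([x509.DNSName(n) for n in names]),
                   critical=False)
    .sign(key, hashes.SHA256())
)
csr_pem = csr.public_bytes(serialization.Encoding.PEM)
print(csr_pem.decode().splitlines()[0])   # -----BEGIN CERTIFICATE REQUEST-----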
29,677 |
def load_cluster_dump(url: str):
if url.endswith(".msgpack.gz"):
mode = "rb"
reader = msgpack.unpack
elif url.endswith(".yaml"):
import yaml
mode = "r"
reader = yaml.safe_load
else:
raise ValueError(f"url ({url}) must have a .msgpack.gz or .yaml suffix")
with fsspec.open(url, mode, compression="infer") as f:
return reader(f)
|
def load_cluster_dump(url: str) -> dict:
if url.endswith(".msgpack.gz"):
mode = "rb"
reader = msgpack.unpack
elif url.endswith(".yaml"):
import yaml
mode = "r"
reader = yaml.safe_load
else:
raise ValueError(f"url ({url}) must have a .msgpack.gz or .yaml suffix")
with fsspec.open(url, mode, compression="infer") as f:
return reader(f)
|
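The suffix-based dispatch above can be mirrored with standard-library readers; a simplified analogue using gzip + json instead of msgpack, purely to illustrate the pattern (fsspec's compression='infer' is replaced by explicit gzip handling):

import gzip
import json

def load_dump(path: str) -> dict:
    # simplified analogue of load_cluster_dump: pick a reader by file suffix
    if path.endswith(".json.gz"):
        with gzip.open(path, "rt") as f:
            return json.load(f)
    if path.endswith(".json"):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"path ({path}) must have a .json.gz or .json suffix")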
15,254 |
def _get_events(
hass,
start_day,
end_day,
entity_id=None,
filters=None,
entities_filter=None,
entity_matches_only=None,
):
"""Get events for a period of time."""
entity_attr_cache = EntityAttributeCache(hass)
context_lookup = {None: None}
def yield_events(query):
"""Yield Events that are not filtered away."""
for row in query.yield_per(1000):
event = LazyEventPartialState(row)
context_lookup.setdefault(event.context_id, event)
if _keep_event(hass, event, entities_filter):
yield event
with session_scope(hass=hass) as session:
if entity_id is not None:
entity_ids = [entity_id.lower()]
entities_filter = generate_filter([], entity_ids, [], [])
apply_sql_entities_filter = False
else:
entity_ids = None
apply_sql_entities_filter = True
old_state = aliased(States, name="old_state")
query = (
session.query(
Events.event_type,
Events.event_data,
Events.time_fired,
Events.context_id,
Events.context_user_id,
States.state,
States.entity_id,
States.domain,
States.attributes,
)
.order_by(Events.time_fired)
.outerjoin(States, (Events.event_id == States.event_id))
.outerjoin(old_state, (States.old_state_id == old_state.state_id))
            # The filter below removes state change events that do not have
            # an old_state or a new_state, or where the old and
            # new state are identical.
#
.filter(
(Events.event_type != EVENT_STATE_CHANGED)
| (
(States.state_id.isnot(None))
& (old_state.state_id.isnot(None))
& (States.state.isnot(None))
& (States.state != old_state.state)
)
)
#
# Prefilter out continuous domains that have
# ATTR_UNIT_OF_MEASUREMENT as its much faster in sql.
#
.filter(
(Events.event_type != EVENT_STATE_CHANGED)
| sqlalchemy.not_(States.domain.in_(CONTINUOUS_DOMAINS))
| sqlalchemy.not_(States.attributes.contains(UNIT_OF_MEASUREMENT_JSON))
)
.filter(
Events.event_type.in_(ALL_EVENT_TYPES + list(hass.data.get(DOMAIN, {})))
)
.filter((Events.time_fired > start_day) & (Events.time_fired < end_day))
)
if entity_matches_only and entity_ids and len(entity_ids) == 1:
# When entity_matches_only is provided, contexts and events that do not
# contain the entity_id are not included in the logbook response.
entity_id = entity_ids[0]
entity_id_json = ENTITY_ID_JSON_TEMPLATE.format(entity_id)
query = query.filter(
(
(States.last_updated == States.last_changed)
& States.entity_id.in_(entity_ids)
)
| (
States.state_id.is_(None)
& Events.event_data.contains(entity_id_json)
)
)
elif entity_ids:
query = query.filter(
(
(States.last_updated == States.last_changed)
& States.entity_id.in_(entity_ids)
)
| (States.state_id.is_(None))
)
else:
query = query.filter(
(States.last_updated == States.last_changed)
| (States.state_id.is_(None))
)
if apply_sql_entities_filter and filters:
entity_filter = filters.entity_filter()
if entity_filter is not None:
query = query.filter(
entity_filter | (Events.event_type != EVENT_STATE_CHANGED)
)
return list(
humanify(hass, yield_events(query), entity_attr_cache, context_lookup)
)
|
def _get_events(
hass,
start_day,
end_day,
entity_id=None,
filters=None,
entities_filter=None,
entity_matches_only=False,
):
"""Get events for a period of time."""
entity_attr_cache = EntityAttributeCache(hass)
context_lookup = {None: None}
def yield_events(query):
"""Yield Events that are not filtered away."""
for row in query.yield_per(1000):
event = LazyEventPartialState(row)
context_lookup.setdefault(event.context_id, event)
if _keep_event(hass, event, entities_filter):
yield event
with session_scope(hass=hass) as session:
if entity_id is not None:
entity_ids = [entity_id.lower()]
entities_filter = generate_filter([], entity_ids, [], [])
apply_sql_entities_filter = False
else:
entity_ids = None
apply_sql_entities_filter = True
old_state = aliased(States, name="old_state")
query = (
session.query(
Events.event_type,
Events.event_data,
Events.time_fired,
Events.context_id,
Events.context_user_id,
States.state,
States.entity_id,
States.domain,
States.attributes,
)
.order_by(Events.time_fired)
.outerjoin(States, (Events.event_id == States.event_id))
.outerjoin(old_state, (States.old_state_id == old_state.state_id))
            # The filter below removes state change events that do not have
            # an old_state or a new_state, or where the old and
            # new state are identical.
#
.filter(
(Events.event_type != EVENT_STATE_CHANGED)
| (
(States.state_id.isnot(None))
& (old_state.state_id.isnot(None))
& (States.state.isnot(None))
& (States.state != old_state.state)
)
)
#
# Prefilter out continuous domains that have
# ATTR_UNIT_OF_MEASUREMENT as its much faster in sql.
#
.filter(
(Events.event_type != EVENT_STATE_CHANGED)
| sqlalchemy.not_(States.domain.in_(CONTINUOUS_DOMAINS))
| sqlalchemy.not_(States.attributes.contains(UNIT_OF_MEASUREMENT_JSON))
)
.filter(
Events.event_type.in_(ALL_EVENT_TYPES + list(hass.data.get(DOMAIN, {})))
)
.filter((Events.time_fired > start_day) & (Events.time_fired < end_day))
)
if entity_matches_only and entity_ids and len(entity_ids) == 1:
# When entity_matches_only is provided, contexts and events that do not
# contain the entity_id are not included in the logbook response.
entity_id = entity_ids[0]
entity_id_json = ENTITY_ID_JSON_TEMPLATE.format(entity_id)
query = query.filter(
(
(States.last_updated == States.last_changed)
& States.entity_id.in_(entity_ids)
)
| (
States.state_id.is_(None)
& Events.event_data.contains(entity_id_json)
)
)
elif entity_ids:
query = query.filter(
(
(States.last_updated == States.last_changed)
& States.entity_id.in_(entity_ids)
)
| (States.state_id.is_(None))
)
else:
query = query.filter(
(States.last_updated == States.last_changed)
| (States.state_id.is_(None))
)
if apply_sql_entities_filter and filters:
entity_filter = filters.entity_filter()
if entity_filter is not None:
query = query.filter(
entity_filter | (Events.event_type != EVENT_STATE_CHANGED)
)
return list(
humanify(hass, yield_events(query), entity_attr_cache, context_lookup)
)
|
48,174 |
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get("dependencies", []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params["generator_flags"]["output_dir"]
except KeyError:
filepath = "."
filename = os.path.join(filepath, "dump.json")
with open(filename, "w") as f:
json.dump(edges, f)
print("Wrote json to %s." % filename)
|
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get("dependencies", []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params["generator_flags"]["output_dir"]
except KeyError:
filepath = "."
filename = os.path.join(filepath, "dump.json")
with open(filename, "w") as out_file:
json.dump(edges, out_file)
print("Wrote json to %s." % filename)
|
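The dependency-edge collection above is a plain worklist traversal over the target graph; a self-contained sketch with an in-memory target table (the table contents are illustrative):

import json

target_dicts = {
    "app": {"dependencies": ["lib_a", "lib_b"]},
    "lib_a": {"dependencies": ["lib_b"]},
    "lib_b": {"dependencies": []},
}

edges = {}
targets_to_visit = ["app"]
while targets_to_visit:
    target = targets_to_visit.pop()
    if target in edges:
        continue                       # already visited
    deps = target_dicts[target].get("dependencies", [])
    edges[target] = list(deps)
    targets_to_visit.extend(deps)

print(json.dumps(edges, indent=2))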
33,735 |
def push_readmes_and_tags():
if not _merge_build():
print("Not pushing because this is a PR build.")
return
username, password = _get_docker_creds()
    environment = {"DOCKER_USER": username, "DOCKER_PASS": password}
for image, tag_line in DOCKER_HUB_DESCRIPTION.items():
cmd_string = (f"--file /myvol/docker/{image}/README.md "
f"--short {tag_line} --debug rayproject/{image}")
DOCKER_CLIENT.containers.run(
"chko/docker-pushrm:1",
command=cmd_string,
mounts=[
docker.types.Mount(target="/myvol/", source=_get_root_dir())
],
environment=environment,
remove=True,
tty=True)
|
def push_readmes_and_tags():
if not _merge_build():
print("Not pushing readme because this is a PR build.")
return
username, password = _get_docker_creds()
    environment = {"DOCKER_USER": username, "DOCKER_PASS": password}
for image, tag_line in DOCKER_HUB_DESCRIPTION.items():
cmd_string = (f"--file /myvol/docker/{image}/README.md "
f"--short {tag_line} --debug rayproject/{image}")
DOCKER_CLIENT.containers.run(
"chko/docker-pushrm:1",
command=cmd_string,
mounts=[
docker.types.Mount(target="/myvol/", source=_get_root_dir())
],
environment=environment,
remove=True,
tty=True)
|
27,989 |
def main(args):
"""
Entry point for the command handling automatic fixes.
TODO: Currently clang-tidy is the only tool which supports the dumping of
fixit replacements. In this script we assume that the replacement dump
.yaml files are in the format so clang-apply-replacement Clang tool can
consume them.
"""
logger.setup_logger(args.verbose if 'verbose' in args else None)
context = analyzer_context.get_context()
if not context.replacer_binary:
LOG.warning("clang-apply-replacements tool is not found")
return
if 'list' in args:
list_fixits(args.input, args.checker_name, args.file)
else:
apply_fixits(args.input, args.checker_name, args.file)
|
def main(args):
"""
Entry point for the command handling automatic fixes.
TODO: Currently clang-tidy is the only tool which supports the dumping of
fixit replacements. In this script we assume that the replacement dump
.yaml files are in the format so clang-apply-replacement Clang tool can
consume them.
"""
logger.setup_logger(args.verbose if 'verbose' in args else None)
context = analyzer_context.get_context()
if not context.replacer_binary:
LOG.warning("clang-apply-replacements tool is not found")
LOG.error("clang-apply-replacements tool is not found")
if 'list' in args:
list_fixits(args.input, args.checker_name, args.file)
else:
apply_fixits(args.input, args.checker_name, args.file)
|
48,865 |
def replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:
"""
Replaces extra requirement with provider package. The intention here is that when
the provider is added as dependency of extra, there is no need to add the dependencies
separately. This is not needed and even harmful, because in case of future versions of
the provider, the requirements might change, so hard-coding requirements from the version
that was available at the release time might cause dependency conflicts in the future.
Say for example that you have salesforce provider with those deps:
{ 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }
Initially ['salesforce'] extra has those requirements and it works like that when you install
it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when
the production installation is used, The dependencies are changed:
{ 'salesforce': ['apache-airflow-providers-salesforce'] }
And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:
['simple-salesforce>=1.0.0', 'tableauserverclient']
So transitively 'salesforce' extra has all the requirements it needs and in case the provider
changes its dependencies, they will transitively change as well.
    In the constraint mechanism we save both - provider versions and their dependencies'
    versions, which means that installation using constraints is repeatable.
For K8s, Celery and Dask which are both "Core executors" and "Providers" we have to
add the base dependencies to the core as well - in order to mitigate problems where
newer version of provider will have less strict limits. This should be done for both:
extras and their deprecated aliases. This is not a full protection however, the way
extras work, this will not add "hard" limits for airflow and the user who does not use
constraints
:param extra: Name of the extra to add providers to
:param providers: list of provider ids
"""
if extra in ['cncf.kubernetes', 'kubernetes', 'celery']:
EXTRAS_REQUIREMENTS[extra].extend(
[get_provider_package_from_package_id(package_name) for package_name in providers]
)
else:
EXTRAS_REQUIREMENTS[extra] = [
get_provider_package_from_package_id(package_name) for package_name in providers
]
|
def replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:
"""
Replaces extra requirement with provider package. The intention here is that when
the provider is added as dependency of extra, there is no need to add the dependencies
separately. This is not needed and even harmful, because in case of future versions of
the provider, the requirements might change, so hard-coding requirements from the version
that was available at the release time might cause dependency conflicts in the future.
Say for example that you have salesforce provider with those deps:
{ 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }
Initially ['salesforce'] extra has those requirements and it works like that when you install
it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when
the production installation is used, The dependencies are changed:
{ 'salesforce': ['apache-airflow-providers-salesforce'] }
And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:
['simple-salesforce>=1.0.0', 'tableauserverclient']
So transitively 'salesforce' extra has all the requirements it needs and in case the provider
changes its dependencies, they will transitively change as well.
    In the constraint mechanism we save both - provider versions and their dependencies'
    versions, which means that installation using constraints is repeatable.
For K8s, Celery and Dask which are both "Core executors" and "Providers" we have to
add the base dependencies to the core as well - in order to mitigate problems where
newer version of provider will have less strict limits. This should be done for both:
extras and their deprecated aliases. This is not a full protection however, the way
extras work, this will not add "hard" limits for Airflow and the user who does not use
constraints.
:param extra: Name of the extra to add providers to
:param providers: list of provider ids
"""
if extra in ['cncf.kubernetes', 'kubernetes', 'celery']:
EXTRAS_REQUIREMENTS[extra].extend(
[get_provider_package_from_package_id(package_name) for package_name in providers]
)
else:
EXTRAS_REQUIREMENTS[extra] = [
get_provider_package_from_package_id(package_name) for package_name in providers
]
|
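The substitution the docstring describes is a mapping from extras to provider package names, with the executor extras extended rather than replaced. A minimal sketch with hypothetical data (the EXTRAS table and the name template are illustrative, not Airflow's real tables):

EXTRAS = {
    "salesforce": ["simple-salesforce>=1.0.0", "tableauserverclient"],
    "celery": ["celery>=5.0"],
}

def provider_package(provider_id: str) -> str:
    # hypothetical name template mirroring apache-airflow-providers-<id>
    return "apache-airflow-providers-" + provider_id.replace(".", "-")

def replace_extra(extra: str, providers: list) -> None:
    packages = [provider_package(p) for p in providers]
    if extra in ("cncf.kubernetes", "kubernetes", "celery"):
        EXTRAS[extra].extend(packages)   # executor extras keep their core deps
    else:
        EXTRAS[extra] = packages         # other extras are replaced entirely

replace_extra("salesforce", ["salesforce"])
replace_extra("celery", ["celery"])
print(EXTRAS)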
32,560 |
def main():
args = demisto.args()
entry_id = args.get("entryid")
if isinstance(entry_id, list):
entry_id = entry_id[0]
dictlist = demisto.executeCommand("getEntry", {"id": entry_id})[0]["Contents"]
csv_final = json_to_csv(dictlist)
if "filename" in demisto.args():
# Send CSV as file in War Room
demisto.results(fileResult(args.get("filename"), csv_final))
else:
# Send CSV to War Room
demisto.results(csv_final)
|
def main():
args = demisto.args()
entry_id = args.get("entryid")
if isinstance(entry_id, list):
entry_id = entry_id[0]
dictlist = demisto.executeCommand("getEntry", {"id": entry_id})[0]["Contents"]
csv_final = json_to_csv(dictlist)
if "filename" in args:
# Send CSV as file in War Room
demisto.results(fileResult(args.get("filename"), csv_final))
else:
# Send CSV to War Room
demisto.results(csv_final)
|
47,878 |
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__)
parser.add_argument('--demo-build-dir', type=Path, required=True, metavar='DIR',
help='directory with demo binaries')
parser.add_argument('--test-data-dir', type=Path, required=True, metavar='DIR',
help='directory with test data')
parser.add_argument('--downloader-cache-dir', type=Path, required=True, metavar='DIR',
help='directory to use as the cache for the model downloader')
parser.add_argument('--demos', metavar='DEMO[,DEMO...]',
help='list of demos to run tests for (by default, every demo is tested)')
parser.add_argument('--mo', type=Path, metavar='MO.PY',
help='Model Optimizer entry point script')
parser.add_argument('--device_list', required=False, default="CPU, GPU",
help='List of devices to test')
parser.add_argument('--report_file', type=Path, required=False, default="demo_execution_time_report.csv",
help='Path to report file')
return parser.parse_args()
|
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__)
parser.add_argument('--demo-build-dir', type=Path, required=True, metavar='DIR',
help='directory with demo binaries')
parser.add_argument('--test-data-dir', type=Path, required=True, metavar='DIR',
help='directory with test data')
parser.add_argument('--downloader-cache-dir', type=Path, required=True, metavar='DIR',
help='directory to use as the cache for the model downloader')
parser.add_argument('--demos', metavar='DEMO[,DEMO...]',
help='list of demos to run tests for (by default, every demo is tested)')
parser.add_argument('--mo', type=Path, metavar='MO.PY',
help='Model Optimizer entry point script')
parser.add_argument('--device_list', required=False, default="CPU, GPU",
help='List of devices to test')
parser.add_argument('--report-file', type=Path,
help='Path to report file')
return parser.parse_args()
|
58,828 |
def scal(a, x):
"""Computes x *= a.
(*) x will be updated.
"""
if x.ndim != 1:
raise ValueError('x must be a 1D array (actual: {})'.format(x.ndim))
dtype = x.dtype.char
if dtype == 'f':
func = cublas.sscal
elif dtype == 'd':
func = cublas.dscal
elif dtype == 'F':
func = cublas.cscal
elif dtype == 'D':
func = cublas.zscal
else:
raise TypeError('invalid dtype')
handle = device.get_cublas_handle()
a_ptr, mode = _setup_scalar_ptr(handle, a, dtype)
func(handle, x.size, a_ptr, x.data.ptr, 1)
cublas.setPointerMode(handle, mode)
return x
|
def scal(a, x):
"""Computes x *= a.
(*) x will be updated.
"""
if x.ndim != 1:
raise ValueError('x must be a 1D array (actual: {})'.format(x.ndim))
dtype = x.dtype.char
if dtype == 'f':
func = cublas.sscal
elif dtype == 'd':
func = cublas.dscal
elif dtype == 'F':
func = cublas.cscal
elif dtype == 'D':
func = cublas.zscal
else:
raise TypeError('invalid dtype')
handle = device.get_cublas_handle()
a_ptr, orig_mode = _setup_scalar_ptr(handle, a, dtype)
func(handle, x.size, a_ptr, x.data.ptr, 1)
cublas.setPointerMode(handle, orig_mode)
return x
|
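The cuBLAS *scal family performs the in-place update x <- a * x. A NumPy analogue (not the CuPy/cuBLAS call itself) showing the same in-place semantics the docstring describes:

import numpy as np

x = np.array([1.0, 2.0, 3.0])
a = 2.5
x *= a          # in-place scaling, like cublas <t>scal
print(x)        # [2.5 5.  7.5]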
25,861 |
def to_kilograms(weight, unit):
"""
Convert the given length to kilograms.
"""
try:
if weight < 0:
raise ValueError("Weight must be a positive number")
except TypeError:
raise TypeError(f"Invalid value '{weight}' for weight (must be a number)")
valid_units = DeviceWeightUnitChoices.values()
if unit not in valid_units:
raise ValueError(f"Unknown unit {unit}. Must be one of the following: {', '.join(valid_units)}")
UNIT_KILOGRAM = 'kg'
UNIT_GRAM = 'g'
# Imperial
UNIT_POUND = 'lb'
UNIT_OUNCE = 'oz'
if unit == DeviceWeightUnitChoices.UNIT_KILOGRAM:
return weight
if unit == DeviceWeightUnitChoices.UNIT_GRAM:
        return weight / 1000
if unit == DeviceWeightUnitChoices.UNIT_POUND:
return weight * Decimal(0.453592)
if unit == DeviceWeightUnitChoices.UNIT_OUNCE:
return weight * Decimal(0.0283495)
raise ValueError(f"Unknown unit {unit}. Must be 'kg', 'g', 'lb', 'oz'.")
|
def to_kilograms(weight, unit):
"""
Convert the given weight to kilograms.
"""
try:
if weight < 0:
raise ValueError("Weight must be a positive number")
except TypeError:
raise TypeError(f"Invalid value '{weight}' for weight (must be a number)")
valid_units = DeviceWeightUnitChoices.values()
if unit not in valid_units:
raise ValueError(f"Unknown unit {unit}. Must be one of the following: {', '.join(valid_units)}")
UNIT_KILOGRAM = 'kg'
UNIT_GRAM = 'g'
# Imperial
UNIT_POUND = 'lb'
UNIT_OUNCE = 'oz'
if unit == DeviceWeightUnitChoices.UNIT_KILOGRAM:
return weight
if unit == DeviceWeightUnitChoices.UNIT_GRAM:
        return weight / 1000
if unit == DeviceWeightUnitChoices.UNIT_POUND:
return weight * Decimal(0.453592)
if unit == DeviceWeightUnitChoices.UNIT_OUNCE:
return weight * Decimal(0.0283495)
raise ValueError(f"Unknown unit {unit}. Must be 'kg', 'g', 'lb', 'oz'.")
|
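The conversion factors are easy to sanity-check in isolation (1 g = 0.001 kg, 1 lb = 0.453592 kg, 1 oz = 0.0283495 kg). A standalone sketch using plain strings in place of DeviceWeightUnitChoices:

from decimal import Decimal

KG_PER_UNIT = {
    "kg": Decimal(1),
    "g": Decimal(1) / Decimal(1000),
    "lb": Decimal("0.453592"),
    "oz": Decimal("0.0283495"),
}

def to_kg(weight, unit):
    if unit not in KG_PER_UNIT:
        raise ValueError(f"Unknown unit {unit}. Must be 'kg', 'g', 'lb', 'oz'.")
    return Decimal(weight) * KG_PER_UNIT[unit]

print(to_kg(10, "lb"))   # 4.53592
print(to_kg(500, "g"))   # 0.500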
20,297 |
def find_ldc_dmd_frontend_version(version_output: str):
version_regex = re.search(r'DMD v(\d+\.\d+\.\d+)', version_output)
if version_regex is not None and len(version_regex.groups()):
return version_regex.groups()[0]
return ''
|
def find_ldc_dmd_frontend_version(version_output: str) -> str:
version_regex = re.search(r'DMD v(\d+\.\d+\.\d+)', version_output)
if version_regex is not None and len(version_regex.groups()):
return version_regex.groups()[0]
return ''
|
29,007 |
def update_config_from_file(config: IslandConfigOptions, config_path: Path):
try:
config_from_file = load_server_config_from_file(config_path)
config.update(config_from_file)
logger.info(f"Server config updated from {config_path}")
except OSError:
logger.info(f"Server config not found in path {config_path}")
|
def update_config_from_file(config: IslandConfigOptions, config_path: Path):
try:
config_from_file = load_server_config_from_file(config_path)
config.update(config_from_file)
logger.info(f"Server config updated from {config_path}")
except OSError:
logger.error(f"Server config not found in path {config_path}")
|
7,175 |
def _get_fourier_filter(size, filter_name):
"""Construct the Fourier filter
This computation lessens artifacts and removes a small bias as
explained in [1], Chap 3. Equation 61
Parameters
----------
size: int
filter size.
filter_name: str, optional
Filter used in frequency domain filtering. Ramp filter used by
default. Filters available: ramp, shepp-logan, cosine,
hamming, hann. Assign None to use no filter.
Returns
-------
fourier_filter: ndarray
The computed Fourier filter.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
    n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=int),
                        np.arange(size / 2 - 1, 0, -2, dtype=int)))
f = np.zeros(size)
f[0] = 0.25
f[1::2] = -1 / (np.pi * n) ** 2
# Computing the ramp filter from the fourier transform of its
# frequency domain representation lessens artifacts and removes a
# small bias as explained in [1], Chap 3. Equation 61
fourier_filter = 2 * np.real(fft(f)) # ramp filter
if filter_name == "ramp":
pass
elif filter_name == "shepp-logan":
# Start from first element to avoid divide by zero
omega = np.pi * fftmodule.fftfreq(size)[1:]
fourier_filter[1:] *= np.sin(omega) / omega
elif filter_name == "cosine":
freq = np.pi * np.linspace(0, 1, size, endpoint=False)
cosine_filter = fftmodule.fftshift(np.sin(freq))
fourier_filter *= cosine_filter
elif filter_name == "hamming":
fourier_filter *= fftmodule.fftshift(np.hamming(size))
elif filter_name == "hann":
fourier_filter *= fftmodule.fftshift(np.hanning(size))
elif filter_name is None:
fourier_filter[:] = 1
return fourier_filter[:, np.newaxis]
|
def _get_fourier_filter(size, filter_name):
"""Construct the Fourier filter
This computation lessens artifacts and removes a small bias as
explained in [1], Chap 3. Equation 61.
Parameters
----------
size: int
filter size.
filter_name: str, optional
Filter used in frequency domain filtering. Ramp filter used by
default. Filters available: ramp, shepp-logan, cosine,
hamming, hann. Assign None to use no filter.
Returns
-------
fourier_filter: ndarray
The computed Fourier filter.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
    n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=int),
                        np.arange(size / 2 - 1, 0, -2, dtype=int)))
f = np.zeros(size)
f[0] = 0.25
f[1::2] = -1 / (np.pi * n) ** 2
# Computing the ramp filter from the fourier transform of its
# frequency domain representation lessens artifacts and removes a
# small bias as explained in [1], Chap 3. Equation 61
fourier_filter = 2 * np.real(fft(f)) # ramp filter
if filter_name == "ramp":
pass
elif filter_name == "shepp-logan":
# Start from first element to avoid divide by zero
omega = np.pi * fftmodule.fftfreq(size)[1:]
fourier_filter[1:] *= np.sin(omega) / omega
elif filter_name == "cosine":
freq = np.pi * np.linspace(0, 1, size, endpoint=False)
cosine_filter = fftmodule.fftshift(np.sin(freq))
fourier_filter *= cosine_filter
elif filter_name == "hamming":
fourier_filter *= fftmodule.fftshift(np.hamming(size))
elif filter_name == "hann":
fourier_filter *= fftmodule.fftshift(np.hanning(size))
elif filter_name is None:
fourier_filter[:] = 1
return fourier_filter[:, np.newaxis]
|
32,622 |
def scan_start(client: Client, args: dict) -> CommandResults:
"""
quttera-scan-start command: initiates scan of the provided URL or domain.
    :type client: ``Client``
    :param client: Quttera API client
    :type args: ``Dict[str, Any]``
    :param args: all command arguments from ``demisto.args()``
    :return: ``CommandResults`` object containing the start scan response
:rtype: ``CommandResults``
"""
domain = args['domain']
if not domain:
raise ValueError('domain is missing')
start = client._api_request(domain=domain, request_type="POST", operation="scan")
demisto.debug(f"Start output {start}")
readable_output = f'Started scan of {domain}'
if start.get('errorstr') != 'success':
readable_output = f'Failed to scan domain {domain}'
return CommandResults(
outputs_prefix="QutteraWebsiteMalwareScanning.Start",
outputs_key_field="error",
outputs=start
)
|
def scan_start(client: Client, args: dict) -> CommandResults:
"""
quttera-scan-start command: initiates scan of the provided URL or domain.
    :type client: ``Client``
    :param client: Quttera API client
    :type args: ``Dict[str, Any]``
    :param args: all command arguments from ``demisto.args()``
    :return: ``CommandResults`` object containing the start scan response
:rtype: ``CommandResults``
"""
domain = args.get('domain')
if not domain:
raise ValueError('domain is missing')
start = client._api_request(domain=domain, request_type="POST", operation="scan")
demisto.debug(f"Start output {start}")
readable_output = f'Started scan of {domain}'
if start.get('errorstr') != 'success':
readable_output = f'Failed to scan domain {domain}'
return CommandResults(
outputs_prefix="QutteraWebsiteMalwareScanning.Start",
outputs_key_field="error",
outputs=start
)
|
32,402 |
def validate_input(args):
"""
Check if the input params for the command are valid. Return an error if any
:param args: dictionary of input params
"""
try:
# we assume all the params to be non-empty, as cortex ensures it
if args.get('limit') and int(args.get('limit', '1')) <= 0:
raise ValueError(f"Limit should be positive, limit: {args.get('limit')}")
try:
if args.get('begin', None):
_start_date = parser.parse(args.get('begin')).replace(tzinfo=pytz.UTC)
if args.get('end', None):
_end_date = parser.parse(args.get('end')).replace(tzinfo=pytz.UTC)
except Exception as e:
raise ValueError("Invalid date format received")
if args.get('begin', None) and _start_date > datetime.now(timezone.utc):
raise ValueError(f"Start date must be a date before or equal to current")
if args.get('end', None) and _end_date > datetime.now(timezone.utc):
raise ValueError(f"End date must be a date before or equal to current")
if args.get('begin', None) and args.get('end', None) and _start_date > _end_date:
raise ValueError(f"Start date cannot be after end date")
if not args.get('collection', False):
raise ValueError(f"Collection Name should be provided: {arg_to_number(args.get('collection', None))}")
return None
except Exception as e:
demisto.error("Exception with validating inputs [{}]".format(e))
raise e
|
def validate_input(args: Dict[str, Any]):
"""
Check if the input params for the command are valid. Return an error if any
:param args: dictionary of input params
"""
try:
# we assume all the params to be non-empty, as cortex ensures it
if args.get('limit') and int(args.get('limit', '1')) <= 0:
raise ValueError(f"Limit should be positive, limit: {args.get('limit')}")
try:
if args.get('begin', None):
_start_date = parser.parse(args.get('begin')).replace(tzinfo=pytz.UTC)
if args.get('end', None):
_end_date = parser.parse(args.get('end')).replace(tzinfo=pytz.UTC)
except Exception as e:
raise ValueError("Invalid date format received")
if args.get('begin', None) and _start_date > datetime.now(timezone.utc):
raise ValueError(f"Start date must be a date before or equal to current")
if args.get('end', None) and _end_date > datetime.now(timezone.utc):
raise ValueError(f"End date must be a date before or equal to current")
if args.get('begin', None) and args.get('end', None) and _start_date > _end_date:
raise ValueError(f"Start date cannot be after end date")
if not args.get('collection', False):
raise ValueError(f"Collection Name should be provided: {arg_to_number(args.get('collection', None))}")
return None
except Exception as e:
demisto.error("Exception with validating inputs [{}]".format(e))
raise e
|
25,722 |
def _assert_shapes(
func: C,
print_specs: Sequence[ParsedArgumentSpec],
check_specs: Sequence[ParsedArgumentSpec],
arg_map: Mapping[str, Any],
context: Dict[str, Union[int, List[Optional[int]]]],
) -> None:
def _assert(condition: bool) -> None:
if not condition:
raise ShapeMismatchError(func, print_specs, arg_map)
for arg_spec in check_specs:
actual_shape = arg_spec.argument_ref.get(func, arg_map).shape
if isinstance(actual_shape, tf.TensorShape) and actual_shape.rank is None:
continue
actual = list(actual_shape)
actual_len = len(actual)
actual_i = 0
expected = arg_spec.shape.dims
expected_len = len(expected)
n_variable_rank = sum(dim_spec.variable_rank for dim_spec in expected)
assert n_variable_rank <= 1
if n_variable_rank == 0:
_assert(expected_len == actual_len)
else:
_assert(expected_len - n_variable_rank <= actual_len)
for dim_spec in expected:
if dim_spec.variable_rank:
assert dim_spec.variable_name is not None
expected_name = dim_spec.variable_name
variable_rank_len = actual_len - (expected_len - n_variable_rank)
actual_dims = actual[actual_i : actual_i + variable_rank_len]
actual_i += variable_rank_len
expected_dims = context.get(expected_name)
if expected_dims is None:
expected_dims = cast(List[Optional[int]], variable_rank_len * [None])
context[expected_name] = expected_dims
assert isinstance(expected_dims, list)
_assert(len(expected_dims) == len(actual_dims))
for i, actual_dim in enumerate(actual_dims):
if actual_dim is None:
continue
if expected_dims[i] is None:
expected_dims[i] = actual_dim
else:
_assert(expected_dims[i] == actual_dim)
else:
actual_dim = actual[actual_i]
if actual_dim is not None:
if dim_spec.constant is not None:
_assert(dim_spec.constant == actual_dim)
else:
assert dim_spec.variable_name is not None
expected_dim = context.setdefault(dim_spec.variable_name, actual_dim)
_assert(expected_dim == actual_dim)
actual_i += 1
|
def _assert_shapes(
func: C,
print_specs: Sequence[ParsedArgumentSpec],
check_specs: Sequence[ParsedArgumentSpec],
arg_map: Mapping[str, Any],
context: Dict[str, Union[int, List[Optional[int]]]],
) -> None:
def _assert(condition: bool) -> None:
if not condition:
raise ShapeMismatchError(func, print_specs, arg_map)
for arg_spec in check_specs:
actual_shape = arg_spec.argument_ref.get(func, arg_map).shape
if isinstance(actual_shape, tf.TensorShape) and actual_shape.rank is None:
continue
actual = list(actual_shape)
actual_len = len(actual)
actual_i = 0
expected = arg_spec.shape.dims
expected_len = len(expected)
n_variable_rank = sum(dim_spec.variable_rank for dim_spec in expected)
assert n_variable_rank <= 1
if n_variable_rank == 0:
_assert(expected_len == actual_len)
else:
_assert(expected_len - n_variable_rank <= actual_len)
for dim_spec in expected:
if dim_spec.variable_rank:
assert dim_spec.variable_name is not None, "??"
expected_name = dim_spec.variable_name
variable_rank_len = actual_len - (expected_len - n_variable_rank)
actual_dims = actual[actual_i : actual_i + variable_rank_len]
actual_i += variable_rank_len
expected_dims = context.get(expected_name)
if expected_dims is None:
expected_dims = cast(List[Optional[int]], variable_rank_len * [None])
context[expected_name] = expected_dims
assert isinstance(expected_dims, list)
_assert(len(expected_dims) == len(actual_dims))
for i, actual_dim in enumerate(actual_dims):
if actual_dim is None:
continue
if expected_dims[i] is None:
expected_dims[i] = actual_dim
else:
_assert(expected_dims[i] == actual_dim)
else:
actual_dim = actual[actual_i]
if actual_dim is not None:
if dim_spec.constant is not None:
_assert(dim_spec.constant == actual_dim)
else:
assert dim_spec.variable_name is not None
expected_dim = context.setdefault(dim_spec.variable_name, actual_dim)
_assert(expected_dim == actual_dim)
actual_i += 1
|
36,517 |
def build_c_generator(
grammar: Grammar,
grammar_file: str,
tokens_file: str,
output_file: str,
compile_extension: bool = False,
verbose_c_extension: bool = False,
keep_asserts_in_extension: bool = True,
skip_actions: bool = False,
) -> ParserGenerator:
with open(tokens_file, "r") as tok_file:
exact_tok, non_exac_tok = generate_token_definitions(tok_file)
with open(output_file, "w") as file:
gen: ParserGenerator = CParserGenerator(
grammar, exact_tok, non_exac_tok, file, skip_actions=skip_actions
)
gen.generate(grammar_file)
if compile_extension:
compile_c_extension(
output_file, verbose=verbose_c_extension, keep_asserts=keep_asserts_in_extension
)
return gen
|
def build_c_generator(
grammar: Grammar,
grammar_file: str,
tokens_file: str,
output_file: str,
compile_extension: bool = False,
verbose_c_extension: bool = False,
keep_asserts_in_extension: bool = True,
skip_actions: bool = False,
) -> ParserGenerator:
with open(tokens_file, "r") as tok_file:
exact_tok, non_exac_tok = generate_token_definitions(tok_file)
with open(output_file, "w") as file:
gen: ParserGenerator = CParserGenerator(
grammar, exact_tok, non_exact_tok, file, skip_actions=skip_actions
)
gen.generate(grammar_file)
if compile_extension:
compile_c_extension(
output_file, verbose=verbose_c_extension, keep_asserts=keep_asserts_in_extension
)
return gen
|
56,413 |
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('products', sa.Column('confidentiality', sa.String(), server_default='Confidential classified', nullable=False))
# ### end Alembic commands ###
|
def upgrade():
op.add_column(
'products',
sa.Column(
'confidentiality',
sa.String(),
server_default='Confidential classified',
nullable=False))
|
32,441 |
def compare_tables(left: list, right: list, index_key: str = "id", ignore_keys: list = None, table_id="compare",
**kwargs) -> ScriptResult:
"""
Given two tables, compare by keys to look for difference and return the result.
:param left: Left table
:param right: Right table
:param index_key: Key to use as index
:param ignore_keys: Keys in table dictionary to ignore in comparison
:param table_id: The string identifier for the table - appears in output
:param kwargs: Keyword args !no-auto-argument
"""
differences = []
if ignore_keys is str:
ignore_keys = [ignore_keys]
left = remove_dict_keys(left, ignore_keys)
right = remove_dict_keys(right, ignore_keys)
pairs = zip(left, right)
# Calculate objects missing in right table
for left_object in left:
left_value = left_object.get(index_key)
right_object = next((item for item in right if item.get(index_key) == left_object.get(index_key)),
None)
if not right_object:
differences.append(Difference(
key=index_key,
value=left_value,
description=f"{left_value} missing.",
table_id=table_id
))
# Calculate differences where index_key value exists in both
dict_differences = [(x, y) for x, y in pairs if x != y and x.get(index_key) == y.get(index_key)]
for left_dict, right_dict in dict_differences:
dict_differences = compare_two_dicts(left_dict, right_dict)
s = ", ".join([f"{x} different to {y}" for (x, y) in dict_differences])
differences.append(Difference(
key=index_key,
value=left_dict.get(index_key),
description=f"{s}",
table_id=table_id
))
return ScriptResult(
differences=differences
)
|
def compare_tables(left: list, right: list, index_key: str = "id", ignore_keys: list = None, table_id="compare",
**kwargs) -> ScriptResult:
"""
Given two tables, compare by keys to look for difference and return the result.
:param left: Left table
:param right: Right table
:param index_key: Key to use as index
:param ignore_keys: Keys in table dictionary to ignore in comparison
:param table_id: The string identifier for the table - appears in output
:param kwargs: Keyword args !no-auto-argument
"""
differences = []
if isinstance(ignore_keys, str):
ignore_keys = [ignore_keys]
left = remove_dict_keys(left, ignore_keys)
right = remove_dict_keys(right, ignore_keys)
pairs = zip(left, right)
# Calculate objects missing in right table
for left_object in left:
left_value = left_object.get(index_key)
right_object = next((item for item in right if item.get(index_key) == left_object.get(index_key)),
None)
if not right_object:
differences.append(Difference(
key=index_key,
value=left_value,
description=f"{left_value} missing.",
table_id=table_id
))
# Calculate differences where index_key value exists in both
dict_differences = [(x, y) for x, y in pairs if x != y and x.get(index_key) == y.get(index_key)]
for left_dict, right_dict in dict_differences:
dict_differences = compare_two_dicts(left_dict, right_dict)
s = ", ".join([f"{x} different to {y}" for (x, y) in dict_differences])
differences.append(Difference(
key=index_key,
value=left_dict.get(index_key),
description=f"{s}",
table_id=table_id
))
return ScriptResult(
differences=differences
)
|
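The first pass of compare_tables (rows present on the left but missing on the right, keyed by index_key) is easy to exercise in isolation; a dependency-free sketch returning plain dicts instead of Difference objects:

def missing_on_right(left, right, index_key="id"):
    right_keys = {row.get(index_key) for row in right}
    return [row for row in left if row.get(index_key) not in right_keys]

left = [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}]
right = [{"id": 1, "name": "a"}]
print(missing_on_right(left, right))   # [{'id': 2, 'name': 'b'}]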
43,631 |
def convert_observable(qubit_observable):
r"""Converts OpenFermion :class:`~.QubitOperator` operator to a Pennylane VQE observable
**Example usage**
>>> h_of = decompose_hamiltonian('h2', './pyscf/sto-3g/')
>>> h_pl = convert_observable(h_of)
>>> h_pl.coeffs
[-0.04207898+0.j 0.17771287+0.j 0.17771287+0.j -0.2427428 +0.j -0.2427428 +0.j 0.17059738+0.j
0.04475014+0.j 0.04475014+0.j 0.04475014+0.j 0.04475014+0.j 0.12293305+0.j 0.16768319+0.j
0.16768319+0.j 0.12293305+0.j 0.17627641+0.j]
Args:
qubit_observable (QubitOperator): Observable represented as an OpenFermion `QubitOperator`
Returns:
(pennylane.Hamiltonian): Pennylane VQE observable
"""
return Hamiltonian(*_qubit_operator_to_terms(qubit_observable))
|
def convert_observable(qubit_observable):
r"""Converts a OpenFermion :class:`~.QubitOperator` operator to a Pennylane VQE observable
**Example usage**
>>> h_of = decompose_hamiltonian('h2', './pyscf/sto-3g/')
>>> h_pl = convert_observable(h_of)
>>> h_pl.coeffs
[-0.04207898+0.j 0.17771287+0.j 0.17771287+0.j -0.2427428 +0.j -0.2427428 +0.j 0.17059738+0.j
0.04475014+0.j 0.04475014+0.j 0.04475014+0.j 0.04475014+0.j 0.12293305+0.j 0.16768319+0.j
0.16768319+0.j 0.12293305+0.j 0.17627641+0.j]
Args:
qubit_observable (QubitOperator): Observable represented as an OpenFermion `QubitOperator`
Returns:
(pennylane.Hamiltonian): Pennylane VQE observable
"""
return Hamiltonian(*_qubit_operator_to_terms(qubit_observable))
|
56,044 |
def main():
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
# set up weights and biases if available
if is_wandb_available() and args.wandb:
import wandb
wandb.init(project=args.output_dir.split("/")[-1])
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Load dataset
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# TODO support datasets from local folders
dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir)
# Rename column names to standardized names (only "image" and "label" need to be present)
if "pixel_values" in dataset["train"].column_names:
dataset = dataset.rename_columns({"pixel_values": "image"})
if "annotation" in dataset["train"].column_names:
dataset = dataset.rename_columns({"annotation": "label"})
# If we don't have a validation split, split off a percentage of train as validation.
args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split
if isinstance(args.train_val_split, float) and args.train_val_split > 0.0:
split = dataset["train"].train_test_split(args.train_val_split)
dataset["train"] = split["train"]
dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
if args.dataset_name == "scene_parse_150":
repo_id = "datasets/huggingface/label-files"
filename = "ade20k-id2label.json"
num_labels = 150
else:
repo_id = f"datasets/{args.dataset_name}"
filename = "id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
num_labels = len(id2label)
# Load pretrained model and feature extractor
config = AutoConfig.from_pretrained(
args.model_name_or_path, num_labels=num_labels, id2label=id2label, label2id=label2id
)
feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path)
model = AutoModelForSemanticSegmentation.from_pretrained(
args.model_name_or_path,
config=config,
)
# Preprocessing the datasets
# Define torchvision transforms to be applied to each image + target.
# Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9
# Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py
_train_transforms = Compose(
[
ReduceLabels() if args.reduce_labels else Identity(),
RandomCrop(size=feature_extractor.size),
RandomHorizontalFlip(flip_prob=0.5),
PILToTensor(),
ConvertImageDtype(torch.float),
Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std),
]
)
# Define torchvision transform to be applied to each image.
# jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
_val_transforms = Compose(
[
ReduceLabels() if args.reduce_labels else Identity(),
Resize(size=(feature_extractor.size, feature_extractor.size)),
PILToTensor(),
ConvertImageDtype(torch.float),
Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std),
]
)
def train_transforms(example_batch):
pixel_values = []
labels = []
for image, target in zip(example_batch["image"], example_batch["label"]):
image, target = _train_transforms(image.convert("RGB"), target)
pixel_values.append(image)
labels.append(target)
encoding = dict()
encoding["pixel_values"] = torch.stack(pixel_values)
encoding["labels"] = torch.stack(labels)
return encoding
def val_transforms(example_batch):
pixel_values = []
labels = []
for image, target in zip(example_batch["image"], example_batch["label"]):
image, target = _val_transforms(image.convert("RGB"), target)
pixel_values.append(image)
labels.append(target)
encoding = dict()
encoding["pixel_values"] = torch.stack(pixel_values)
encoding["labels"] = torch.stack(labels)
return encoding
with accelerator.main_process_first():
train_dataset = dataset["train"].with_transform(train_transforms)
eval_dataset = dataset["validation"].with_transform(val_transforms)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size
)
# Optimizer
optimizer = torch.optim.AdamW(
list(model.parameters()),
lr=args.learning_rate,
betas=[args.adam_beta1, args.adam_beta2],
eps=args.adam_epsilon,
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Instantiate metric
metric = load_metric("mean_iou")
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.max_train_steps:
break
# Log all results
if (step + 1) % (args.gradient_accumulation_steps * args.logging_steps) == 0:
loss.detach()
if accelerator.state.num_processes > 1:
loss = accelerator.gather(loss).sum() / accelerator.num_processes
train_logs = {
"loss": loss,
"lr": torch.tensor(optimizer.param_groups[0]["lr"]),
}
# Evaluate (gather required)
with torch.no_grad():
upsampled_logits = torch.nn.functional.interpolate(
outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False
)
predictions = upsampled_logits.argmax(dim=1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
train_metrics = metric.compute(
num_labels=len(id2label),
ignore_index=255,
reduce_labels=False, # we've already reduced the labels before
)
train_logs["mean_iou"] = train_metrics["mean_iou"]
train_logs["mean_accuracy"] = train_metrics["mean_accuracy"]
train_logs["overall_accuracy"] = train_metrics["overall_accuracy"]
log_str = ""
for k, v in train_logs.items():
if isinstance(v, torch.Tensor):
log_str += "| {}: {:.3e}".format(k, v.item())
else:
log_str += "| {}: {:.3e}".format(k, v)
if accelerator.is_local_main_process:
progress_bar.write(log_str)
if is_wandb_available() and args.wandb:
wandb.log(train_logs)
# Save model every `args.saving_steps` steps
if (step + 1) % (args.gradient_accumulation_steps * args.saving_steps) == 0:
if (args.push_to_hub and epoch < args.num_train_epochs - 1) or args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if (args.push_to_hub and epoch < args.num_train_epochs - 1) and accelerator.is_main_process:
repo.push_to_hub(
commit_message=f"Training in progress step {completed_steps}",
blocking=False,
auto_lfs_prune=True,
)
logger.info("***** Running evaluation *****")
model.eval()
for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)):
outputs = model(**batch)
upsampled_logits = torch.nn.functional.interpolate(
outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False
)
predictions = upsampled_logits.argmax(dim=1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
eval_metrics = metric.compute(
num_labels=len(id2label),
ignore_index=255,
reduce_labels=False, # we've already reduced the labels before
)
logger.info(f"epoch {epoch}: {eval_metrics}")
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
feature_extractor.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
feature_extractor.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
|
def main():
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment
accelerator = Accelerator(log_with="wandb") if args.wandb else Accelerator()
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
# set up weights and biases if available
if is_wandb_available() and args.wandb:
import wandb
wandb.init(project=args.output_dir.split("/")[-1])
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Load dataset
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# TODO support datasets from local folders
dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir)
# Rename column names to standardized names (only "image" and "label" need to be present)
if "pixel_values" in dataset["train"].column_names:
dataset = dataset.rename_columns({"pixel_values": "image"})
if "annotation" in dataset["train"].column_names:
dataset = dataset.rename_columns({"annotation": "label"})
# If we don't have a validation split, split off a percentage of train as validation.
args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split
if isinstance(args.train_val_split, float) and args.train_val_split > 0.0:
split = dataset["train"].train_test_split(args.train_val_split)
dataset["train"] = split["train"]
dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
if args.dataset_name == "scene_parse_150":
repo_id = "datasets/huggingface/label-files"
filename = "ade20k-id2label.json"
num_labels = 150
else:
repo_id = f"datasets/{args.dataset_name}"
filename = "id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
num_labels = len(id2label)
# Load pretrained model and feature extractor
config = AutoConfig.from_pretrained(
args.model_name_or_path, num_labels=num_labels, id2label=id2label, label2id=label2id
)
feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path)
model = AutoModelForSemanticSegmentation.from_pretrained(
args.model_name_or_path,
config=config,
)
# Preprocessing the datasets
# Define torchvision transforms to be applied to each image + target.
# Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9
# Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py
_train_transforms = Compose(
[
ReduceLabels() if args.reduce_labels else Identity(),
RandomCrop(size=feature_extractor.size),
RandomHorizontalFlip(flip_prob=0.5),
PILToTensor(),
ConvertImageDtype(torch.float),
Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std),
]
)
# Define torchvision transform to be applied to each image.
# jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
_val_transforms = Compose(
[
ReduceLabels() if args.reduce_labels else Identity(),
Resize(size=(feature_extractor.size, feature_extractor.size)),
PILToTensor(),
ConvertImageDtype(torch.float),
Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std),
]
)
def train_transforms(example_batch):
pixel_values = []
labels = []
for image, target in zip(example_batch["image"], example_batch["label"]):
image, target = _train_transforms(image.convert("RGB"), target)
pixel_values.append(image)
labels.append(target)
encoding = dict()
encoding["pixel_values"] = torch.stack(pixel_values)
encoding["labels"] = torch.stack(labels)
return encoding
def val_transforms(example_batch):
pixel_values = []
labels = []
for image, target in zip(example_batch["image"], example_batch["label"]):
image, target = _val_transforms(image.convert("RGB"), target)
pixel_values.append(image)
labels.append(target)
encoding = dict()
encoding["pixel_values"] = torch.stack(pixel_values)
encoding["labels"] = torch.stack(labels)
return encoding
with accelerator.main_process_first():
train_dataset = dataset["train"].with_transform(train_transforms)
eval_dataset = dataset["validation"].with_transform(val_transforms)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size
)
# Optimizer
optimizer = torch.optim.AdamW(
list(model.parameters()),
lr=args.learning_rate,
betas=[args.adam_beta1, args.adam_beta2],
eps=args.adam_epsilon,
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Instantiate metric
metric = load_metric("mean_iou")
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.max_train_steps:
break
# Log all results
if (step + 1) % (args.gradient_accumulation_steps * args.logging_steps) == 0:
loss.detach()
if accelerator.state.num_processes > 1:
loss = accelerator.gather(loss).sum() / accelerator.num_processes
train_logs = {
"loss": loss,
"lr": torch.tensor(optimizer.param_groups[0]["lr"]),
}
# Evaluate (gather required)
with torch.no_grad():
upsampled_logits = torch.nn.functional.interpolate(
outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False
)
predictions = upsampled_logits.argmax(dim=1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
train_metrics = metric.compute(
num_labels=len(id2label),
ignore_index=255,
reduce_labels=False, # we've already reduced the labels before
)
train_logs["mean_iou"] = train_metrics["mean_iou"]
train_logs["mean_accuracy"] = train_metrics["mean_accuracy"]
train_logs["overall_accuracy"] = train_metrics["overall_accuracy"]
log_str = ""
for k, v in train_logs.items():
if isinstance(v, torch.Tensor):
log_str += "| {}: {:.3e}".format(k, v.item())
else:
log_str += "| {}: {:.3e}".format(k, v)
if accelerator.is_local_main_process:
progress_bar.write(log_str)
if is_wandb_available() and args.wandb:
wandb.log(train_logs)
# Save model every `args.saving_steps` steps
if (step + 1) % (args.gradient_accumulation_steps * args.saving_steps) == 0:
if (args.push_to_hub and epoch < args.num_train_epochs - 1) or args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if (args.push_to_hub and epoch < args.num_train_epochs - 1) and accelerator.is_main_process:
repo.push_to_hub(
commit_message=f"Training in progress step {completed_steps}",
blocking=False,
auto_lfs_prune=True,
)
logger.info("***** Running evaluation *****")
model.eval()
for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)):
outputs = model(**batch)
upsampled_logits = torch.nn.functional.interpolate(
outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False
)
predictions = upsampled_logits.argmax(dim=1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
eval_metrics = metric.compute(
num_labels=len(id2label),
ignore_index=255,
reduce_labels=False, # we've already reduced the labels before
)
logger.info(f"epoch {epoch}: {eval_metrics}")
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
feature_extractor.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
feature_extractor.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
|
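The training loop in the pair above scales each loss by the number of gradient accumulation steps and only steps the optimizer every few batches (or on the last batch). A minimal, self-contained sketch of that pattern, with a toy linear model and random tensors standing in for the segmentation model and dataloader (both hypothetical), assuming only PyTorch is installed:

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
batches = [(torch.randn(8, 4), torch.randint(0, 2, (8,))) for _ in range(10)]
accumulation_steps = 4

model.train()
for step, (x, y) in enumerate(batches):
    loss = torch.nn.functional.cross_entropy(model(x), y)
    # scale so the accumulated gradients average over the accumulation window
    (loss / accumulation_steps).backward()
    if step % accumulation_steps == 0 or step == len(batches) - 1:
        optimizer.step()
        optimizer.zero_grad()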
42,504 |
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
"""
Compute the first Wasserstein distance between two 1D distributions.
:param u_values: Values of first distribution with shape (nb_samples, feature_dim_1, ..., feature_dim_n)
:type u_values: `np.ndarray`
:param v_values: Values of second distribution with shape (nb_samples, feature_dim_1, ..., feature_dim_n)
:type v_values: `np.ndarray`
:param u_weights: Weight for each value. If None equal weights will be used.
:type u_weights: `np.ndarray`
:param v_weights: Weight for each value. If None equal weights will be used.
:type v_weights: `np.ndarray`
:return: The Wasserstein distance between the two distributions
:rtype: `np.ndarray`
"""
from scipy.stats import wasserstein_distance
assert u_values.shape == v_values.shape
if u_weights is not None:
assert v_weights is not None
if u_weights is None:
assert v_weights is None
if u_weights is not None and v_weights is not None:
assert u_weights.shape == v_weights.shape
if u_weights is not None:
assert u_values.shape[0] == u_weights.shape[0]
u_values = u_values.flatten().reshape(u_values.shape[0], -1)
v_values = v_values.flatten().reshape(v_values.shape[0], -1)
wd = np.zeros(u_values.shape[0])
for i in range(u_values.shape[0]):
if u_weights is None and v_weights is None:
wd[i] = wasserstein_distance(u_values[i], v_values[i])
elif u_weights is not None and v_weights is not None:
wd[i] = wasserstein_distance(u_values[i], v_values[i], u_weights[i], v_weights[i])
return wd
|
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
"""
Compute the first Wasserstein distance between two 1D distributions.
:param u_values: Values of first distribution with shape (nb_samples, feature_dim_1, ..., feature_dim_n)
:type u_values: `np.ndarray`
:param v_values: Values of second distribution with shape (nb_samples, feature_dim_1, ..., feature_dim_n)
:type v_values: `np.ndarray`
:param u_weights: Weight for each value. If None equal weights will be used.
:type u_weights: `np.ndarray`
:param v_weights: Weight for each value. If None equal weights will be used.
:type v_weights: `np.ndarray`
:return: The Wasserstein distance between the two distributions
:rtype: `np.ndarray`
"""
from scipy.stats import wasserstein_distance
assert u_values.shape == v_values.shape
if u_weights is not None:
assert v_weights is not None
if u_weights is None:
assert v_weights is None
if u_weights is not None and v_weights is not None:
assert u_weights.shape == v_weights.shape
if u_weights is not None:
assert u_values.shape[0] == u_weights.shape[0]
u_values = u_values.flatten().reshape(u_values.shape[0], -1)
v_values = v_values.flatten().reshape(v_values.shape[0], -1)
wd = np.zeros(u_values.shape[0])
for i in range(u_values.shape[0]):
if u_weights is None and v_weights is None:
wd[i] = wasserstein_distance(u_values[i], v_values[i])
else:
wd[i] = wasserstein_distance(u_values[i], v_values[i], u_weights[i], v_weights[i])
return wd
|
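A short usage sketch for the batched helper in this pair (assuming NumPy and SciPy are installed): every row of the inputs is treated as an independent 1-D sample, so one distance per row comes back. SciPy's scalar function is used here to show the per-row values the helper would return.

import numpy as np
from scipy.stats import wasserstein_distance as scipy_wd

u = np.array([[0.0, 1.0, 3.0], [0.0, 0.0, 0.0]])
v = np.array([[5.0, 6.0, 8.0], [1.0, 1.0, 1.0]])
# with equal weights the first row gives (|0-5| + |1-6| + |3-8|) / 3 = 5.0
# and the second row gives 1.0
print([scipy_wd(a, b) for a, b in zip(u, v)])  # [5.0, 1.0]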
1,343 |
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
If True, for binary y_true, the score function is supposed to accpet
1d y_pred (i.e., probability of the positive class, shape
``(n_samples,)``).
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
If True, for binary y_true, the score function is supposed to accpet
1d y_pred (i.e., probability of the positive class or the decision
function, shape ``(n_samples,)``).
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
|
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
If True, for binary y_true, the score function is supposed to accpet
1d y_pred (i.e., probability of the positive class, shape
``(n_samples,)``).
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
If True, for binary y_true, the score function is supposed to accept
1d y_pred (i.e., probability of the positive class or the decision
function, shape ``(n_samples,)``).
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
|
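A small usage sketch (assuming scikit-learn and NumPy are installed) of the sign flip for a loss function, so that grid search can keep maximizing the scorer:

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.model_selection import GridSearchCV

# greater_is_better=False makes the scorer return the negated loss
neg_mse = make_scorer(mean_squared_error, greater_is_better=False)
X, y = np.random.rand(30, 3), np.random.rand(30)
grid = GridSearchCV(Ridge(), param_grid={"alpha": [0.1, 1.0, 10.0]},
                    scoring=neg_mse, cv=3)
grid.fit(X, y)
print(grid.best_params_)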
32,056 |
def qradar_offense_update_command(client: Client, args: Dict) -> CommandResults:
"""
Updates offense that corresponds to the given offense ID.
possible arguments:
- offense_id (Required): Update offense that corresponds to ID given.
- protected: Whether the offense is protected.
- follow_up: Whether the offense should be marked for follow up.
- status: Status of the offense. One of 'OPEN', 'HIDDEN', 'CLOSED'.
    - closing_reason_id: The ID of the reason the offense was closed. The full list of closing reason IDs
      can be retrieved by the 'qradar-closing-reasons' command.
- assigned_to: The user whom to assign the offense to.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
offense_id: int = int(args['offense_id'])
protected = args.get('protected')
follow_up = args.get('follow_up')
closing_reason_name = args.get('closing_reason_name')
status = args.get('status')
closing_reason_id = args.get('closing_reason_id')
if status == 'CLOSED' and (not closing_reason_id and not closing_reason_name):
raise DemistoException(
            '''Closing reason ID must be provided when closing an offense. Available closing reasons can be retrieved
            by the 'qradar-closing-reasons' command.'''
)
if closing_reason_name:
# if this call fails raise an error and stop command execution
closing_reasons_list = client.closing_reasons_list(include_deleted=True, include_reserved=True)
for closing_reason in closing_reasons_list:
if closing_reason.get('text') == closing_reason_name:
closing_reason_id = closing_reason.get('id')
if not closing_reason_id:
raise DemistoException(f'Could not find closing reason name {closing_reason_name}. Please provide a valid'
' closing reason name. Closing reasons can be retrieved by running the '
'qradar-closing-reasons command.')
assigned_to = args.get('assigned_to')
fields = args.get('fields')
ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None'))
# if this call fails raise an error and stop command execution
response = client.offense_update(offense_id, protected, follow_up, status, closing_reason_id, assigned_to,
fields)
enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich)
final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP)
headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'],
set(OFFENSE_OLD_NEW_NAMES_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('offense Update', final_outputs, headers, removeNull=True),
outputs_prefix='QRadar.Offense',
outputs_key_field='ID',
outputs=final_outputs,
raw_response=response
)
|
def qradar_offense_update_command(client: Client, args: Dict) -> CommandResults:
"""
Updates offense that corresponds to the given offense ID.
possible arguments:
- offense_id (Required): Update offense that corresponds to ID given.
- protected: Whether the offense is protected.
- follow_up: Whether the offense should be marked for follow up.
- status: Status of the offense. One of 'OPEN', 'HIDDEN', 'CLOSED'.
    - closing_reason_id: The ID of the reason the offense was closed. The full list of closing reason IDs
      can be retrieved by the 'qradar-closing-reasons' command.
- assigned_to: The user whom to assign the offense to.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
offense_id: int = int(args['offense_id'])
protected = args.get('protected')
follow_up = args.get('follow_up')
closing_reason_name = args.get('closing_reason_name')
status = args.get('status')
closing_reason_id = args.get('closing_reason_id')
if status == 'CLOSED' and (not closing_reason_id and not closing_reason_name):
raise DemistoException(
            '''Closing reason ID must be provided when closing an offense. Available closing reasons can be retrieved
            by the 'qradar-closing-reasons' command.'''
)
if closing_reason_name:
# if this call fails, raise an error and stop command execution
closing_reasons_list = client.closing_reasons_list(include_deleted=True, include_reserved=True)
for closing_reason in closing_reasons_list:
if closing_reason.get('text') == closing_reason_name:
closing_reason_id = closing_reason.get('id')
if not closing_reason_id:
raise DemistoException(f'Could not find closing reason name {closing_reason_name}. Please provide a valid'
' closing reason name. Closing reasons can be retrieved by running the '
'qradar-closing-reasons command.')
assigned_to = args.get('assigned_to')
fields = args.get('fields')
ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None'))
# if this call fails raise an error and stop command execution
response = client.offense_update(offense_id, protected, follow_up, status, closing_reason_id, assigned_to,
fields)
enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich)
final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP)
headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'],
set(OFFENSE_OLD_NEW_NAMES_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('offense Update', final_outputs, headers, removeNull=True),
outputs_prefix='QRadar.Offense',
outputs_key_field='ID',
outputs=final_outputs,
raw_response=response
)
|
17,662 |
def main(args=None):
lgr.log(5, "Starting main(%r)", args)
args = args or sys.argv
if on_msys_tainted_paths:
# Possibly present DataLadRIs were stripped of a leading /
args = [_fix_datalad_ri(s) for s in args]
# PYTHON_ARGCOMPLETE_OK
parser = setup_parser(args, completing="_ARGCOMPLETE" in os.environ)
try:
import argcomplete
argcomplete.autocomplete(parser)
except ImportError:
pass
# parse cmd args
lgr.debug("Parsing known args among %s", repr(args))
cmdlineargs, unparsed_args = parser.parse_known_args(args[1:])
has_func = hasattr(cmdlineargs, 'func') and cmdlineargs.func is not None
if unparsed_args:
if has_func and cmdlineargs.func.__self__.__name__ != 'Export':
lgr.error('unknown argument{}: {}'.format(
's' if len(unparsed_args) > 1 else '',
unparsed_args if len(unparsed_args) > 1 else unparsed_args[0]))
cmdlineargs.subparser.print_usage()
sys.exit(1)
else:
# store all unparsed arguments
cmdlineargs.datalad_unparsed_args = unparsed_args
# to possibly be passed into PBS scheduled call
args_ = args or sys.argv
if cmdlineargs.cfg_overrides is not None:
datalad.cfg.overrides.update(
_parse_overrides_from_cmdline(cmdlineargs)
)
# enable overrides
datalad.cfg.reload(force=True)
if cmdlineargs.change_path is not None:
from .common_args import change_path as change_path_opt
for path in cmdlineargs.change_path:
chpwd(path)
args_ = strip_arg_from_argv(args_, path, change_path_opt[1])
ret = None
if cmdlineargs.pbs_runner:
from .helpers import run_via_pbs
from .common_args import pbs_runner as pbs_runner_opt
args_ = strip_arg_from_argv(args_, cmdlineargs.pbs_runner, pbs_runner_opt[1])
# run the function associated with the selected command
run_via_pbs(args_, cmdlineargs.pbs_runner)
elif has_func:
if cmdlineargs.common_debug or cmdlineargs.common_idebug:
# so we could see/stop clearly at the point of failure
setup_exceptionhook(ipython=cmdlineargs.common_idebug)
from datalad.interface.base import Interface
Interface._interrupted_exit_code = None
ret = cmdlineargs.func(cmdlineargs)
else:
# otherwise - guard and only log the summary. Postmortem is not
# as convenient if being caught in this ultimate except
try:
ret = cmdlineargs.func(cmdlineargs)
except InsufficientArgumentsError as exc:
# if the func reports inappropriate usage, give help output
ce = CapturedException(exc)
ce.log(lgr, logging.ERROR, "%s", ce.format_short())
cmdlineargs.subparser.print_usage(sys.stderr)
sys.exit(2)
except IncompleteResultsError as exc:
# rendering for almost all commands now happens 'online'
# hence we are no longer attempting to render the actual
# results in an IncompleteResultsError, but rather trust that
# this happened before
# in general we do not want to see the error again, but
# present in debug output
ce = CapturedException(exc)
ce.log(lgr, logging.DEBUG,
"could not perform all requested actions: %s",
ce.format_short())
sys.exit(1)
except CommandError as exc:
# behave as if the command ran directly, importantly pass
# exit code as is
# to not duplicate any captured output in the exception
# rendering, will come next
exc_msg = exc.to_str(include_output=False)
if exc_msg:
msg = exc_msg.encode() if isinstance(exc_msg, str) else exc_msg
os.write(2, msg + b"\n")
if exc.stdout:
os.write(1, exc.stdout.encode() if isinstance(exc.stdout, str) else exc.stdout)
if exc.stderr:
os.write(2, exc.stderr.encode() if isinstance(exc.stderr, str) else exc.stderr)
# We must not exit with 0 code if any exception got here but
# had no code defined
sys.exit(exc.code if exc.code is not None else 1)
except Exception as exc:
ce = CapturedException(exc)
ce.log(lgr, logging.ERROR, "%s", ce.format_short())
sys.exit(1)
else:
        # just let argparser spit out its error, since something is wrong
parser.parse_args(args)
# if that one didn't puke -- we should
parser.print_usage()
lgr.error("Please specify the command")
sys.exit(2)
try:
if hasattr(cmdlineargs, 'result_renderer'):
cmdlineargs.result_renderer(ret, cmdlineargs)
except Exception as exc:
ce = CapturedException(exc)
ce.log(lgr, logging.ERROR,
"Failed to render results due to %s", ce.format_short())
sys.exit(1)
|
def main(args=None):
lgr.log(5, "Starting main(%r)", args)
args = args or sys.argv
if on_msys_tainted_paths:
# Possibly present DataLadRIs were stripped of a leading /
args = [_fix_datalad_ri(s) for s in args]
# PYTHON_ARGCOMPLETE_OK
parser = setup_parser(args, completing="_ARGCOMPLETE" in os.environ)
try:
import argcomplete
argcomplete.autocomplete(parser)
except ImportError:
pass
# parse cmd args
lgr.debug("Parsing known args among %s", repr(args))
cmdlineargs, unparsed_args = parser.parse_known_args(args[1:])
has_func = hasattr(cmdlineargs, 'func') and cmdlineargs.func is not None
if unparsed_args:
if has_func and cmdlineargs.func.__self__.__name__ != 'Export':
lgr.error('unknown argument{}: {}'.format(
's' if len(unparsed_args) > 1 else '',
unparsed_args if len(unparsed_args) > 1 else unparsed_args[0]))
cmdlineargs.subparser.print_usage()
sys.exit(1)
else:
# store all unparsed arguments
cmdlineargs.datalad_unparsed_args = unparsed_args
# to possibly be passed into PBS scheduled call
args_ = args or sys.argv
if cmdlineargs.cfg_overrides is not None:
datalad.cfg.overrides.update(
_parse_overrides_from_cmdline(cmdlineargs)
)
# enable overrides
datalad.cfg.reload(force=True)
if cmdlineargs.change_path is not None:
from .common_args import change_path as change_path_opt
for path in cmdlineargs.change_path:
chpwd(path)
args_ = strip_arg_from_argv(args_, path, change_path_opt[1])
ret = None
if cmdlineargs.pbs_runner:
from .helpers import run_via_pbs
from .common_args import pbs_runner as pbs_runner_opt
args_ = strip_arg_from_argv(args_, cmdlineargs.pbs_runner, pbs_runner_opt[1])
# run the function associated with the selected command
run_via_pbs(args_, cmdlineargs.pbs_runner)
elif has_func:
if cmdlineargs.common_debug or cmdlineargs.common_idebug:
# so we could see/stop clearly at the point of failure
setup_exceptionhook(ipython=cmdlineargs.common_idebug)
from datalad.interface.base import Interface
Interface._interrupted_exit_code = None
ret = cmdlineargs.func(cmdlineargs)
else:
# otherwise - guard and only log the summary. Postmortem is not
# as convenient if being caught in this ultimate except
try:
ret = cmdlineargs.func(cmdlineargs)
except InsufficientArgumentsError as exc:
# if the func reports inappropriate usage, give help output
lgr.error("", exc_info=exc)
cmdlineargs.subparser.print_usage(sys.stderr)
sys.exit(2)
except IncompleteResultsError as exc:
# rendering for almost all commands now happens 'online'
# hence we are no longer attempting to render the actual
# results in an IncompleteResultsError, but rather trust that
# this happened before
# in general we do not want to see the error again, but
# present in debug output
ce = CapturedException(exc)
ce.log(lgr, logging.DEBUG,
"could not perform all requested actions: %s",
ce.format_short())
sys.exit(1)
except CommandError as exc:
# behave as if the command ran directly, importantly pass
# exit code as is
# to not duplicate any captured output in the exception
# rendering, will come next
exc_msg = exc.to_str(include_output=False)
if exc_msg:
msg = exc_msg.encode() if isinstance(exc_msg, str) else exc_msg
os.write(2, msg + b"\n")
if exc.stdout:
os.write(1, exc.stdout.encode() if isinstance(exc.stdout, str) else exc.stdout)
if exc.stderr:
os.write(2, exc.stderr.encode() if isinstance(exc.stderr, str) else exc.stderr)
# We must not exit with 0 code if any exception got here but
# had no code defined
sys.exit(exc.code if exc.code is not None else 1)
except Exception as exc:
ce = CapturedException(exc)
ce.log(lgr, logging.ERROR, "%s", ce.format_short())
sys.exit(1)
else:
        # just let argparser spit out its error, since something is wrong
parser.parse_args(args)
# if that one didn't puke -- we should
parser.print_usage()
lgr.error("Please specify the command")
sys.exit(2)
try:
if hasattr(cmdlineargs, 'result_renderer'):
cmdlineargs.result_renderer(ret, cmdlineargs)
except Exception as exc:
ce = CapturedException(exc)
ce.log(lgr, logging.ERROR,
"Failed to render results due to %s", ce.format_short())
sys.exit(1)
|
3,410 |
def get_available_derived_metrics(
supported_metric_ids_in_entities: Dict[MetricType, Sequence[int]],
) -> Set[str]:
"""
Function that takes as input a dictionary of the available ids in each entity, , and in turn
goes through each derived metric, and returns back the set of the derived metrics that have
data in the dataset in respect to the project filter. For instances of
SingularEntityDerivedMetrics, it is enough to make sure that the constituent metric ids span
a single entity and are present in the passed in dictionary. On the other hand, the available
instances of CompositeEntityDerivedMetrics are computed from the found constituent instances
of SingularEntityDerivedMetric
"""
found_derived_metrics = set()
composite_entity_derived_metrics = set()
for derived_metric_name, derived_metric_obj in DERIVED_METRICS.items():
try:
derived_metric_obj_ids = derived_metric_obj.generate_metric_ids()
except NotSupportedOverCompositeEntityException:
# If we encounter a derived metric composed of constituents spanning multiple
# entities then we store it in this set
composite_entity_derived_metrics.add(derived_metric_obj.metric_name)
continue
for ids_per_entity in supported_metric_ids_in_entities.values():
if derived_metric_obj_ids.intersection(ids_per_entity) == derived_metric_obj_ids:
found_derived_metrics.add(derived_metric_name)
# If we find a match in ids in one entity, then skip checks across entities
break
for composite_derived_metric_name in composite_entity_derived_metrics:
# We naively loop over singular entity derived metric constituents of a composite entity
# derived metric and check if they have already been found and if that is the case,
# then we add that instance of composite metric to the found derived metric.
composite_derived_metric_obj = DERIVED_METRICS[composite_derived_metric_name]
single_entity_constituents = set(
list(
composite_derived_metric_obj.naively_generate_singular_entity_constituents().values()
).pop()
)
if single_entity_constituents.issubset(found_derived_metrics):
found_derived_metrics.add(composite_derived_metric_obj.metric_name)
return found_derived_metrics
|
def get_available_derived_metrics(
supported_metric_ids_in_entities: Dict[MetricType, Sequence[int]],
) -> Set[str]:
"""
Function that takes as input a dictionary of the available ids in each entity, and in turn
goes through each derived metric, and returns back the set of the derived metrics that have
data in the dataset in respect to the project filter. For instances of
SingularEntityDerivedMetrics, it is enough to make sure that the constituent metric ids span
a single entity and are present in the passed in dictionary. On the other hand, the available
instances of CompositeEntityDerivedMetrics are computed from the found constituent instances
of SingularEntityDerivedMetric
"""
found_derived_metrics = set()
composite_entity_derived_metrics = set()
for derived_metric_name, derived_metric_obj in DERIVED_METRICS.items():
try:
derived_metric_obj_ids = derived_metric_obj.generate_metric_ids()
except NotSupportedOverCompositeEntityException:
# If we encounter a derived metric composed of constituents spanning multiple
# entities then we store it in this set
composite_entity_derived_metrics.add(derived_metric_obj.metric_name)
continue
for ids_per_entity in supported_metric_ids_in_entities.values():
if derived_metric_obj_ids.intersection(ids_per_entity) == derived_metric_obj_ids:
found_derived_metrics.add(derived_metric_name)
# If we find a match in ids in one entity, then skip checks across entities
break
for composite_derived_metric_name in composite_entity_derived_metrics:
# We naively loop over singular entity derived metric constituents of a composite entity
# derived metric and check if they have already been found and if that is the case,
# then we add that instance of composite metric to the found derived metric.
composite_derived_metric_obj = DERIVED_METRICS[composite_derived_metric_name]
single_entity_constituents = set(
list(
composite_derived_metric_obj.naively_generate_singular_entity_constituents().values()
).pop()
)
if single_entity_constituents.issubset(found_derived_metrics):
found_derived_metrics.add(composite_derived_metric_obj.metric_name)
return found_derived_metrics
|
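The core availability test in this pair is a subset check: a singular-entity derived metric counts as available only when all of its constituent metric ids fall inside a single entity. A toy sketch with hypothetical entity names and ids:

derived_metric_ids = {1, 2}
supported_metric_ids_in_entities = {
    "metrics_counters": [1, 2, 5],  # hypothetical entity -> metric ids
    "metrics_sets": [7],
}

available = any(
    derived_metric_ids.intersection(ids) == derived_metric_ids
    for ids in supported_metric_ids_in_entities.values()
)
print(available)  # True: "metrics_counters" covers both constituent ids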
24,871 |
def check_config_7(machine, old_conf, new_conf):
"""Example code must not trigger the message
Given an if construct without an elif
When the body of the if ends with an if
Then no message shall be triggered.
"""
if old_conf:
if not new_conf:
machine.disable()
elif old_conf.value != new_conf.value:
machine.disable()
machine.enable(new_conf.value)
else:
pass
|
def not_triggered_if_outer_block_does_not_have_elif(machine, old_conf, new_conf):
"""Example code must not trigger the message
Given an if construct without an elif
When the body of the if ends with an if
Then no message shall be triggered.
"""
if old_conf:
if not new_conf:
machine.disable()
elif old_conf.value != new_conf.value:
machine.disable()
machine.enable(new_conf.value)
else:
pass
|
28,056 |
def ch_workdir(metadata: Optional[Dict]):
""" Change workspace directory """
if not metadata or 'working_directory' not in metadata:
return
working_dir = metadata['working_directory']
try:
os.chdir(working_dir)
except OSError as oerr:
LOG.debug(oerr)
LOG.error("Working directory %s is missing.\nCan not parse reports "
"safely.", working_dir)
sys.exit(1)
|
def ch_workdir(metadata: Optional[Dict]):
""" Change working directory to the one noted in metadata.json if this file exists and contains "working_directory". """
if not metadata or 'working_directory' not in metadata:
return
working_dir = metadata['working_directory']
try:
os.chdir(working_dir)
except OSError as oerr:
LOG.debug(oerr)
LOG.error("Working directory %s is missing.\nCan not parse reports "
"safely.", working_dir)
sys.exit(1)
|
26,259 |
def package_config_from_data(packages_data):
if packages_data is None:
packages_data = {'packages': []}
try:
packages = PackageConfig.from_dict(packages_data)
except ValidationError as e:
raise DbtProjectError(
MALFORMED_PACKAGE_ERROR
) from e
return packages
|
def package_config_from_data(packages_data):
if packages_data is None:
packages_data = {'packages': []}
try:
packages = PackageConfig.from_dict(packages_data)
except ValidationError as e:
raise DbtProjectError(
MALFORMED_PACKAGE_ERROR.format(error=str(e))
) from e
return packages
|
43,265 |
def create_Unsupervised_graphSAGE_model(graph):
generator = GraphSAGELinkGenerator(graph, batch_size=2, num_samples=[2, 2])
unsupervisedSamples = UnsupervisedSampler(
graph, nodes=graph.nodes(), length=3, number_of_walks=2
)
train_gen = generator.flow(unsupervisedSamples)
base_model = GraphSAGE(
layer_sizes=[8, 8], generator=train_gen, bias=True, dropout=0.5
)
x_inp, x_out = base_model.build()
prediction = link_classification(
output_dim=1, output_act="relu", edge_embedding_method="ip"
)(x_out)
keras_model = Model(inputs=x_inp, outputs=prediction)
return base_model, keras_model, generator, train_gen
|
def create_Unsupervised_graphSAGE_model(graph):
generator = GraphSAGELinkGenerator(graph, batch_size=2, num_samples=[2, 2])
unsupervisedSamples = UnsupervisedSampler(
graph, nodes=graph.nodes(), length=3, number_of_walks=2
)
train_gen = generator.flow(unsupervisedSamples)
base_model = GraphSAGE(
layer_sizes=[8, 8], generator=generator, bias=True, dropout=0.5
)
x_inp, x_out = base_model.build()
prediction = link_classification(
output_dim=1, output_act="relu", edge_embedding_method="ip"
)(x_out)
keras_model = Model(inputs=x_inp, outputs=prediction)
return base_model, keras_model, generator, train_gen
|
24,833 |
def run_using_a_configuration_file(
configuration_path: Union[Path, str], file_to_lint: str = __file__
) -> Tuple[Mock, Mock, Run]:
"""Simulate a run with a configuration without really launching the checks."""
configuration_path = str(configuration_path)
args = ["--rcfile", configuration_path, file_to_lint]
# If we used `pytest.raises(SystemExit)`, the `runner` variable
# would not be accessible outside the `with` block.
with unittest.mock.patch("sys.exit") as mocked_exit:
# Do not actually run checks, that could be slow. Do not mock
# `Pylinter.check`: it calls `Pylinter.initialize` which is
# needed to properly set up messages inclusion/exclusion
# in `_msg_states`, used by `is_message_enabled`.
check = "pylint.lint.pylinter.check_parallel"
with unittest.mock.patch(check) as mocked_check_parallel:
runner = Run(args)
return mocked_exit, mocked_check_parallel, runner
|
def run_using_a_configuration_file(
configuration_path: Union[Path, str], file_to_lint: str = __file__
) -> Tuple[Mock, Mock, Run]:
"""Simulate a run with a configuration without really launching the checks."""
configuration_path = str(configuration_path)
args = ["--rcfile", configuration_path, file_to_lint]
# We do not capture the `SystemExit` as then the `runner` variable
# would not be accessible outside the `with` block.
with unittest.mock.patch("sys.exit") as mocked_exit:
# Do not actually run checks, that could be slow. Do not mock
# `Pylinter.check`: it calls `Pylinter.initialize` which is
# needed to properly set up messages inclusion/exclusion
# in `_msg_states`, used by `is_message_enabled`.
check = "pylint.lint.pylinter.check_parallel"
with unittest.mock.patch(check) as mocked_check_parallel:
runner = Run(args)
return mocked_exit, mocked_check_parallel, runner
|
49,627 |
def from_dict(data, npartitions=None, orient='columns', dtype=None, columns=None):
"""
Construct a Dask DataFrame from a Python Dictionary
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
npartitions : int, optional
The number of partitions of the index to create. Note that depending on
the size and index of the dataframe, the output may have fewer
partitions than requested.
orient : {'columns', 'index', 'tight'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names'].
dtype: bool
Data type to force, otherwise infer.
columns: string, optional
Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``.
Examples
--------
>>> import dask.dataframe as dd
>>> ddf = dd.from_dict({"num1": [1, 2, 3, 4], "num2": [7, 8, 9, 10]}, npartitions=2)
"""
pdf = pd.DataFrame.from_dict(data, orient, dtype, columns)
ddf = from_pandas(pdf, npartitions)
return ddf
|
def from_dict(data, npartitions=None, orient='columns', dtype=None, columns=None):
"""
Construct a Dask DataFrame from a Python Dictionary
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
npartitions : int, optional
The number of partitions of the index to create. Note that depending on
the size and index of the dataframe, the output may have fewer
partitions than requested.
orient : {'columns', 'index', 'tight'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names'].
dtype: bool
Data type to force, otherwise infer.
columns: string, optional
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'`` or ``orient='tight'``.
Examples
--------
>>> import dask.dataframe as dd
>>> ddf = dd.from_dict({"num1": [1, 2, 3, 4], "num2": [7, 8, 9, 10]}, npartitions=2)
"""
pdf = pd.DataFrame.from_dict(data, orient, dtype, columns)
ddf = from_pandas(pdf, npartitions)
return ddf
|
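A usage sketch for the wrapper above (assuming a Dask release that ships from_dict, plus pandas), mirroring the docstring example and materializing the result:

import dask.dataframe as dd

ddf = dd.from_dict({"num1": [1, 2, 3, 4], "num2": [7, 8, 9, 10]}, npartitions=2)
print(ddf.npartitions)  # 2
print(ddf.compute())    # the underlying 4-row pandas DataFrame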
30,778 |
def fetch_incidents():
last_run = demisto.getLastRun()
last_incidents_ids = []
if last_run:
last_fetch = last_run.get('time')
last_fetch = datetime.strptime(last_fetch, TIME_FORMAT)
last_incidents_ids = last_run.get('last_event_ids')
else:
# first time fetching
last_fetch = parse_date_range(demisto.params().get('fetch_time', '3 days'), TIME_FORMAT)[0]
LOG('iterating on detections, looking for more recent than {}'.format(last_fetch))
incidents = []
new_incidents_ids = []
for raw_detection in get_unacknowledged_detections(last_fetch, per_page=2):
LOG('found detection #{}'.format(raw_detection['id']))
incident = detection_to_incident(raw_detection)
        # rawJSON is a dictionary serialized as a string, e.g. '{"ID":2,"Type":5}'
incident_id = json.loads(incident['rawJSON']).get("ID")
if incident_id not in last_incidents_ids:
            # make sure the incident wasn't fetched before
incidents.append(incident)
new_incidents_ids.append(incident_id)
if incidents:
last_fetch = max([get_time_obj(incident['occurred']) for incident in incidents]) # noqa:F812
last_run = {'time': get_time_str(last_fetch), 'last_event_ids': new_incidents_ids}
return last_run, incidents
|
def fetch_incidents():
last_run = demisto.getLastRun()
last_incidents_ids = []
if last_run:
last_fetch = last_run.get('time')
last_fetch = datetime.strptime(last_fetch, TIME_FORMAT)
last_incidents_ids = last_run.get('last_event_ids')
else:
# first time fetching
last_fetch = parse_date_range(demisto.params().get('fetch_time', '3 days'), TIME_FORMAT)[0]
LOG('iterating on detections, looking for more recent than {}'.format(last_fetch))
incidents = []
new_incidents_ids = []
for raw_detection in get_unacknowledged_detections(last_fetch, per_page=2):
LOG('found detection #{}'.format(raw_detection['id']))
incident = detection_to_incident(raw_detection)
        # rawJSON is a dictionary serialized as a string, e.g. '{"ID":2,"Type":5}'
incident_id = json.loads(incident.get('rawJSON')).get("ID")
if incident_id not in last_incidents_ids:
            # make sure the incident wasn't fetched before
incidents.append(incident)
new_incidents_ids.append(incident_id)
if incidents:
last_fetch = max([get_time_obj(incident['occurred']) for incident in incidents]) # noqa:F812
last_run = {'time': get_time_str(last_fetch), 'last_event_ids': new_incidents_ids}
return last_run, incidents
|
47,157 |
def get_adafactor_schedule(optimizer, initial_lr=0.0):
"""
Get a proxy schedule for :class:`~transformers.optimization.Adafactor`
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
initial_lr (:obj:`float`, `optional`, defaults to 0.0):
Initial lr
Return:
:class:`~transformers.optimization.Adafactor` proxy schedule object.
"""
return AdafactorSchedule(optimizer, initial_lr)
|
def get_adafactor_schedule(optimizer, initial_lr=0.0):
"""
Get a proxy schedule for :class:`~transformers.optimization.Adafactor`
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
initial_lr (:obj:`float`, `optional`, defaults to 0.0):
Initial lr
Return:
:class:`~transformers.optimization.Adafactor` proxy schedule object.
"""
return AdafactorSchedule(optimizer, initial_lr)
|
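A usage sketch following the documented pattern for Adafactor's internal, relative-step learning rate (assumes torch and transformers are installed; the toy linear model is a hypothetical stand-in). The proxy schedule lets training loops that expect a scheduler object keep working:

import torch
from transformers.optimization import Adafactor, get_adafactor_schedule

model = torch.nn.Linear(8, 2)  # hypothetical stand-in for a real model
optimizer = Adafactor(
    model.parameters(),
    scale_parameter=True, relative_step=True, warmup_init=True, lr=None,
)
lr_scheduler = get_adafactor_schedule(optimizer)
print(lr_scheduler.get_last_lr())  # proxy value until the optimizer has stepped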
24,853 |
def my_func(self, doc_type): # [missing-return-doc]
"""This is a docstring.
Arguments
---------
doc_type : str
Numpy
Returns
-------
bool
"""
return False
|
def my_func(self, doc_type): # [missing-return-doc]
"""warn_partial_numpy_returns_type
Arguments
---------
doc_type : str
Numpy
Returns
-------
bool
"""
return False
|
35,196 |
def kronsum(A, B, format=None):
"""Kronecker sum of sparse matrices A and B.
Kronecker sum is matrix sum defined as sum of two Kronecker products
kron(I_n, A) + kron(B, I_m), where I_n and I_m are identity matrices
Args:
A (cupyx.scipy.sparse.spmatrix): a sparse matrix.
B (cupyx.scipy.sparse.spmatrix): a sparse matrix.
format (str): the format of the returned sparse matrix.
Returns:
cupyx.scipy.sparse.spmatrix:
Generated sparse matrix with the specified ``format``.
.. seealso:: :func:`scipy.sparse.kronsum`
"""
A = coo.coo_matrix(A)
B = coo.coo_matrix(B)
if A.shape[0] != A.shape[1]:
raise ValueError('A is not square matrix')
if B.shape[0] != B.shape[1]:
raise ValueError('B is not square matrix')
dtype = sputils.upcast(A.dtype, B.dtype)
L = kron(eye(B.shape[0], dtype=dtype), A, format=format)
R = kron(B, eye(A.shape[0], dtype=dtype), format=format)
return (L + R).asformat(format)
|
def kronsum(A, B, format=None):
"""Kronecker sum of sparse matrices A and B.
Kronecker sum is matrix sum defined as sum of two Kronecker products
kron(I_n, A) + kron(B, I_m), where I_n and I_m are identity matrices
Args:
A (cupyx.scipy.sparse.spmatrix): a sparse matrix.
B (cupyx.scipy.sparse.spmatrix): a sparse matrix.
format (str): the format of the returned sparse matrix.
Returns:
cupyx.scipy.sparse.spmatrix:
Generated sparse matrix with the specified ``format``.
.. seealso:: :func:`scipy.sparse.kronsum`
"""
A = coo.coo_matrix(A)
B = coo.coo_matrix(B)
if A.shape[0] != A.shape[1]:
raise ValueError('A is not square matrix')
if B.shape[0] != B.shape[1]:
raise ValueError('B is not square matrix')
dtype = sputils.upcast(A.dtype, B.dtype)
L = kron(eye(B.shape[0], dtype=dtype), A, format=format)
R = kron(B, eye(A.shape[0], dtype=dtype), format=format)
return L + R
|
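A quick check of the identity the function implements, using the SciPy equivalent named in the docstring's seealso (assumes SciPy and NumPy are installed, since a CUDA device may not be available for the CuPy version):

import numpy as np
from scipy import sparse

A = sparse.csr_matrix(np.array([[0, 2], [5, 0]]))  # m x m, square
B = sparse.csr_matrix(np.array([[1, 0], [4, 3]]))  # n x n, square
lhs = sparse.kronsum(A, B).toarray()
rhs = (sparse.kron(sparse.identity(2), A)
       + sparse.kron(B, sparse.identity(2))).toarray()
print(np.array_equal(lhs, rhs))  # True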
28,791 |
def cooldown(rate, per, type=BucketType.default):
"""A decorator that adds a cooldown to a :class:`.Command`
A cooldown allows a command to only be used a specific amount
of times in a specific time frame. These cooldowns can be based
either on a per-guild, per-channel, per-user, per-role or global basis.
Denoted by the third argument of ``type`` which must be of enum
type :class:`.BucketType`.
If a cooldown is triggered, then :exc:`.CommandOnCooldown` is triggered in
:func:`.on_command_error` and the local error handler.
A command can only have a single cooldown.
Parameters
------------
rate: :class:`int`
The number of times a command can be used before triggering a cooldown.
per: :class:`float`
The amount of seconds to wait for a cooldown when it's been triggered.
type: Union[:class:`.BucketType`, Callable[:class:`.Message`, Any]]
The type of cooldown to have. If callable, should return a key for the mapping.
"""
def decorator(func):
if isinstance(func, Command):
func._buckets = CooldownMapping(Cooldown(rate, per, type))
else:
func.__commands_cooldown__ = Cooldown(rate, per, type)
return func
return decorator
|
def cooldown(rate, per, type=BucketType.default):
"""A decorator that adds a cooldown to a :class:`.Command`
A cooldown allows a command to only be used a specific amount
of times in a specific time frame. These cooldowns can be based
either on a per-guild, per-channel, per-user, per-role or global basis.
Denoted by the third argument of ``type`` which must be of enum
type :class:`.BucketType`.
If a cooldown is triggered, then :exc:`.CommandOnCooldown` is triggered in
:func:`.on_command_error` and the local error handler.
A command can only have a single cooldown.
Parameters
------------
rate: :class:`int`
The number of times a command can be used before triggering a cooldown.
per: :class:`float`
The amount of seconds to wait for a cooldown when it's been triggered.
type: Union[:class:`.BucketType`, Callable[[:class:`.Context`], Any]]
The type of cooldown to have. If callable, should return a key for the mapping.
"""
def decorator(func):
if isinstance(func, Command):
func._buckets = CooldownMapping(Cooldown(rate, per, type))
else:
func.__commands_cooldown__ = Cooldown(rate, per, type)
return func
return decorator
|
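A hedged usage sketch for the cooldown decorator in the pair above (illustrative only; the bot prefix, command name, and reply text are made up): at most one call per user every 60 seconds, with a local error handler catching CommandOnCooldown.
import discord
from discord.ext import commands

bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())

@bot.command()
@commands.cooldown(rate=1, per=60.0, type=commands.BucketType.user)
async def ping(ctx):
    await ctx.send("pong")

@ping.error
async def ping_error(ctx, error):
    # Triggered when the bucket for this user is exhausted.
    if isinstance(error, commands.CommandOnCooldown):
        await ctx.send(f"On cooldown, retry in {error.retry_after:.0f}s")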
30,625 |
def appendContext(key, data, dedup=False):
"""
Append data to the investigation context
:type key: ``str``
:param key: The context path (required)
:type data: ``any``
:param data: Data to be added to the context (required)
:type dedup: ``bool``
:param dedup: True if de-duplication is required. Default is False.
:return: No data returned
:rtype: ``None``
"""
if data is None:
return
existing = demisto.get(demisto.context(), key)
if existing:
if isinstance(existing, STRING_TYPES):
if isinstance(data, STRING_TYPES):
new_val = data + ',' + existing
else:
return_error("Cannot append data to the existing context - \n The data is of instance {} while the "
"context in the specified path is of instance {}.".format(type(data), type(existing)))
if isinstance(existing, dict):
if isinstance(data, dict):
existing.update(data)
new_val = existing
else:
return_error("Cannot append data to the existing context - \n The data is of instance {} while the "
"context in the specified path is of instance {}.".format(type(data), type(existing)))
if isinstance(existing, list):
if isinstance(data, list):
existing.extend(data)
else:
existing.append(data)
new_val = existing
if dedup:
new_val = list(set(new_val))
demisto.setContext(key, new_val)
else:
demisto.setContext(key, data)
|
def appendContext(key, data, dedup=False):
"""
Append data to the investigation context
:type key: ``str``
:param key: The context path (required)
:type data: ``any``
:param data: Data to be added to the context (required)
:type dedup: ``bool``
:param dedup: True if de-duplication is required. Default is False.
:return: No data returned
:rtype: ``None``
"""
if data is None:
return
existing = demisto.get(demisto.context(), key)
if existing:
if isinstance(existing, STRING_TYPES):
if isinstance(data, STRING_TYPES):
new_val = data + ',' + existing
else:
return_error("Cannot append data to the existing context - \n The data is of type {} while the "
"context in the specified path is of instance {}.".format(type(data), type(existing)))
if isinstance(existing, dict):
if isinstance(data, dict):
existing.update(data)
new_val = existing
else:
return_error("Cannot append data to the existing context - \n The data is of instance {} while the "
"context in the specified path is of instance {}.".format(type(data), type(existing)))
if isinstance(existing, list):
if isinstance(data, list):
existing.extend(data)
else:
existing.append(data)
new_val = existing
if dedup:
new_val = list(set(new_val))
demisto.setContext(key, new_val)
else:
demisto.setContext(key, data)
|
33,298 |
def _get_peertube_url(url):
url_parsed = urlparse(url)
embed_url = [x for x in url_parsed]
embed_url[2] = embed_url[2].replace('/videos/watch/', '/videos/embed/')
return urlunparse(embed_url)
|
def _get_peertube_url(url):
url_parsed = urlparse(url)
embed_url = list(url_parsed)
embed_url[2] = embed_url[2].replace('/videos/watch/', '/videos/embed/')
return urlunparse(embed_url)
|
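For context on the pair above: urlparse returns an immutable ParseResult tuple, so both versions copy it into a list before rewriting the path component; list(...) is simply the more idiomatic spelling of the list comprehension. A standalone check with a placeholder URL:
from urllib.parse import urlparse, urlunparse

url = "https://example.org/videos/watch/abc123"   # placeholder URL
parts = list(urlparse(url))                        # ParseResult is immutable; copy into a list
parts[2] = parts[2].replace("/videos/watch/", "/videos/embed/")
assert urlunparse(parts) == "https://example.org/videos/embed/abc123"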
33,749 |
def mlflow_mixin(func: Callable):
"""mlflow_mixin
MLFlow (https://mlflow.org) Tracking is an open source library for
recording and querying experiments. This Ray Tune Trainable mixin helps
initialize the MLflow API for use with the ``Trainable`` class or the
`@mlflow_mixin` function API. This mixin automatically configures mlflow
and creates a run in the same process as each Tune trial. You can then
use the mlflow API inside the your training function and it will
automatically get reported to the correct run.
For basic usage, just prepend your training function with the
``@mlflow_mixin`` decorator:
.. code-block:: python
from ray.tune.integration.mlflow import mlflow_mixin
@mlflow_mixin
def train_fn(config):
...
mlflow.log_metric(...)
You can also use Mlflow's autologging feature if using a training
framework like Pytorch Lightning, XGBoost, etc. More information can be
found here (https://mlflow.org/docs/latest/tracking.html#automatic
-logging).
.. code-block:: python
from ray.tune.integration.mlflow import mlflow_mixin
@mlflow_mixin
def train_fn(config):
mlflow.autolog()
xgboost_results = xgb.train(config, ...)
The Mlflow configuration is done by passing a ``mlflow`` key to
the ``config`` parameter of ``tune.run()`` (see example below).
The content of the ``mlflow`` config entry is used to
configure Mlflow. Here are the keys you can pass in to this config entry:
Args:
tracking_uri (str): The tracking URI for MLflow tracking. If using
Tune in a multi-node setting, make sure to use a remote server for
tracking.
experiment_id (str): The id of an already created MLflow experiment.
All logs from all trials in ``tune.run`` will be reported to this
experiment. If this is not provided, you must provide an
``experiment_name``.
experiment_name (str): The name of an already created MLflow
experiment. All logs from all trials in ``tune.run`` will be
reported to this experiment. If this is not provided, you must
provide an ``experiment_id``.
Example:
.. code-block:: python
from ray import tune
from ray.tune.integration.mlflow import mlflow_mixin
import mlflow
# Create the Mlflow expriment.
mlflow.create_experiment("my_experiment")
@mlflow_mixin
def train_fn(config):
for i in range(10):
loss = self.config["a"] + self.config["b"]
mlflow.log_metric(key="loss", value=loss})
tune.report(loss=loss, done=True)
tune.run(
train_fn,
config={
# define search space here
"a": tune.choice([1, 2, 3]),
"b": tune.choice([4, 5, 6]),
# mlflow configuration
"mlflow": {
"experiment_name": "my_experiment",
"tracking_uri": mlflow.get_tracking_uri()
}
})
"""
if mlflow is None:
raise RuntimeError("mlflow has not been installed. Please `pip "
"install mlflow` to use the mlflow_mixin.")
func.__mixins__ = (MLFlowTrainableMixin, )
return func
|
def mlflow_mixin(func: Callable):
"""mlflow_mixin
MLFlow (https://mlflow.org) Tracking is an open source library for
recording and querying experiments. This Ray Tune Trainable mixin helps
initialize the MLflow API for use with the ``Trainable`` class or the
``@mlflow_mixin`` function API. This mixin automatically configures MLFlow
and creates a run in the same process as each Tune trial. You can then
use the mlflow API inside the your training function and it will
automatically get reported to the correct run.
For basic usage, just prepend your training function with the
``@mlflow_mixin`` decorator:
.. code-block:: python
from ray.tune.integration.mlflow import mlflow_mixin
@mlflow_mixin
def train_fn(config):
...
mlflow.log_metric(...)
You can also use Mlflow's autologging feature if using a training
framework like Pytorch Lightning, XGBoost, etc. More information can be
found here (https://mlflow.org/docs/latest/tracking.html#automatic
-logging).
.. code-block:: python
from ray.tune.integration.mlflow import mlflow_mixin
@mlflow_mixin
def train_fn(config):
mlflow.autolog()
xgboost_results = xgb.train(config, ...)
The Mlflow configuration is done by passing a ``mlflow`` key to
the ``config`` parameter of ``tune.run()`` (see example below).
The content of the ``mlflow`` config entry is used to
configure Mlflow. Here are the keys you can pass in to this config entry:
Args:
tracking_uri (str): The tracking URI for MLflow tracking. If using
Tune in a multi-node setting, make sure to use a remote server for
tracking.
experiment_id (str): The id of an already created MLflow experiment.
All logs from all trials in ``tune.run`` will be reported to this
experiment. If this is not provided, you must provide an
``experiment_name``.
experiment_name (str): The name of an already created MLflow
experiment. All logs from all trials in ``tune.run`` will be
reported to this experiment. If this is not provided, you must
provide an ``experiment_id``.
Example:
.. code-block:: python
from ray import tune
from ray.tune.integration.mlflow import mlflow_mixin
import mlflow
# Create the Mlflow expriment.
mlflow.create_experiment("my_experiment")
@mlflow_mixin
def train_fn(config):
for i in range(10):
loss = self.config["a"] + self.config["b"]
mlflow.log_metric(key="loss", value=loss})
tune.report(loss=loss, done=True)
tune.run(
train_fn,
config={
# define search space here
"a": tune.choice([1, 2, 3]),
"b": tune.choice([4, 5, 6]),
# mlflow configuration
"mlflow": {
"experiment_name": "my_experiment",
"tracking_uri": mlflow.get_tracking_uri()
}
})
"""
if mlflow is None:
raise RuntimeError("mlflow has not been installed. Please `pip "
"install mlflow` to use the mlflow_mixin.")
func.__mixins__ = (MLFlowTrainableMixin, )
return func
|
2,243 |
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([label.argmax() for label in Y_proba])
assert not (pred - Y_pred).any()
|
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label with the greatest predictive probability.
pred = np.array([label.argmax() for label in Y_proba])
assert not (pred - Y_pred).any()
|
29,151 |
def open_file(filename, mode, encoding='utf-8', newline=None):
"""Open file and return a corresponding file object.
Args:
filename: str. The file to be opened.
mode: str. Mode in which the file is opened.
encoding: str. Encoding in which the file is opened.
newline: None|str. Controls how universal newlines work.
Returns:
_io.TextIOWrapper. The file object.
Raises:
IOError. If the file is unable to open.
"""
# The try/except is needed here to unify the errors because io.open in
# Python 3 throws FileNotFoundError while in Python 2 it throws an IOError.
# This should be removed after we fully migrate to Python 3.
try:
return io.open(filename, mode, encoding=encoding, newline=newline)
except:
raise IOError('Unable to open file: %s' % filename)
|
def open_file(filename, mode, encoding='utf-8', newline=None):
"""Open file and return a corresponding file object.
Args:
filename: str. The file to be opened.
mode: str. Mode in which the file is opened.
encoding: str. Encoding in which the file is opened.
newline: None|str. Controls how universal newlines work.
Returns:
_io.TextIOWrapper. The file object.
Raises:
IOError. If the file can't be opened.
"""
# The try/except is needed here to unify the errors because io.open in
# Python 3 throws FileNotFoundError while in Python 2 it throws an IOError.
# This should be removed after we fully migrate to Python 3.
try:
return io.open(filename, mode, encoding=encoding, newline=newline)
except:
raise IOError('Unable to open file: %s' % filename)
|
25,779 |
def define_ramp_limit_constraints(n, sns, c='Generator', commitable=True):
"""
Defines ramp limits for generators and links with valid ramplimit.
"""
test_components = ['Generator', 'Link']
assert c in test_components, 'Ramp limit constraints were only tested for Generator and Link.'
rup_i = n.df(c).query('ramp_limit_up == ramp_limit_up').index
rdown_i = n.df(c).query('ramp_limit_down == ramp_limit_down').index
if rup_i.empty & rdown_i.empty:
return
fix_i = get_non_extendable_i(n, c)
ext_i = get_extendable_i(n, c)
p = get_var(n, c, 'p').loc[sns[1:]]
p_prev = get_var(n, c, 'p').shift(1).loc[sns[1:]]
active = get_activity_mask(n, c, sns[1:])
# fix up
gens_i = rup_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
kwargs = dict(spec='nonext.', mask=active[gens_i])
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs)
# ext up
gens_i = rup_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_up'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (-limit_pu, p_nom))
kwargs = dict(spec='ext.', mask=active[gens_i])
define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', **kwargs)
# fix down
gens_i = rdown_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = n.df(c).loc[gens_i].eval('-1 * ramp_limit_down * p_nom')
kwargs = dict(spec='nonext.', mask=active[gens_i])
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs)
# ext down
gens_i = rdown_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_down'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_pu, p_nom))
kwargs = dict(spec='ext.', mask=active[gens_i])
define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', **kwargs)
if commitable:
assert c=='Generator', 'Commitable contraints were only tested for Generator.'
com_i = n.df(c).query('committable').index.difference(ext_i)
# com up
gens_i = rup_i.intersection(com_i)
if not gens_i.empty:
limit_start = n.df(c).loc[gens_i].eval('ramp_limit_start_up * p_nom')
limit_up = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
status = get_var(n, c, 'status').loc[sns[1:], gens_i]
status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_start - limit_up, status_prev),
(- limit_start, status))
kwargs = dict(spec='com.', mask=active[gens_i])
define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', **kwargs)
# com down
gens_i = rdown_i.intersection(com_i)
if not gens_i.empty:
limit_shut = n.df(c).loc[gens_i].eval('ramp_limit_shut_down * p_nom')
limit_down = n.df(c).loc[gens_i].eval('ramp_limit_down * p_nom')
status = get_var(n, c, 'status').loc[sns[1:], gens_i]
status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_down - limit_shut, status),
(limit_shut, status_prev))
kwargs = dict(spec='com.', mask=active[gens_i])
define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', **kwargs)
|
def define_ramp_limit_constraints(n, sns, c):
"""
Defines ramp limits for generators and links with valid ramplimit.
"""
test_components = ['Generator', 'Link']
assert c in test_components, 'Ramp limit constraints were only tested for Generator and Link.'
rup_i = n.df(c).query('ramp_limit_up == ramp_limit_up').index
rdown_i = n.df(c).query('ramp_limit_down == ramp_limit_down').index
if rup_i.empty & rdown_i.empty:
return
fix_i = get_non_extendable_i(n, c)
ext_i = get_extendable_i(n, c)
p = get_var(n, c, 'p').loc[sns[1:]]
p_prev = get_var(n, c, 'p').shift(1).loc[sns[1:]]
active = get_activity_mask(n, c, sns[1:])
# fix up
gens_i = rup_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
kwargs = dict(spec='nonext.', mask=active[gens_i])
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs)
# ext up
gens_i = rup_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_up'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (-limit_pu, p_nom))
kwargs = dict(spec='ext.', mask=active[gens_i])
define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', **kwargs)
# fix down
gens_i = rdown_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = n.df(c).loc[gens_i].eval('-1 * ramp_limit_down * p_nom')
kwargs = dict(spec='nonext.', mask=active[gens_i])
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs)
# ext down
gens_i = rdown_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_down'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_pu, p_nom))
kwargs = dict(spec='ext.', mask=active[gens_i])
define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', **kwargs)
if commitable:
assert c=='Generator', 'Commitable contraints were only tested for Generator.'
com_i = n.df(c).query('committable').index.difference(ext_i)
# com up
gens_i = rup_i.intersection(com_i)
if not gens_i.empty:
limit_start = n.df(c).loc[gens_i].eval('ramp_limit_start_up * p_nom')
limit_up = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
status = get_var(n, c, 'status').loc[sns[1:], gens_i]
status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_start - limit_up, status_prev),
(- limit_start, status))
kwargs = dict(spec='com.', mask=active[gens_i])
define_constraints(n, lhs, '<=', 0, c, 'mu_ramp_limit_up', **kwargs)
# com down
gens_i = rdown_i.intersection(com_i)
if not gens_i.empty:
limit_shut = n.df(c).loc[gens_i].eval('ramp_limit_shut_down * p_nom')
limit_down = n.df(c).loc[gens_i].eval('ramp_limit_down * p_nom')
status = get_var(n, c, 'status').loc[sns[1:], gens_i]
status_prev = get_var(n, c, 'status').shift(1).loc[sns[1:], gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_down - limit_shut, status),
(limit_shut, status_prev))
kwargs = dict(spec='com.', mask=active[gens_i])
define_constraints(n, lhs, '>=', 0, c, 'mu_ramp_limit_down', **kwargs)
|
35,434 |
def get_lag_adjusted_curvature(CP, v_ego, psis, curvatures, curvature_rates):
if not len(psis) == CONTROL_N:
psis = [0.0 for i in range(CONTROL_N)]
curvatures = [0.0 for i in range(CONTROL_N)]
curvature_rates = [0.0 for i in range(CONTROL_N)]
# TODO this needs more thought, use .2s extra for now to estimate other delays
delay = CP.steerActuatorDelay + .2
current_curvature = curvatures[0]
psi = interp(delay, T_IDXS[:CONTROL_N], psis)
next_curvature_rate = curvature_rates[0]
# MPC can plan to turn the wheel and turn back before t_delay. This means
# in high delay cases some corrections never even get commanded. So just use
# psi to calculate a simple linearization of desired curvature
curvature_diff_from_psi = psi / (max(v_ego, 1e-1) * delay) - current_curvature
next_curvature = current_curvature + 2 * curvature_diff_from_psi
desired_curvature = next_curvature
desired_curvature_rate = next_curvature_rate
max_curvature_rate = interp(v_ego, MAX_CURVATURE_RATE_SPEEDS, MAX_CURVATURE_RATES)
safe_desired_curvature_rate = clip(desired_curvature_rate,
-max_curvature_rate,
max_curvature_rate)
safe_desired_curvature = clip(desired_curvature,
current_curvature - max_curvature_rate/DT_MDL,
current_curvature + max_curvature_rate/DT_MDL)
return safe_desired_curvature, safe_desired_curvature_rate
|
def get_lag_adjusted_curvature(CP, v_ego, psis, curvatures, curvature_rates):
if len(psis) != CONTROL_N:
psis = [0.0 for i in range(CONTROL_N)]
curvatures = [0.0 for i in range(CONTROL_N)]
curvature_rates = [0.0 for i in range(CONTROL_N)]
# TODO this needs more thought, use .2s extra for now to estimate other delays
delay = CP.steerActuatorDelay + .2
current_curvature = curvatures[0]
psi = interp(delay, T_IDXS[:CONTROL_N], psis)
next_curvature_rate = curvature_rates[0]
# MPC can plan to turn the wheel and turn back before t_delay. This means
# in high delay cases some corrections never even get commanded. So just use
# psi to calculate a simple linearization of desired curvature
curvature_diff_from_psi = psi / (max(v_ego, 1e-1) * delay) - current_curvature
next_curvature = current_curvature + 2 * curvature_diff_from_psi
desired_curvature = next_curvature
desired_curvature_rate = next_curvature_rate
max_curvature_rate = interp(v_ego, MAX_CURVATURE_RATE_SPEEDS, MAX_CURVATURE_RATES)
safe_desired_curvature_rate = clip(desired_curvature_rate,
-max_curvature_rate,
max_curvature_rate)
safe_desired_curvature = clip(desired_curvature,
current_curvature - max_curvature_rate/DT_MDL,
current_curvature + max_curvature_rate/DT_MDL)
return safe_desired_curvature, safe_desired_curvature_rate
|
17,419 |
def _plot1d(plotfunc):
"""
Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods
"""
commondoc = """
Parameters
----------
darray : DataArray
Must be 2 dimensional, unless creating faceted plots
x : string, optional
Coordinate for x axis. If None use darray.dims[1]
y : string, optional
Coordinate for y axis. If None use darray.dims[0]
hue : string, optional
Dimension or coordinate for which you want multiple lines plotted.
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib.axes.Axes, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
col_wrap : int, optional
Use together with ``col`` to wrap faceted plots
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_labels : bool, optional
Use xarray metadata to label axes
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only used
for 2D and FacetGrid plots.
**kwargs : optional
Additional arguments to wrapped matplotlib function
Returns
-------
artist :
The same type of primitive artist that the wrapped matplotlib
function returns
"""
# Build on the original docstring
plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}"
# plotfunc and newplotfunc have different signatures:
# - plotfunc: (x, y, z, ax, **kwargs)
# - newplotfunc: (darray, *args, x, y, **kwargs)
# where plotfunc accepts numpy arrays, while newplotfunc accepts a DataArray
# and variable names. newplotfunc also explicitly lists most kwargs, so we
# need to shorten it
def signature(darray, *args, x, y, **kwargs):
pass
@override_signature(signature)
@functools.wraps(plotfunc)
def newplotfunc(
darray,
*args,
x=None,
y=None,
hue=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.update(allargs.pop("kwargs"))
allargs.pop("darray")
allargs.pop("plotfunc")
if plotfunc.__name__ == "line":
return _easy_facetgrid(darray, line, kind="line", **allargs)
else:
raise ValueError(f"Faceting not implemented for {plotfunc.__name__}")
# The allargs dict passed to _easy_facetgrid above contains args
if args == ():
args = kwargs.pop("args", ())
else:
assert "args" not in kwargs
ax = get_axis(figsize, size, aspect, ax)
xplt, yplt, hueplt, hue_label = _infer_line_data(darray, x, y, hue)
primitive = plotfunc(xplt, yplt, ax, *args, add_labels=add_labels, **kwargs)
if add_labels:
ax.set_title(darray._title_for_slice())
if hueplt is not None and add_legend:
if plotfunc.__name__ == "hist":
handles = primitive[-1]
else:
handles = primitive
ax.legend(
handles=handles,
labels=list(hueplt.values),
title=label_from_attrs(hueplt),
)
_update_axes(
ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim
)
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(
_PlotMethods_obj,
*args,
x=None,
y=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs["darray"] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ["_PlotMethods_obj", "newplotfunc", "kwargs"]:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc
|
def _plot1d(plotfunc):
"""
Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods
"""
commondoc = """
Parameters
----------
darray : DataArray
Dataarray to plot.
x : string, optional
Coordinate for x axis. If None use darray.dims[1]
y : string, optional
Coordinate for y axis. If None use darray.dims[0]
hue : string, optional
Dimension or coordinate for which you want multiple lines plotted.
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib.axes.Axes, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
col_wrap : int, optional
Use together with ``col`` to wrap faceted plots
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_labels : bool, optional
Use xarray metadata to label axes
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only used
for 2D and FacetGrid plots.
**kwargs : optional
Additional arguments to wrapped matplotlib function
Returns
-------
artist :
The same type of primitive artist that the wrapped matplotlib
function returns
"""
# Build on the original docstring
plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}"
# plotfunc and newplotfunc have different signatures:
# - plotfunc: (x, y, z, ax, **kwargs)
# - newplotfunc: (darray, *args, x, y, **kwargs)
# where plotfunc accepts numpy arrays, while newplotfunc accepts a DataArray
# and variable names. newplotfunc also explicitly lists most kwargs, so we
# need to shorten it
def signature(darray, *args, x, y, **kwargs):
pass
@override_signature(signature)
@functools.wraps(plotfunc)
def newplotfunc(
darray,
*args,
x=None,
y=None,
hue=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.update(allargs.pop("kwargs"))
allargs.pop("darray")
allargs.pop("plotfunc")
if plotfunc.__name__ == "line":
return _easy_facetgrid(darray, line, kind="line", **allargs)
else:
raise ValueError(f"Faceting not implemented for {plotfunc.__name__}")
# The allargs dict passed to _easy_facetgrid above contains args
if args == ():
args = kwargs.pop("args", ())
else:
assert "args" not in kwargs
ax = get_axis(figsize, size, aspect, ax)
xplt, yplt, hueplt, hue_label = _infer_line_data(darray, x, y, hue)
primitive = plotfunc(xplt, yplt, ax, *args, add_labels=add_labels, **kwargs)
if add_labels:
ax.set_title(darray._title_for_slice())
if hueplt is not None and add_legend:
if plotfunc.__name__ == "hist":
handles = primitive[-1]
else:
handles = primitive
ax.legend(
handles=handles,
labels=list(hueplt.values),
title=label_from_attrs(hueplt),
)
_update_axes(
ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim
)
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(
_PlotMethods_obj,
*args,
x=None,
y=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs["darray"] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ["_PlotMethods_obj", "newplotfunc", "kwargs"]:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc
|
43,089 |
def from_xir(xir_prog):
"""Convert an XIR Program to a Strawberry Fields program.
Args:
xir_prog (xir.Program): the input XIR program object
Returns:
Program: corresponding Strawberry Fields program
"""
# only script-level statements are part of `xir_prog.statements`, which can only have integer
# wires, leading to `xir_prog.wires` only containing integer wire labels
num_of_modes = int(max(xir_prog.wires or [-1])) + 1
name = xir_prog.options.get("name", "xir")
if num_of_modes == 0:
raise ValueError(
"The XIR program is empty and cannot be transformed into a Strawberry Fields program"
)
prog = sfp.Program(num_of_modes, name=name)
# append the quantum operations
with prog.context as q:
for op in xir_prog.statements:
# check if operation name is in the list of
# defined StrawberryFields operations.
# This is used by checking against the ops.py __all__
# module attribute, which contains the names
# of all defined quantum operations
if op.name in ops.__all__:
# get the quantum operation from the sf.ops module
gate = getattr(ops, op.name)
else:
raise NameError(f"Quantum operation {op.name!r} not defined!")
# create the list of regrefs
regrefs = [q[i] for i in op.wires]
if op.params:
# convert symbolic expressions to symbolic expressions containing the corresponding
# MeasuredParameter and FreeParameter instances.
if isinstance(op.params, dict):
vals = sfpar.par_convert(op.params.values(), prog)
params = dict(zip(op.params.keys(), vals))
gate(**params) | regrefs # pylint:disable=expression-not-assigned
else:
params = []
for p in op.params:
if isinstance(p, Decimal):
params.append(float(p))
elif isinstance(p, Iterable):
params.append(np.array(_listr(p)))
else:
params.append(p)
params = sfpar.par_convert(params, prog)
gate(*params) | regrefs # pylint:disable=expression-not-assigned
else:
if callable(gate):
gate() | regrefs # pylint:disable=expression-not-assigned,pointless-statement
else:
gate | regrefs # pylint:disable=expression-not-assigned,pointless-statement
prog._target = xir_prog.options.get("target", None) # pylint: disable=protected-access
if "shots" in xir_prog.options:
prog.run_options["shots"] = xir_prog.options["shots"]
if "cutoff_dim" in xir_prog.options:
prog.backend_options["cutoff_dim"] = xir_prog.options["cutoff_dim"]
return prog
|
def from_xir(xir_prog):
"""Convert an XIR Program to a Strawberry Fields program.
Args:
xir_prog (xir.Program): the input XIR program object
Returns:
Program: corresponding Strawberry Fields program
"""
# only script-level statements are part of `xir_prog.statements`, which can only have integer
# wires, leading to `xir_prog.wires` only containing integer wire labels
num_of_modes = int(max(xir_prog.wires or [-1])) + 1
name = xir_prog.options.get("name", "sf_from_xir")
if num_of_modes == 0:
raise ValueError(
"The XIR program is empty and cannot be transformed into a Strawberry Fields program"
)
prog = sfp.Program(num_of_modes, name=name)
# append the quantum operations
with prog.context as q:
for op in xir_prog.statements:
# check if operation name is in the list of
# defined StrawberryFields operations.
# This is used by checking against the ops.py __all__
# module attribute, which contains the names
# of all defined quantum operations
if op.name in ops.__all__:
# get the quantum operation from the sf.ops module
gate = getattr(ops, op.name)
else:
raise NameError(f"Quantum operation {op.name!r} not defined!")
# create the list of regrefs
regrefs = [q[i] for i in op.wires]
if op.params:
# convert symbolic expressions to symbolic expressions containing the corresponding
# MeasuredParameter and FreeParameter instances.
if isinstance(op.params, dict):
vals = sfpar.par_convert(op.params.values(), prog)
params = dict(zip(op.params.keys(), vals))
gate(**params) | regrefs # pylint:disable=expression-not-assigned
else:
params = []
for p in op.params:
if isinstance(p, Decimal):
params.append(float(p))
elif isinstance(p, Iterable):
params.append(np.array(_listr(p)))
else:
params.append(p)
params = sfpar.par_convert(params, prog)
gate(*params) | regrefs # pylint:disable=expression-not-assigned
else:
if callable(gate):
gate() | regrefs # pylint:disable=expression-not-assigned,pointless-statement
else:
gate | regrefs # pylint:disable=expression-not-assigned,pointless-statement
prog._target = xir_prog.options.get("target", None) # pylint: disable=protected-access
if "shots" in xir_prog.options:
prog.run_options["shots"] = xir_prog.options["shots"]
if "cutoff_dim" in xir_prog.options:
prog.backend_options["cutoff_dim"] = xir_prog.options["cutoff_dim"]
return prog
|
54,381 |
def download_and_extract(buildpath, packagedir, pkg, args):
srcpath = buildpath / packagedir
if 'source' not in pkg:
return srcpath
if 'url' in pkg['source']:
tarballname = Path(pkg['source']['url']).name
tarballpath = buildpath / tarballname
if not tarballpath.is_file():
try:
subprocess.run([
'wget', '-q', '-O', str(tarballpath), pkg['source']['url']
], check=True)
check_checksum(tarballpath, pkg)
except Exception:
tarballpath.unlink()
raise
if not srcpath.is_dir():
shutil.unpack_archive(str(tarballpath), str(buildpath))
for extension in ['.tar.gz', '.tgz', '.tar', '.tar.bz2', '.tbz2', '.tar.xz', '.txz', '.zip']:
if tarballname.endswith(extension):
tarballname = tarballname[:-len(extension)]
break
return buildpath / tarballname
elif 'path' in pkg['source']:
srcdir = Path(pkg['source']['path'])
if not srcdir.is_dir():
raise ValueError("'path' must point to a path")
if not srcpath.is_dir():
shutil.copytree(srcdir, srcpath)
return srcpath
else:
raise ValueError('Incorrect source provided')
|
def download_and_extract(buildpath, packagedir, pkg, args):
srcpath = buildpath / packagedir
if 'source' not in pkg:
return srcpath
if 'url' in pkg['source']:
tarballname = Path(pkg['source']['url']).name
tarballpath = buildpath / tarballname
if not tarballpath.is_file():
try:
subprocess.run([
'wget', '-q', '-O', str(tarballpath), pkg['source']['url']
], check=True)
check_checksum(tarballpath, pkg)
except Exception:
tarballpath.unlink()
raise
if not srcpath.is_dir():
shutil.unpack_archive(str(tarballpath), str(buildpath))
# strip extension e.g. .tar or .tar.gz
extracted_dirname = tarballname.with_suffix('').with_suffix('')
return buildpath / tarballname
elif 'path' in pkg['source']:
srcdir = Path(pkg['source']['path'])
if not srcdir.is_dir():
raise ValueError("'path' must point to a path")
if not srcpath.is_dir():
shutil.copytree(srcdir, srcpath)
return srcpath
else:
raise ValueError('Incorrect source provided')
|
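The pair above differs in how the archive extension is stripped from the tarball name. As a standalone illustration of that step (not dataset content; the extension list mirrors the one in the original), a small helper might look like this:
ARCHIVE_EXTS = ('.tar.gz', '.tgz', '.tar', '.tar.bz2', '.tbz2', '.tar.xz', '.txz', '.zip')

def strip_archive_ext(name: str) -> str:
    # Return the file name with a known archive suffix removed, if present.
    for ext in ARCHIVE_EXTS:
        if name.endswith(ext):
            return name[:-len(ext)]
    return name

assert strip_archive_ext('example-1.2.3.tar.gz') == 'example-1.2.3'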
45,689 |
def ManhattanPlot(
dataframe,
chrm="CHR",
bp="BP",
p="P",
snp="SNP",
gene="GENE",
annotation=None,
logp=True,
title="Manhattan Plot",
showgrid=True,
xlabel=None,
ylabel='-log10(p)',
point_size=5,
showlegend=True,
col=None,
suggestiveline_value=-np.log10(1e-8),
suggestiveline_color='#636efa',
suggestiveline_width=1,
genomewideline_value=-np.log10(5e-8),
genomewideline_color='#EF553B',
genomewideline_width=1,
highlight=True,
highlight_color="red",
):
"""Returns a figure for a manhattan plot.
Keyword arguments:
- dataframe (dataframe; required): A pandas dataframe which must contain at
least the following three columns:
- the chromosome number
- genomic base-pair position
- a numeric quantity to plot such as a p-value or zscore
- chrm (string; default 'CHR'): A string denoting the column name for
the chromosome. This column must be float or integer. Minimum
number of chromosomes required is 1. If you have X, Y, or MT
chromosomes, be sure to renumber these 23, 24, 25, etc.
- bp (string; default 'BP'): A string denoting the column name for the
chromosomal position.
- p (string; default 'P'): A string denoting the column name for the
float quantity to be plotted on the y-axis. This column must be
numeric. This does not have to be a p-value. It can be any numeric
quantity such as peak heights, bayes factors, test statistics. If
it is not a p-value, make sure to set logp = FALSE.
- snp (string; default 'SNP'): A string denoting the column name for
the SNP names (e.g. rs number). More generally, this column could
be anything that identifies each point being plotted. For example,
in an Epigenomewide association study (EWAS) this could be the
probe name or cg number. This column should be a character. This
argument is optional, however it is necessary to specify if you
want to highlight points on the plot using the highlight argument
in the figure method.
- gene (string; default 'GENE'): A string denoting the column name for
the GENE names. This column could be a string or a float. More
generally this could be any annotation information that you want
to include in the plot.
- annotation (string; optional): A string denoting the column name for
an annotation. This column could be a string or a float. This
could be any annotation information that you want to include in
the plot (e.g. zscore, effect size, minor allele frequency).
- logp (bool; optional): If True, the -log10 of the p-value is
plotted. It isn't very useful to plot raw p-values; however,
plotting the raw value could be useful for other genome-wide plots
(e.g., peak heights, bayes factors, test statistics, other
"scores", etc.)
- title (string; default 'Manhattan Plot') The title of the graph.
- showgrid (bool; default true): Boolean indicating whether gridlines
should be shown.
- xlabel (string; optional): Label of the x axis.
- ylabel: (string; default '-log10(p)'): Label of the y axis.
- point_size (number; default 5): Size of the points of the Scatter
plot.
- showlegend (bool; default true): Boolean indicating whether legends
should be shown.
- col (string; optional): A string representing the color of the
points of the Scatter plot. Can be in any color format accepted by
plotly_js graph_objs.
- suggestiveline_value (bool | float; default 8): A value which must
be False to deactivate the option, or a numerical value
corresponding to the p-value at which the line should be drawn.
The line has no influence on the data points.
- suggestiveline_color (string; default 'grey'): Color of the suggestive
line.
- suggestiveline_width (number; default 2): Width of the suggestive
line.
- genomewideline_value (bool | float; default -log10(5e-8)): A boolean
which must be False to deactivate the option, or a numerical value
corresponding to the p-value above which the data points are
considered significant.
- genomewideline_color (string; default 'red'): Color of the genome wide
line. Can be in any color format accepted by plotly_js
graph_objs.
- genomewideline_width (number; default 1): Width of the genome wide
line.
- highlight (bool; default true): turning on/off the highlighting of
data points considered significant.
- highlight_color (string; default 'red'): Color of the data points
highlighted because they are significant Can be in any color
format accepted by plotly_js graph_objs.
# ...
Example 1: Random Manhattan Plot
'''
dataframe = pd.DataFrame(
np.random.randint(0,100,size=(100, 3)),
columns=['P', 'CHR', 'BP'])
fig = create_manhattan(dataframe, title='XYZ Manhattan plot')
plotly.offline.plot(fig, image='png')
'''
"""
mh = _ManhattanPlot(
dataframe,
chrm=chrm,
bp=bp,
p=p,
snp=snp,
gene=gene,
annotation=annotation,
logp=logp
)
return mh.figure(
title=title,
showgrid=showgrid,
xlabel=xlabel,
ylabel=ylabel,
point_size=point_size,
showlegend=showlegend,
col=col,
suggestiveline_value=suggestiveline_value,
suggestiveline_color=suggestiveline_color,
suggestiveline_width=suggestiveline_width,
genomewideline_value=genomewideline_value,
genomewideline_color=genomewideline_color,
genomewideline_width=genomewideline_width,
highlight=highlight,
highlight_color=highlight_color
)
|
def ManhattanPlot(
dataframe,
chrm="CHR",
bp="BP",
p="P",
snp="SNP",
gene="GENE",
annotation=None,
logp=True,
title="Manhattan Plot",
showgrid=True,
xlabel=None,
ylabel='-log10(p)',
point_size=5,
showlegend=True,
col=None,
suggestiveline_value=-np.log10(1e-8),
suggestiveline_color='#636efa',
suggestiveline_width=1,
genomewideline_value=-np.log10(5e-8),
genomewideline_color='#EF553B',
genomewideline_width=1,
highlight=True,
highlight_color="red",
):
"""Returns a figure for a manhattan plot.
Keyword arguments:
- dataframe (dataframe; required): A pandas dataframe which must contain at
least the following three columns:
- the chromosome number
- genomic base-pair position
- a numeric quantity to plot such as a p-value or zscore
- chrm (string; default 'CHR'): A string denoting the column name for
the chromosome. This column must be float or integer. Minimum
number of chromosomes required is 1. If you have X, Y, or MT
chromosomes, be sure to renumber these 23, 24, 25, etc.
- bp (string; default 'BP'): A string denoting the column name for the
chromosomal position.
- p (string; default 'P'): A string denoting the column name for the
float quantity to be plotted on the y-axis. This column must be
numeric. This does not have to be a p-value. It can be any numeric
quantity such as peak heights, bayes factors, test statistics. If
it is not a p-value, make sure to set logp = FALSE.
- snp (string; default 'SNP'): A string denoting the column name for
the SNP names (e.g. rs number). More generally, this column could
be anything that identifies each point being plotted. For example,
in an Epigenomewide association study (EWAS) this could be the
probe name or cg number. This column should be a character. This
argument is optional, however it is necessary to specify if you
want to highlight points on the plot using the highlight argument
in the figure method.
- gene (string; default 'GENE'): A string denoting the column name for
the GENE names. This column could be a string or a float. More
generally this could be any annotation information that you want
to include in the plot.
- annotation (string; optional): A string denoting the column name for
an annotation. This column could be a string or a float. This
could be any annotation information that you want to include in
the plot (e.g. zscore, effect size, minor allele frequency).
- logp (bool; optional): If True, the -log10 of the p-value is
plotted. It isn't very useful to plot raw p-values; however,
plotting the raw value could be useful for other genome-wide plots
(e.g., peak heights, bayes factors, test statistics, other
"scores", etc.)
- title (string; default 'Manhattan Plot') The title of the graph.
- showgrid (bool; default true): Boolean indicating whether gridlines
should be shown.
- xlabel (string; optional): Label of the x axis.
- ylabel: (string; default '-log10(p)'): Label of the y axis.
- point_size (number; default 5): Size of the points of the Scatter
plot.
- showlegend (bool; default true): Boolean indicating whether legends
should be shown.
- col (string; optional): A string representing the color of the
points of the Scatter plot. Can be in any color format accepted by
plotly_js graph_objs.
- suggestiveline_value (bool | float; default 8): A value which must
be False to deactivate the option, or a numerical value
corresponding to the p-value at which the line should be drawn.
The line has no influence on the data points.
- suggestiveline_color (string; default 'grey'): Color of the suggestive
line.
- suggestiveline_width (number; default 2): Width of the suggestive
line.
- genomewideline_value (bool | float; default -log10(5e-8)): A boolean
which must be False to deactivate the option, or a numerical value
corresponding to the p-value above which the data points are
considered significant.
- genomewideline_color (string; default 'red'): Color of the genome wide
line. Can be in any color format accepted by plotly_js
graph_objs.
- genomewideline_width (number; default 1): Width of the genome wide
line.
- highlight (bool; default true): turning on/off the highlighting of
data points considered significant.
- highlight_color (string; default 'red'): Color of the data points
highlighted because they are significant Can be in any color
format accepted by plotly_js graph_objs.
# ...
Example 1: Random Manhattan Plot
'''
dataframe = pd.DataFrame(
np.random.randint(0,100,size=(100, 3)),
columns=['P', 'CHR', 'BP'])
fig = create_manhattan(dataframe, title='XYZ Manhattan plot')
plotly.offline.plot(fig, image='png')
'''
"""
mh = _ManhattanPlot(
dataframe,
chrm=chrm,
bp=bp,
p=p,
snp=snp,
gene=gene,
annotation=annotation,
logp=logp
)
return mh.figure(
title=title,
showgrid=showgrid,
xlabel=xlabel,
ylabel=ylabel,
point_size=point_size,
showlegend=showlegend,
col=col,
suggestiveline_value=suggestiveline_value,
suggestiveline_color=suggestiveline_color,
suggestiveline_width=suggestiveline_width,
genomewideline_value=genomewideline_value,
genomewideline_color=genomewideline_color,
genomewideline_width=genomewideline_width,
highlight=highlight,
highlight_color=highlight_color
)
|
19,432 |
def get_url_and_git(args):
"""Get the source and git URLs to use.
Use defaults if none is provided.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
Returns:
tuple(str, Optional[str]): The source and git URLs of the package
"""
# Possible flag combinations:
# spack create or spack create -g -> args.git is None, args.url is None
# spack create <url> -> args.git = '#AUTO-GIT-URL#', args.url = <url>
# -- in this case, url is checked to be git-like
# spack create -g <url> -> args.git = <url>, args.url is None
# spack create -g [some other flag that consumes argument] <url>
# -> args.git is None, args.url = <url>
# spack create -g <git_url> <url> -> args.git = <git_url>, args.url = <url>
# Default URLs
url = 'https://www.example.com/example-1.2.3.tar.gz'
git = None
# No source and no git urls were provided
if args.url is None and args.git is None:
return url, git
# Git url not set explicitly
if (args.git == '#AUTO-GIT-URL#' and is_git_url(args.url)) or args.git is None:
git = args.url
return url, git
else:
url = args.url or url
# Git is forced
if args.git != '#AUTO-GIT-URL#':
git = args.git
return url, git
|
def get_url_and_git(args):
"""Get the source and git URLs to use.
Use defaults if none is provided.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
Returns:
tuple(str, Optional[str]): The source and git URLs of the package
"""
# Possible flag combinations:
# spack create or spack create -g -> args.git is None, args.url is None
# spack create <url> -> args.git = '#AUTO-GIT-URL#', args.url = <url>
# -- in this case, url is checked to be git-like
# spack create -g <url> -> args.git = <url>, args.url is None
# spack create -g [some other flag that consumes argument] <url>
# -> args.git is None, args.url = <url>
# spack create -g <git_url> <url> -> args.git = <git_url>, args.url = <url>
# Default URLs
url = 'https://www.example.com/example-1.2.3.tar.gz'
git = None
# No source and no git urls were provided
if args.url is None and args.git is None:
return url, git
# Git url not set explicitly
if (args.git == '#AUTO-GIT-URL#' and is_git_url(args.url)) or args.git is None:
git = args.url
return args.url, git
else:
url = args.url or url
# Git is forced
if args.git != '#AUTO-GIT-URL#':
git = args.git
return url, git
|
30,776 |
def filter_by_score(events_data: list, score: int):
if score == 0:
return events_data
filtered_event_score = []
for event in events_data:
if event.get("score") >= score:
filtered_event_score.append(event)
return filtered_event_score
|
def filter_by_score(events_data: list, score: int) -> list:
if score == 0:
return events_data
filtered_event_score = []
for event in events_data:
if event.get("score") >= score:
filtered_event_score.append(event)
return filtered_event_score
|
26,240 |
def dup_links(links):
"""Check for duplicated links"""
print(f'Checking for duplicated links...')
hasError = False
seen = {}
dupes = []
for x in links:
if x in ignored_links:
continue
if x not in seen:
seen[x] = 1
else:
if seen[x] == 1:
dupes.append(x)
if not dupes:
print(f"No duplicated links")
else:
print(f"Found duplicated links: {dupes}")
hasError = True
return hasError
|
def dup_links(links):
"""Check for duplicated links"""
print(f'Checking for duplicated links...')
hasError = False
seen = {}
dupes = []
for link in links:
if x in ignored_links:
continue
if x not in seen:
seen[x] = 1
else:
if seen[x] == 1:
dupes.append(x)
if not dupes:
print(f"No duplicated links")
else:
print(f"Found duplicated links: {dupes}")
hasError = True
return hasError
|
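The duplicate check in the pair above can also be sketched with collections.Counter; this is an illustration only, and the sample links and default empty ignore list are placeholders.
from collections import Counter

def find_dupes(links, ignored_links=()):
    # Count each non-ignored link and keep those seen more than once.
    counts = Counter(link for link in links if link not in ignored_links)
    return [link for link, n in counts.items() if n > 1]

assert find_dupes(["a", "b", "a", "c", "b"]) == ["a", "b"]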
31,020 |
def panorama_route_lookup(dest_ip: str, virtual_router=None):
"""
Given the provided ip address, looks up the outgoing interface and zone on the firewall.
"""
if not VSYS:
raise Exception("The 'panorama-route-lookup' command is only relevant for a Firewall instance.")
response = panorama_get_routes(virtual_router)
if 'entry' not in response['response']['result']:
raise Exception("No routes returned from the Firewall.")
else:
routes = response['response']['result']['entry']
ip_addr = ipaddress.ip_address(dest_ip)
current_match = None
matched_route = None
for route in routes:
subnet_raw = route['destination']
subnet = ipaddress.ip_network(subnet_raw)
# If the given IP address is in the subnet
if ip_addr in subnet:
# IF we haven't matched yet
if not current_match:
current_match = subnet
matched_route = route
# If this is a greater subnet
elif subnet.prefixlen > current_match.prefixlen:
current_match = subnet
matched_route = route
if matched_route:
return matched_route
else:
raise Exception("Route not found.")
|
def panorama_route_lookup(dest_ip: str, virtual_router=None):
"""
Given the provided ip address, looks up the outgoing interface and zone on the firewall.
"""
if not VSYS:
raise Exception("The 'panorama-route-lookup' command is only relevant for a Firewall instance.")
response = panorama_get_routes(virtual_router)
if 'entry' not in response['response']['result']:
raise DemistoException("No routes returned from the Firewall.")
else:
routes = response['response']['result']['entry']
ip_addr = ipaddress.ip_address(dest_ip)
current_match = None
matched_route = None
for route in routes:
subnet_raw = route['destination']
subnet = ipaddress.ip_network(subnet_raw)
# If the given IP address is in the subnet
if ip_addr in subnet:
# IF we haven't matched yet
if not current_match:
current_match = subnet
matched_route = route
# If this is a greater subnet
elif subnet.prefixlen > current_match.prefixlen:
current_match = subnet
matched_route = route
if matched_route:
return matched_route
else:
raise Exception("Route not found.")
|
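The longest-prefix-match logic in the pair above can be exercised on its own with the standard ipaddress module; the route table below is made-up illustrative data, not firewall output.
import ipaddress

def best_route(dest_ip, routes):
    # Pick the route whose destination network contains dest_ip with the longest prefix.
    ip = ipaddress.ip_address(dest_ip)
    best, best_len = None, -1
    for route in routes:
        net = ipaddress.ip_network(route["destination"])
        if ip in net and net.prefixlen > best_len:
            best, best_len = route, net.prefixlen
    return best

routes = [
    {"destination": "0.0.0.0/0", "interface": "eth0"},
    {"destination": "10.0.0.0/8", "interface": "eth1"},
    {"destination": "10.1.0.0/16", "interface": "eth2"},
]
assert best_route("10.1.2.3", routes)["interface"] == "eth2"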