repository_name: stringlengths 7 to 107
function_path: stringlengths 4 to 190
function_identifier: stringlengths 1 to 236
language: stringclasses (1 value)
function: stringlengths 9 to 647k
docstring: stringlengths 5 to 488k
function_url: stringlengths 71 to 285
context: stringlengths 0 to 2.51M
license: stringclasses (5 values)
cread/ecks
ecks/plugins/wincpu.py
get_wincpu
python
def get_wincpu(parent, host, community):
    cpu = (1, 3, 6, 1, 2, 1, 25, 3, 3, 1, 2)
    data = parent.get_snmp_data(host, community, cpu, 1)
    if data:
        return tuple([int(load) for (oid, num, load) in data])
This is a plugin to be loaded by Ecks. It returns a tuple containing the cpu_load for each CPU found.
https://github.com/cread/ecks/blob/b0912e099aec37db456c821ff0c5a8fec1609f53/ecks/plugins/wincpu.py#L20-L29
Apache License 2.0
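A minimal usage sketch for get_wincpu above, assuming a stubbed stand-in for the Ecks object (only the get_snmp_data method is assumed) and made-up host/community values; the SNMP data triples mirror the (oid, num, load) unpacking in the function:

class FakeEcks:
    # Hypothetical stand-in; returns (oid, index, value) triples as the plugin expects.
    def get_snmp_data(self, host, community, oid, column):
        return [((1, 3, 6, 1, 2, 1, 25, 3, 3, 1, 2), 1, "12"),
                ((1, 3, 6, 1, 2, 1, 25, 3, 3, 1, 2), 2, "34")]

print(get_wincpu(FakeEcks(), "192.0.2.10", "public"))  # (12, 34)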
prompt-toolkit/pymux
pymux/layout.py
_create_split
python
def _create_split(pymux, window, split):
    assert isinstance(split, (arrangement.HSplit, arrangement.VSplit))
    is_vsplit = isinstance(split, arrangement.VSplit)

    def get_average_weight():
        weights = 0
        count = 0
        for i in split:
            if i in split.weights:
                weights += split.weights[i]
                count += 1
        if weights:
            return max(1, weights // count)
        else:
            return 1

    def report_write_position_callback(item, write_position):
        if is_vsplit:
            split.weights[item] = write_position.width
        else:
            split.weights[item] = write_position.height

    def get_size(item):
        return D(weight=split.weights.get(item) or average_weight)

    content = []
    average_weight = get_average_weight()

    for i, item in enumerate(split):
        width = height = None
        if is_vsplit:
            width = partial(get_size, item)
        else:
            height = partial(get_size, item)

        if isinstance(item, (arrangement.VSplit, arrangement.HSplit)):
            child = _create_split(pymux, window, item)
        elif isinstance(item, arrangement.Pane):
            child = _create_container_for_process(pymux, window, item)
        else:
            raise TypeError('Got %r' % (item,))

        content.append(SizedBox(
            child, width=width, height=height,
            report_write_position_callback=partial(report_write_position_callback, item)))

    if is_vsplit:
        return_cls = VSplit
        padding_char = _border_vertical
    else:
        return_cls = HSplit
        padding_char = _border_horizontal

    return return_cls(content, padding=1, padding_char=padding_char)
Create a prompt_toolkit `Container` instance for the given pymux split.
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/layout.py#L604-L679
from __future__ import unicode_literals from prompt_toolkit.application.current import get_app from prompt_toolkit.filters import Condition, has_focus from prompt_toolkit.formatted_text import FormattedText, HTML from prompt_toolkit.layout.containers import VSplit, HSplit, Window, FloatContainer, Float, ConditionalContainer, Container, WindowAlign, to_container from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl from prompt_toolkit.layout.dimension import Dimension from prompt_toolkit.layout.dimension import Dimension as D from prompt_toolkit.layout.dimension import to_dimension, is_dimension from prompt_toolkit.layout.menus import CompletionsMenu from prompt_toolkit.layout.processors import BeforeInput, ShowArg, AppendAutoSuggestion, Processor, Transformation, HighlightSelectionProcessor from prompt_toolkit.layout.screen import Char from prompt_toolkit.mouse_events import MouseEventType from prompt_toolkit.widgets import FormattedTextToolbar, TextArea, Dialog, SearchToolbar from six.moves import range from functools import partial import pymux.arrangement as arrangement import datetime import weakref import six from .filters import WaitsForConfirmation from .format import format_pymux_string from .log import logger __all__ = ( 'LayoutManager', ) class Justify: LEFT = 'left' CENTER = 'center' RIGHT = 'right' _ALL = [LEFT, CENTER, RIGHT] class Z_INDEX: HIGHLIGHTED_BORDER = 2 STATUS_BAR = 5 COMMAND_LINE = 6 MESSAGE_TOOLBAR = 7 WINDOW_TITLE_BAR = 8 POPUP = 9 class Background(Container): def reset(self): pass def preferred_width(self, max_available_width): return D() def preferred_height(self, width, max_available_height): return D() def write_to_screen(self, screen, mouse_handlers, write_position, parent_style, erase_bg, z_index): default_char = Char(' ', 'class:background') dot = Char('.', 'class:background') ypos = write_position.ypos xpos = write_position.xpos for y in range(ypos, ypos + write_position.height): row = screen.data_buffer[y] for x in range(xpos, xpos + write_position.width): row[x] = dot if (x + y) % 3 == 0 else default_char def get_children(self): return [] _numbers = list(zip(*[ ['#####', ' #', '#####', '#####', '# #', '#####', '#####', '#####', '#####', '#####'], ['# #', ' #', ' #', ' #', '# #', '# ', '# ', ' #', '# #', '# #'], ['# #', ' #', '#####', '#####', '#####', '#####', '#####', ' #', '#####', '#####'], ['# #', ' #', '# ', ' #', ' #', ' #', '# #', ' #', '# #', ' #'], ['#####', ' #', '#####', '#####', ' #', '#####', '#####', ' #', '#####', '#####'], ])) def _draw_number(screen, x_offset, y_offset, number, style='class:clock', transparent=False): fg = Char(' ', 'class:clock') bg = Char(' ', '') for y, row in enumerate(_numbers[number]): screen_row = screen.data_buffer[y + y_offset] for x, n in enumerate(row): if n == '#': screen_row[x + x_offset] = fg elif not transparent: screen_row[x + x_offset] = bg class BigClock(Container): WIDTH = 28 HEIGHT = 5 def __init__(self, on_click): assert callable(on_click) self.on_click = on_click def reset(self): pass def write_to_screen(self, screen, mouse_handlers, write_position, parent_style, erase_bg, z_index): xpos = write_position.xpos ypos = write_position.ypos bg = Char(' ', '') def draw_func(): for y in range(ypos, self.HEIGHT + ypos): row = screen.data_buffer[y] for x in range(xpos, xpos + self.WIDTH): row[x] = bg now = datetime.datetime.now() _draw_number(screen, xpos + 0, ypos, now.hour // 10) _draw_number(screen, xpos + 6, ypos, now.hour % 10) _draw_number(screen, xpos + 16, ypos, now.minute // 
10) _draw_number(screen, xpos + 23, ypos, now.minute % 10) screen.data_buffer[ypos + 1][xpos + 13] = Char(' ', 'class:clock') screen.data_buffer[ypos + 3][xpos + 13] = Char(' ', 'class:clock') screen.width = self.WIDTH screen.height = self.HEIGHT mouse_handlers.set_mouse_handler_for_range( x_min=xpos, x_max=xpos + write_position.width, y_min=ypos, y_max=ypos + write_position.height, handler=self._mouse_handler) screen.draw_with_z_index(z_index=z_index, draw_func=draw_func) def _mouse_handler(self, cli, mouse_event): if mouse_event.event_type == MouseEventType.MOUSE_UP: self.on_click(cli) else: return NotImplemented def preferred_width(self, max_available_width): return D.exact(BigClock.WIDTH) def preferred_height(self, width, max_available_height): return D.exact(BigClock.HEIGHT) def get_children(self): return [] class PaneNumber(Container): WIDTH = 5 HEIGHT = 5 def __init__(self, pymux, arrangement_pane): self.pymux = pymux self.arrangement_pane = arrangement_pane def reset(self): pass def _get_index(self): window = self.pymux.arrangement.get_active_window() try: return window.get_pane_index(self.arrangement_pane) except ValueError: return 0 def preferred_width(self, max_available_width): return Dimension.exact(6 * len('%s' % self._get_index()) - 1) def preferred_height(self, width, max_available_height): return Dimension.exact(self.HEIGHT) def write_to_screen(self, screen, mouse_handlers, write_position, parent_style, erase_bg, z_index): style = 'class:panenumber' def draw_func(): for i, d in enumerate('%s' % (self._get_index(),)): _draw_number(screen, write_position.xpos + i * 6, write_position.ypos, int(d), style=style, transparent=True) screen.draw_with_z_index(z_index=z_index, draw_func=draw_func) def get_children(self): return [] class MessageToolbar(FormattedTextToolbar): def __init__(self, client_state): def get_message(): if client_state.message: return client_state.message else: return '' def get_tokens(): message = get_message() if message: return FormattedText([ ('class:message', message), ('[SetCursorPosition]', ''), ('class:message', ' '), ]) else: return '' @Condition def is_visible(): return bool(get_message()) super(MessageToolbar, self).__init__(get_tokens) class LayoutManager(object): def __init__(self, pymux, client_state): self.pymux = pymux self.client_state = client_state search_textarea = SearchToolbar() self._popup_textarea = TextArea(scrollbar=True, read_only=True, search_field=search_textarea) self.popup_dialog = Dialog( title='Keys', body=HSplit([ Window(FormattedTextControl(text=''), height=1), self._popup_textarea, search_textarea, Window( FormattedTextControl( text=HTML('Press [<b>q</b>] to quit or [<b>/</b>] for searching.')), align=WindowAlign.CENTER, height=1) ]) ) self.layout = self._create_layout() self.pane_write_positions = {} def reset_write_positions(self): self.pane_write_positions = {} def display_popup(self, title, content): assert isinstance(title, six.text_type) assert isinstance(content, six.text_type) self.popup_dialog.title = title self._popup_textarea.text = content self.client_state.display_popup = True get_app().layout.focus(self._popup_textarea) def _create_select_window_handler(self, window): def handler(mouse_event): if mouse_event.event_type == MouseEventType.MOUSE_DOWN: self.pymux.arrangement.set_active_window(window) self.pymux.invalidate() else: return NotImplemented return handler def _get_status_tokens(self): result = [] for i, w in enumerate(self.pymux.arrangement.windows): if i > 0: result.append(('', ' ')) if w == 
self.pymux.arrangement.get_active_window(): style = 'class:window.current' format_str = self.pymux.window_status_current_format else: style = 'class:window' format_str = self.pymux.window_status_format result.append(( style, format_pymux_string(self.pymux, format_str, window=w), self._create_select_window_handler(w))) return result def _get_status_left_tokens(self): return format_pymux_string(self.pymux, self.pymux.status_left) def _get_status_right_tokens(self): return format_pymux_string(self.pymux, self.pymux.status_right) def _get_align(self): if self.pymux.status_justify == Justify.RIGHT: return WindowAlign.RIGHT elif self.pymux.status_justify == Justify.CENTER: return WindowAlign.CENTER else: return WindowAlign.LEFT def _before_prompt_command_tokens(self): return [('class:commandline.prompt', '%s ' % (self.client_state.prompt_text, ))] def _create_layout(self): waits_for_confirmation = WaitsForConfirmation(self.pymux) return FloatContainer( content=HSplit([ FloatContainer( Background(), floats=[ Float(width=lambda: self.pymux.get_window_size().columns, height=lambda: self.pymux.get_window_size().rows, content=DynamicBody(self.pymux)) ]), ConditionalContainer( content=VSplit([ Window( height=1, width=(lambda: D(max=self.pymux.status_left_length)), dont_extend_width=True, content=FormattedTextControl(self._get_status_left_tokens)), Window( height=1, char=' ', align=self._get_align, content=FormattedTextControl(self._get_status_tokens)), Window( height=1, width=(lambda: D(max=self.pymux.status_right_length)), dont_extend_width=True, align=WindowAlign.RIGHT, content=FormattedTextControl(self._get_status_right_tokens)) ], z_index=Z_INDEX.STATUS_BAR, style='class:statusbar'), filter=Condition(lambda: self.pymux.enable_status), ) ]), floats=[ Float(bottom=1, left=0, z_index=Z_INDEX.MESSAGE_TOOLBAR, content=MessageToolbar(self.client_state)), Float(left=0, right=0, bottom=0, content=HSplit([ ConditionalContainer( content=Window( height=1, content=ConfirmationToolbar(self.pymux, self.client_state), z_index=Z_INDEX.COMMAND_LINE, ), filter=waits_for_confirmation, ), ConditionalContainer( content=Window( height=D(min=1), style='class:commandline', dont_extend_height=True, content=BufferControl( buffer=self.client_state.command_buffer, preview_search=True, input_processors=[ AppendAutoSuggestion(), BeforeInput(':', style='class:commandline-prompt'), ShowArg(), HighlightSelectionProcessor(), ]), z_index=Z_INDEX.COMMAND_LINE, ), filter=has_focus(self.client_state.command_buffer), ), ConditionalContainer( content=Window( height=1, style='class:commandline', content=BufferControl( buffer=self.client_state.prompt_buffer, input_processors=[ BeforeInput(self._before_prompt_command_tokens), AppendAutoSuggestion(), HighlightSelectionProcessor(), ]), z_index=Z_INDEX.COMMAND_LINE, ), filter=has_focus(self.client_state.prompt_buffer), ), ])), Float( content=ConditionalContainer( content=self.popup_dialog, filter=Condition(lambda: self.client_state.display_popup), ), left=3, right=3, top=5, bottom=5, z_index=Z_INDEX.POPUP, ), Float(xcursor=True, ycursor=True, content=CompletionsMenu(max_height=12)), ] ) class ConfirmationToolbar(FormattedTextControl): def __init__(self, pymux, client_state): def get_tokens(): return [ ('class:question', ' '), ('class:question', format_pymux_string( pymux, client_state.confirm_text or '')), ('class:question', ' '), ('class:yesno', ' y/n'), ('[SetCursorPosition]', ''), ('class:yesno', ' '), ] super(ConfirmationToolbar, self).__init__( get_tokens, 
style='class:confirmationtoolbar') class DynamicBody(Container): def __init__(self, pymux): self.pymux = pymux self._bodies_for_app = weakref.WeakKeyDictionary() def _get_body(self): new_hash = self.pymux.arrangement.invalidation_hash() app = get_app() if app in self._bodies_for_app: existing_hash, container = self._bodies_for_app[app] if existing_hash == new_hash: return container new_layout = self._build_layout() self._bodies_for_app[app] = (new_hash, new_layout) return new_layout def _build_layout(self): logger.info('Rebuilding layout.') if not self.pymux.arrangement.windows: return Window() active_window = self.pymux.arrangement.get_active_window() if active_window.zoom: return to_container(_create_container_for_process( self.pymux, active_window, active_window.active_pane, zoom=True)) else: window = self.pymux.arrangement.get_active_window() return HSplit([ ConditionalContainer( content=Window(height=1), filter=Condition(lambda: self.pymux.enable_pane_status)), _create_split(self.pymux, window, window.root) ]) def reset(self): for invalidation_hash, body in self._bodies_for_app.values(): body.reset() def preferred_width(self, max_available_width): body = self._get_body() return body.preferred_width(max_available_width) def preferred_height(self, width, max_available_height): body = self._get_body() return body.preferred_height(width, max_available_height) def write_to_screen(self, screen, mouse_handlers, write_position, parent_style, erase_bg, z_index): body = self._get_body() body.write_to_screen(screen, mouse_handlers, write_position, parent_style, erase_bg, z_index) def get_children(self): body = self._get_body() return [body] class SizedBox(Container): def __init__(self, content, width=None, height=None, report_write_position_callback=None): assert is_dimension(width) assert is_dimension(height) assert report_write_position_callback is None or callable(report_write_position_callback) self.content = to_container(content) self.width = width self.height = height self.report_write_position_callback = report_write_position_callback def reset(self): self.content.reset() def preferred_width(self, max_available_width): return to_dimension(self.width) def preferred_height(self, width, max_available_height): return to_dimension(self.height) def write_to_screen(self, screen, mouse_handlers, write_position, parent_style, erase_bg, z_index): if self.report_write_position_callback: self.report_write_position_callback(write_position) self.content.write_to_screen( screen, mouse_handlers, write_position, parent_style, erase_bg, z_index) def get_children(self): return [self.content]
BSD 3-Clause New or Revised License
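The weight handling in _create_split above is the subtle part: a pane whose size has never been reported falls back to the average of the known weights. A standalone sketch of that fallback, using a plain dict rather than the pymux arrangement API (names are illustrative):

def average_weight(weights):
    # weights: mapping of pane -> last reported width/height
    known = list(weights.values())
    return max(1, sum(known) // len(known)) if known else 1

weights = {"pane_a": 80, "pane_b": 40}
print(average_weight(weights))  # 60, used for any pane without a recorded weight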
sjtmusicteam/svs_system
SVS/model/layers/conformer_related.py
LayerNorm.forward
python
def forward(self, x):
    if self.dim == -1:
        return super(LayerNorm, self).forward(x)
    return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
Apply layer normalization.

Args:
    x (torch.Tensor): Input tensor.

Returns:
    torch.Tensor: Normalized tensor.
https://github.com/sjtmusicteam/svs_system/blob/1d6aa75693adf9ce3c90fd1fb462147473a5dc26/SVS/model/layers/conformer_related.py#L685-L695
import logging import math import numpy import torch from torch import nn class ConvolutionModule(nn.Module): def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True): super(ConvolutionModule, self).__init__() assert (kernel_size - 1) % 2 == 0 self.pointwise_conv1 = nn.Conv1d( channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=bias ) self.depthwise_conv = nn.Conv1d( channels, channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias=bias, ) self.norm = nn.BatchNorm1d(channels) self.pointwise_conv2 = nn.Conv1d( channels, channels, kernel_size=1, stride=1, padding=0, bias=bias ) self.activation = activation def forward(self, x): x = x.transpose(1, 2) x = self.pointwise_conv1(x) x = nn.functional.glu(x, dim=1) x = self.depthwise_conv(x) x = self.activation(self.norm(x)) x = self.pointwise_conv2(x) return x.transpose(1, 2) class EncoderLayer(nn.Module): def __init__( self, size, self_attn, feed_forward, feed_forward_macaron, conv_module, dropout_rate, normalize_before=True, concat_after=False, ): super(EncoderLayer, self).__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.feed_forward_macaron = feed_forward_macaron self.conv_module = conv_module self.norm_ff = LayerNorm(size) self.norm_mha = LayerNorm(size) if feed_forward_macaron is not None: self.norm_ff_macaron = LayerNorm(size) self.ff_scale = 0.5 else: self.ff_scale = 1.0 if self.conv_module is not None: self.norm_conv = LayerNorm(size) self.norm_final = LayerNorm(size) self.dropout = nn.Dropout(dropout_rate) self.size = size self.normalize_before = normalize_before self.concat_after = concat_after if self.concat_after: self.concat_linear = nn.Linear(size + size, size) def forward(self, x_input, mask, cache=None): if isinstance(x_input, tuple): x, pos_emb = x_input[0], x_input[1] else: x, pos_emb = x_input, None if self.feed_forward_macaron is not None: residual = x if self.normalize_before: x = self.norm_ff_macaron(x) x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x)) if not self.normalize_before: x = self.norm_ff_macaron(x) residual = x if self.normalize_before: x = self.norm_mha(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + self.concat_linear(x_concat) else: x = residual + self.dropout(x_att) if not self.normalize_before: x = self.norm_mha(x) if self.conv_module is not None: residual = x if self.normalize_before: x = self.norm_conv(x) x = residual + self.dropout(self.conv_module(x)) if not self.normalize_before: x = self.norm_conv(x) residual = x if self.normalize_before: x = self.norm_ff(x) x = residual + self.ff_scale * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm_ff(x) if self.conv_module is not None: x = self.norm_final(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask class Swish(torch.nn.Module): def forward(self, x): return x * torch.sigmoid(x) def get_activation(act): activation_funcs = { "hardtanh": torch.nn.Hardtanh, "tanh": torch.nn.Tanh, "relu": torch.nn.ReLU, "selu": torch.nn.SELU, "swish": Swish, } return activation_funcs[act]() class VGG2L(torch.nn.Module): def 
__init__(self, idim, odim): super().__init__() self.vgg2l = torch.nn.Sequential( torch.nn.Conv2d(1, 64, 3, stride=1, padding=1), torch.nn.ReLU(), torch.nn.Conv2d(64, 64, 3, stride=1, padding=1), torch.nn.ReLU(), torch.nn.MaxPool2d((3, 2)), torch.nn.Conv2d(64, 128, 3, stride=1, padding=1), torch.nn.ReLU(), torch.nn.Conv2d(128, 128, 3, stride=1, padding=1), torch.nn.ReLU(), torch.nn.MaxPool2d((2, 2)), ) self.output = torch.nn.Linear(128 * ((idim // 2) // 2), odim) def forward(self, x, x_mask): x = x.unsqueeze(1) x = self.vgg2l(x) b, c, t, f = x.size() x = self.output(x.transpose(1, 2).contiguous().view(b, t, c * f)) if x_mask is not None: x_mask = self.create_new_mask(x_mask, x) return x, x_mask def create_new_mask(self, x_mask, x): x_t1 = x_mask.size(2) - (x_mask.size(2) % 3) x_mask = x_mask[:, :, :x_t1][:, :, ::3] x_t2 = x_mask.size(2) - (x_mask.size(2) % 2) x_mask = x_mask[:, :, :x_t2][:, :, ::2] return x_mask class MultiHeadedAttention(nn.Module): def __init__(self, n_head, n_feat, dropout_rate): super(MultiHeadedAttention, self).__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.attn = None self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query, key, value): n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value, scores, mask): n_batch = value.size(0) if mask is not None: mask = mask.unsqueeze(1).eq(0) min_value = float( numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min ) scores = scores.masked_fill(mask, min_value) self.attn = torch.softmax(scores, dim=-1).masked_fill( mask, 0.0 ) else: self.attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(self.attn) x = torch.matmul(p_attn, value) x = ( x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) ) return self.linear_out(x) def forward(self, query, key, value, mask): q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask) class RelPositionMultiHeadedAttention(MultiHeadedAttention): def __init__(self, n_head, n_feat, dropout_rate): super().__init__(n_head, n_feat, dropout_rate) self.linear_pos = nn.Linear(n_feat, n_feat, bias=False) self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k)) self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k)) torch.nn.init.xavier_uniform_(self.pos_bias_u) torch.nn.init.xavier_uniform_(self.pos_bias_v) def rel_shift(self, x, zero_triu=False): zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=-1) x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2)) x = x_padded[:, :, 1:].view_as(x) if zero_triu: ones = torch.ones((x.size(2), x.size(3))) x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] return x def forward(self, query, key, value, pos_emb, mask): q, k, v = self.forward_qkv(query, key, value) q = q.transpose(1, 2) n_batch_pos = pos_emb.size(0) p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k) p = p.transpose(1, 2) q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2) 
q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2) matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1)) matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1)) matrix_bd = self.rel_shift(matrix_bd) scores = (matrix_ac + matrix_bd) / math.sqrt( self.d_k ) return self.forward_attention(v, scores, mask) def _pre_hook( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ): k = prefix + "pe" if k in state_dict: state_dict.pop(k) class PositionalEncoding(torch.nn.Module): def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False): super(PositionalEncoding, self).__init__() self.d_model = d_model self.reverse = reverse self.xscale = math.sqrt(self.d_model) self.dropout = torch.nn.Dropout(p=dropout_rate) self.pe = None self.extend_pe(torch.tensor(0.0).expand(1, max_len)) self._register_load_state_dict_pre_hook(_pre_hook) def extend_pe(self, x): if self.pe is not None: if self.pe.size(1) >= x.size(1): if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return pe = torch.zeros(x.size(1), self.d_model) if self.reverse: position = torch.arange( x.size(1) - 1, -1, -1.0, dtype=torch.float32 ).unsqueeze(1) else: position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) div_term = torch.exp( torch.arange(0, self.d_model, 2, dtype=torch.float32) * -(math.log(10000.0) / self.d_model) ) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.pe = pe.to(device=x.device, dtype=x.dtype) def forward(self, x: torch.Tensor): self.extend_pe(x) x = x * self.xscale + self.pe[:, : x.size(1)] return self.dropout(x) class ScaledPositionalEncoding(PositionalEncoding): def __init__(self, d_model, dropout_rate, max_len=5000): super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len) self.alpha = torch.nn.Parameter(torch.tensor(1.0)) def reset_parameters(self): self.alpha.data = torch.tensor(1.0) def forward(self, x): self.extend_pe(x) x = x + self.alpha * self.pe[:, : x.size(1)] return self.dropout(x) class RelPositionalEncoding(PositionalEncoding): def __init__(self, d_model, dropout_rate, max_len=5000): super().__init__(d_model, dropout_rate, max_len, reverse=True) def forward(self, x): self.extend_pe(x) x = x * self.xscale pos_emb = self.pe[:, : x.size(1)] return self.dropout(x), self.dropout(pos_emb) class LayerNorm(torch.nn.LayerNorm): def __init__(self, nout, dim=-1): super(LayerNorm, self).__init__(nout, eps=1e-12) self.dim = dim
Apache License 2.0
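A small usage sketch of the transpose trick in LayerNorm.forward above: with dim=1 the normalization runs over the channel axis of a (batch, channels, time) tensor by moving that axis last and back again. Shapes here are illustrative only:

import torch

norm = LayerNorm(nout=80, dim=1)  # normalize over the 80-channel axis
x = torch.randn(4, 80, 100)       # (batch, channels, time)
y = norm(x)
print(y.shape)                    # torch.Size([4, 80, 100])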
tand826/wsiprocess
wsiprocess/cli.py
Args.set_detection_args
python
def set_detection_args(self):
    parser_det = self.method_args.add_parser(
        "detection", help="Arguments for detection tasks.")
    self.set_wsi_arg(parser_det)
    parser_det.add_argument(
        "-vo", "--voc_style", action="store_true",
        help="Output as VOC style.")
    parser_det.add_argument(
        "-co", "--coco_style", action="store_true",
        help="Output as COCO style.")
    parser_det.add_argument(
        "-yo", "--yolo_style", action="store_true",
        help="Output as YOLO style.")
    parser_det.add_argument(
        "-ra", "--ratio", default="8:1:1",
        help="Ratio of the dataset size of train/validation/test phase.")
    parser_det.add_argument(
        "-cb", "--crop_bbox", default=False, action="store_true",
        help="Crop bounding boxes after patch extraction.")
    self.add_annotation_args(parser_det, slide_is_sparse=True)
    self.set_common_args(parser_det)
Arguments for detection tasks.
https://github.com/tand826/wsiprocess/blob/5a63f4da536d7074aa15e72b8b1355dbc125744e/wsiprocess/cli.py#L126-L150
import argparse from pathlib import Path import wsiprocess as wp class Args: def __init__(self, command): self.build_args(command) def set_base_parser(self): self.base_parser = argparse.ArgumentParser( description="wsiprocess command line tool") def set_common_args(self, parser): parser.add_argument( "-st", "--save_to", type=Path, default=".", help="Where to save the data.") parser.add_argument( "-pw", "--patch_width", type=int, default=256, help="Width of patches.") parser.add_argument( "-ph", "--patch_height", type=int, default=256, help="Height of patches.") parser.add_argument( "-ow", "--overlap_width", type=int, default=0, help="Width of the overlapped area of patches.") parser.add_argument( "-oh", "--overlap_height", type=int, default=0, help="Height of the overlapped area of patches") parser.add_argument( "-ox", "--offset_x", type=int, default=0, help="The offset pixel along the x-axis.") parser.add_argument( "-oy", "--offset_y", type=int, default=0, help="The offset pixel along the y-axis.") parser.add_argument( "-dw", "--dot_bbox_width", type=int, default=30, help="Width of bbox translated from dot annotation.") parser.add_argument( "-dh", "--dot_bbox_height", type=int, help="Height of bbox translated from dot annotation.") parser.add_argument( "-ss", "--start_sample", action="store_true", help="Generate samples at the start of the process.") parser.add_argument( "-fs", "--finished_sample", action="store_true", help="Generate samples at the end of the process.") parser.add_argument( "-np", "--no_patches", action="store_true", help="Patcher run without extracting patches.") parser.add_argument( "-ep", "--extract_patches", action="store_true", help="[Not Available]Extract the patches and save them as images.") parser.add_argument( "-ve", "--verbose", action="store_true", help="Show progress bar while patching.") def set_wsi_arg(self, parser): parser.add_argument( "wsi", type=str, help="Path to the target wsi.") def add_annotation_args(self, parser, slide_is_sparse=False): parser.add_argument( "annotation", type=str, default=False, help="Path to the annotation file.") parser.add_argument( "-ma", "--magnification", choices={40, 20, 10}, default=40, type=int, help="Magnification to process.") parser.add_argument( "-ru", "--rule", type=Path, help="File to define the inclusion / exclusion relationship.") parser.add_argument( "-ef", "--extract_foreground", action="store_true", help="If set, wp extracts patches from foreground.") self.add_binarization_method(parser) self.add_on_foreground(parser, slide_is_sparse) self.add_on_annotation(parser, slide_is_sparse) def add_on_foreground(self, parser, slide_is_sparse=False): on_foreground_param = 0.0001 if slide_is_sparse else 0.01 parser.add_argument( "-of", "--on_foreground", type=float, default=on_foreground_param, help="The ratio of overlapped area of a patch and the foreground.") def add_on_annotation(self, parser, slide_is_sparse=False): on_annotation_param = 0.0001 if slide_is_sparse else 0.01 parser.add_argument( "-oa", "--on_annotation", type=float, default=on_annotation_param, help="The ratio of overlapped area of a patch and the annotated.") def add_binarization_method(self, parser): parser.add_argument( "-mm", "--minmax", type=str, default=False, help="Get foreground mask as pixels from min to max. ie. 
30-190") parser.add_argument( "-et", "--export_thumbs", action="store_true", help="Export thumbnails of masks.") def set_method_args(self): self.method_args = self.base_parser.add_subparsers( dest="method", help="Method to use.") self.method_args.required = True def set_evaluation_args(self): parser_eval = self.method_args.add_parser( "evaluation", help="Arguments for methods with no annotation data.") self.set_wsi_arg(parser_eval) self.add_on_foreground(parser_eval) self.add_binarization_method(parser_eval) self.set_common_args(parser_eval) def set_classification_args(self): parser_cls = self.method_args.add_parser( "classification", help="Arguments for classification tasks.") self.set_wsi_arg(parser_cls) self.add_annotation_args(parser_cls) self.set_common_args(parser_cls)
Apache License 2.0
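A minimal standalone sketch of the subparser pattern used by set_detection_args above. This is not the real wsiprocess CLI (the argument set is trimmed to a few flags and the slide path is made up), but it shows how the "detection" method and its arguments end up in the parsed namespace:

import argparse

parser = argparse.ArgumentParser(description="wsiprocess command line tool")
sub = parser.add_subparsers(dest="method")
det = sub.add_parser("detection", help="Arguments for detection tasks.")
det.add_argument("wsi", type=str, help="Path to the target wsi.")
det.add_argument("-co", "--coco_style", action="store_true")
det.add_argument("-ra", "--ratio", default="8:1:1")

ns = parser.parse_args(["detection", "slide.svs", "--coco_style"])
print(ns.method, ns.wsi, ns.coco_style, ns.ratio)  # detection slide.svs True 8:1:1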
ros2/ci
ros2_batch_job/vendor/osrf_pycommon/osrf_pycommon/cli_utils/verb_pattern.py
split_arguments_by_verb
python
def split_arguments_by_verb(arguments):
    verb = None
    pre_verb_args = []
    post_verb_args = []
    for index, arg in enumerate(arguments):
        # The first argument that does not start with '-' is the verb;
        # everything after it belongs to the verb.
        if not arg.startswith('-'):
            verb = arg
            post_verb_args = arguments[index + 1:]
            break
        pre_verb_args.append(arg)
    return verb, pre_verb_args, post_verb_args
Split arguments by verb. Given a list of arguments (list of strings), the verb, the pre-verb arguments, and the post-verb arguments are returned. For example:

.. code-block:: python

    >>> args = ['--command-arg1', 'verb', '--verb-arg1', '--verb-arg2']
    >>> split_arguments_by_verb(args)
    ('verb', ['--command-arg1'], ['--verb-arg1', '--verb-arg2'])

:param list arguments: list of system arguments
:returns: the verb (str), pre verb args (list), and post verb args (list)
:rtype: tuple
https://github.com/ros2/ci/blob/eb8d0ad3e9b74a71561c178bb5d8487f77206d35/ros2_batch_job/vendor/osrf_pycommon/osrf_pycommon/cli_utils/verb_pattern.py#L151-L181
import inspect import pkg_resources def call_prepare_arguments(func, parser, sysargs=None): func_args = [parser] arguments, _, _, defaults = inspect.getargspec(func) if arguments[0] == 'self': del arguments[0] if defaults: arguments = arguments[:-len(defaults)] if len(arguments) not in [1, 2]: raise ValueError("Given function '{0}' must have one or two " "parameters (excluding self), but got '{1}' " "parameters: '{2}'" .format(func.__name__, len(arguments), ', '.join(inspect.getargspec(func)[0]))) if len(arguments) == 2: func_args.append(sysargs or []) return func(*func_args) or parser def create_subparsers(parser, cmd_name, verbs, group, sysargs, title=None): metavar = '[' + ' | '.join(verbs) + ']' subparser = parser.add_subparsers( title=title or '{0} command'.format(cmd_name), metavar=metavar, description='Call `{0} {1} -h` for help on a each verb.'.format( cmd_name, metavar), dest='verb' ) argument_preprocessors = {} verb_subparsers = {} for verb in verbs: desc = load_verb_description(verb, group) cmd_parser = subparser.add_parser( desc['verb'], description=desc['description']) cmd_parser = call_prepare_arguments( desc['prepare_arguments'], cmd_parser, sysargs, ) cmd_parser.set_defaults(main=desc['main']) if 'argument_preprocessor' in desc: argument_preprocessors[verb] = desc['argument_preprocessor'] else: argument_preprocessors[verb] = default_argument_preprocessor verb_subparsers[verb] = cmd_parser return argument_preprocessors, verb_subparsers def default_argument_preprocessor(args): extras = {} return args, extras def list_verbs(group): verbs = [] for entry_point in pkg_resources.iter_entry_points(group=group): verbs.append(entry_point.name) return verbs def load_verb_description(verb_name, group): for entry_point in pkg_resources.iter_entry_points(group=group): if entry_point.name == verb_name: return entry_point.load()
Apache License 2.0
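One behavior worth noting, grounded directly in the loop above: if no verb appears (every argument starts with '-'), the verb stays None and everything is treated as a pre-verb argument:

print(split_arguments_by_verb(['--help', '--version']))
# (None, ['--help', '--version'], [])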
seekintoo/chimay-red
lib/utils.py
write_to_file
python
def write_to_file(data: bytes, filepath: str) -> int:
    if not isinstance(data, bytes):
        raise TypeError("data expecting type bytes, got {0}".format(type(data)))
    if not isinstance(filepath, str):
        raise TypeError("filepath expecting type str, got {0}".format(type(filepath)))
    with open(filepath, "wb") as fd:
        return fd.write(data)
Writes arbitrary bytes to a file given `data` and `filepath`. Returns the number of `bytes` written.
https://github.com/seekintoo/chimay-red/blob/8e2e2dbd149fd9f9c6ece67127e92f9b10742770/lib/utils.py#L95-L107
import fnmatch import ipaddress import linecache import os import socket import struct import tracemalloc from binascii import hexlify from pwn import remote, log from lib.defines import MAGIC_SIZE, SQUASHFS_MAGIC, SQUASHFS_OFFSET print_info = log.info print_progress = log.progress def craft_post_header(length=0, content_length=True): if content_length: header = b"POST /jsproxy HTTP/1.1\r\nContent-Length: " header += "{}\r\n\r\n".format(str(length)).encode() else: header = b"POST /jsproxy HTTP/1.1\r\n\r\n" return header def create_socket(host: str, port: int): if isinstance(port, str): if port.isdigit(): port = int(port) try: s = socket.socket() s.connect((host, port)) s = remote.fromsocket(s) except Exception: raise ConnectionAbortedError return s def get_system_routes() -> iter: with open("/proc/net/route") as fh: for line in fh: fields = line.strip().split() if fields[1] == "00000000" or fields[1][0].isupper(): continue yield socket.inet_ntoa(struct.pack("=L", int(fields[1], 16))) def check_cidr_overlap(address1: str, address2: str) -> bool: return ipaddress.ip_address(address1) in ipaddress.ip_network(address2) def read_bin_file(filename: str): if not os.path.isfile(filename): raise FileNotFoundError() with open(filename, "rb") as fd: return fd.read() def find_files(directory: str, pattern: str): for root, _, files in os.walk(directory): for basename in files: if fnmatch.fnmatch(basename, pattern): filename = os.path.join(root, basename) yield filename
MIT License
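A tiny round-trip sketch pairing write_to_file with the read_bin_file helper from the same module; the payload and the temporary path are illustrative, not taken from the repository:

payload = b"\x7fELF" + b"\x00" * 12
n = write_to_file(payload, "/tmp/demo.bin")   # hypothetical path
assert n == len(payload)
assert read_bin_file("/tmp/demo.bin") == payload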
athesdrake/aiotfm
aiotfm/packet.py
Packet.read32
python
def read32(self) -> int:
    return struct.unpack('>I', self.readBytes(4))[0]
Read an int (four bytes) from the buffer
https://github.com/athesdrake/aiotfm/blob/c9054e215b8449cc72d28b5067679d25288c00ff/aiotfm/packet.py#L79-L81
import struct from typing import ByteString, List, Optional, Tuple, Union from aiotfm.errors import XXTEAInvalidKeys, XXTEAInvalidPacket class Packet: def __init__(self, buffer: Optional[ByteString] = None): if buffer is None: buffer = bytearray() elif not isinstance(buffer, bytearray): buffer = bytearray(buffer) self.buffer: bytearray = buffer self.pos: int = 0 def __repr__(self): return '<Packet {!r}>'.format(bytes(self)) def __bytes__(self): return bytes(self.buffer) @classmethod def new(cls, c: Union[int, List[int], Tuple[int, int]], cc: Optional[int] = None): if isinstance(c, (tuple, list)): c, cc = c elif cc is None: return cls().write16(c) return cls().write8(c).write8(cc) def copy(self, copy_pos: bool = False) -> 'Packet': p = Packet() if copy_pos: p.pos = self.pos p.buffer = self.buffer.copy() return p def readBytes(self, nbr: int = 1) -> bytes: self.pos += nbr return self.buffer[self.pos - nbr:self.pos] def readCode(self) -> Tuple[int, int]: return self.read8(), self.read8() def read8(self) -> int: self.pos += 1 return self.buffer[self.pos - 1] def read16(self) -> int: return struct.unpack('>H', self.readBytes(2))[0] def read24(self) -> int: return int.from_bytes(self.readBytes(3), 'big')
MIT License
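A short usage sketch for Packet.read32 above: reading a big-endian unsigned 32-bit integer from a four-byte buffer (the byte values are arbitrary):

p = Packet(b"\x00\x00\x01\x00")
print(p.read32())  # 256
print(p.pos)       # 4, readBytes advanced the cursor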
ryran/upvm
modules/sysvalidator.py
call
python
def call(cmd, showStdout=False, showStderr=False, shell=False):
    c.debug("Executing: {}".format(" ".join(cmd)))
    null = open(os.devnull, 'w')
    out = err = None
    if not showStdout:
        out = null
    if not showStderr:
        err = null
    rc = subprocess.call(cmd, shell=shell, stdout=out, stderr=err)
    null.close()
    return ret(rc)
Execute *cmd* and return True on success.
https://github.com/ryran/upvm/blob/bfb7f6eb542ae0d42e178ed183961303ed9212e8/modules/sysvalidator.py#L29-L40
from __future__ import print_function import subprocess import os import tempfile from sys import exit import pwd, grp import json from stat import S_ISBLK from . import cfg from . import string_ops as c myUser = pwd.getpwuid(os.getuid()).pw_name def ret(returnCode): if returnCode == 0: return True else: return False
Apache License 2.0
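A hedged usage sketch for call above (the commands are illustrative): stdout/stderr are swallowed unless explicitly requested, and the exit status is mapped to a boolean via ret():

if call(["ls", "/tmp"]):
    print("command succeeded")
if not call(["false"]):
    print("non-zero exit maps to False")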
sky-uk/mite
mite_browser/__init__.py
Page._resources_with_embedabbles
python
def _resources_with_embedabbles(self):
    return self.frames + self.stylesheets
Any sub-resources of a page which might also contain their own embedded resources
https://github.com/sky-uk/mite/blob/79018b0259d1788566f76ce4da9ec41bd976e9b9/mite_browser/__init__.py#L195-L197
import asyncio import re from functools import wraps from urllib.parse import urlencode, urljoin from bs4 import BeautifulSoup from mite import ensure_fixed_separation from mite.exceptions import MiteError from mite_http import mite_http EMBEDDED_URL_REGEX = re.compile( r"""\(\s*[\]?["']([^"':.]*:)?([^"':.]*\.[^"':.]*)[\]?["']\s*\)""", re.IGNORECASE ) class OptionError(MiteError): def __init__(self, value, options): super().__init__( "%r not in options %r" % (value, options), value=value, options=options ) class ElementNotFoundError(MiteError): def __init__(self, **kwargs): text = (kwargs.pop("text") or "").replace("'", "").replace('"', "") super().__init__( "Could not find element in page with search terms: {}".format( sorted(kwargs.items()) ), text=text, **kwargs, ) def url_builder(base_url, *args, **kwargs): url = base_url for arg in args: url = urljoin(url, arg) if kwargs: url = "".join([url, "?", urlencode(kwargs)]) return url def browser_decorator(separation=0, embedded_resources=False): def wrapper_factory(func): @wraps(func) @mite_http async def wrapper(context, *args, **kwargs): context.browser = Browser(context, embedded_resources) async with ensure_fixed_separation(separation): result = await func(context, *args, **kwargs) del context.browser return result return wrapper return wrapper_factory class Browser: def __init__(self, context, embedded_res=False): self._ctx = context self._session = context.http self._embedded_res = embedded_res async def _download_resource(self, url, origin, type): resource = await self._session.request("GET", url) origin._register_resource(resource, type) async def _download_resources(self, origin): await asyncio.gather( *[ self._download_resource(url, origin, rtype) for url, rtype in origin._embeded_urls ] ) await asyncio.gather( *[ self._download_resources(resource) for resource in origin._resources_with_embedabbles ] ) async def request(self, method, url, *args, **kwargs): embedded_res = kwargs.pop("embedded_res", self._embedded_res) resp = await self._session.request(method, url, *args, **kwargs) page = Page(resp, self) if embedded_res: async with self._ctx.transaction( self._ctx._transaction_name + " - embedded resources" ): await self._download_resources(page) return page @property def headers(self): return self._session.headers async def get(self, url, *args, **kwargs): return await self.request("GET", url, *args, **kwargs) async def post(self, url, *args, **kwargs): return await self.request("POST", url, *args, **kwargs) async def options(self, url, *args, **kwargs): return await self.request("OPTIONS", url, *args, **kwargs) def erase_all_cookies(self): self._session.erase_all_cookies() def erase_session_cookies(self): self._session.erase_session_cookies() def get_cookie_list(self): return self._session.get_cookie_list() class Resource: def __init__(self, response, browser): self.response = response self.browser = browser @property def text(self): return self.response.text @property def _embeded_urls(self): return [] @property def _resources_with_embedabbles(self): return [] class Page(Resource): def __init__(self, response, browser): super().__init__(response, browser) self._dom = None self.scripts = [] self.stylesheets = [] self.resources = [] self.frames = [] def assert_element_in(self, name=None, attrs={}, recursive=True, text=None, **kwargs): if self.find(name=name, attrs=attrs, recursive=recursive, text=text, **kwargs): return True else: raise ElementNotFoundError(name=name, attrs=attrs, text=text, **kwargs) @property def dom(self): if 
self._dom is None: self._dom = BeautifulSoup(self.response.text, "html.parser") return self._dom @property def cookies(self): return self.response.cookies @property def text(self): return self.response.text @property def headers(self): return self.response.headers @property def status_code(self): return self.response.status_code def find_all(self, *args, **kwargs): return self.dom.find_all(*args, **kwargs) def find(self, *args, **kwargs): return self.dom.find(*args, **kwargs) @property
MIT License
globocom/globonetworkapi-webui
CadVlan/Acl/acl.py
createAclGit
python
def createAclGit(acl_name, environment, network, user):
    # Python 2 module; commit/log messages are in Portuguese
    # ("Criação do Arquivo %s pelo usuário: %s" = "Creation of file %s by user: %s").
    try:
        acl = check_name_file(acl_name)
        path = path_acl(environment["nome_ambiente_logico"],
                        environment["nome_divisao"], environment["acl_path"])
        mkdir_divison_dc(
            environment["nome_divisao"], user, environment["acl_path"])
        chdir(PATH_TYPES.ACL, network, path)
        Git.synchronization()
        File.create(acl)
        Git.add(acl)
        Git.commit(acl, "Criação do Arquivo %s pelo usuário: %s" %
                   (acl, user.get_username()))
        Git.push()
        logger.info("%s criou no GIT o arquivo: %s" %
                    (user.get_username(), (path + acl)))
    except (GITCommandError, FileError, Exception), e:
        logger.error("Erro quando o usuário %s tentou criar o arquivo: %s no Git" % (
            user.get_username(), (path + acl)))
        logger.error(e)
        raise GITCommandError(e)
Create the acl file.

:param acl_name: acl name
:param environment: Environment
:param network: v4 or v6
:param user: user
:raise GITCommandError: Failed to execute command
https://github.com/globocom/globonetworkapi-webui/blob/eb9346c36c5292bc7fb334ec23589bf8fb6b5091/CadVlan/Acl/acl.py#L346-L385
from CadVlan.Util.git import Git, GITCommandError from CadVlan.Util.file import File, FileError from CadVlan.Util.Enum import Enum from CadVlan.settings import PATH_ACL from CadVlan.Util.Enum import NETWORK_TYPES import logging import os from time import strftime import commands from os import listdir from os.path import isfile, join from CadVlan.Util.utility import IP_VERSION logger = logging.getLogger(__name__) EXTENTION_FILE = ".txt" PATH_ACL_TEMPLATES = "/templates/" PATH_TYPES = Enum(["ACL", "TEMPLATE"]) DIVISON_DC = Enum(["FE", "BE", "DEV_QA_FE", "DEV_QA", "BORDA", "BE_POP_SP", "FE_POP_SP", "BORDA_POP_SP"]) ENVIRONMENT_LOGICAL = Enum( ["APLICATIVOS", "PORTAL", "HOMOLOGACAO", "PRODUCAO", "BORDA"]) TEMPLATES = Enum( ["BE", "BEHO", "FE_APLICATIVOS", "FE_DEV_QA", "FE_PORTAL", "FE_STAGING"]) PREFIX_TEMPLATES = "ACL_PADRAO_" hexa = lambda x: hex(x)[2:] def mkdir_divison_dc(divison_dc, user, acl_path=None): try: divison_dc = str(divison_dc).upper() os.chdir(PATH_ACL) if divison_dc == DIVISON_DC.BORDA: divison_dc = "Borda" directory = divison_dc if acl_path: directory = acl_path Git.synchronization() list_path = [] list_path.append("%s%s/" % (PATH_ACL, 'v4')) list_path.append("%s%s/" % (PATH_ACL, 'v6')) for path in list_path: os.chdir(path) folders = directory.split("/") for folder in folders: if folder: if not os.path.exists(folder): os.mkdir(folder) logger.info( "%s criou no Git o diretório: %s/%s" % (user.get_username(), path, folder)) path = "%s/%s" % (path, folder) os.chdir(path) except Exception, e: logger.error("Erro quando o usuário %s tentou criar o diretório: %s no Git" % ( user.get_username(), path+folder)) logger.error(e) raise GITCommandError(e) def script_template(environment_logical, divison_dc, group_l3, template_name): script = False if template_name: script = True else: if divison_dc == DIVISON_DC.FE: if environment_logical == ENVIRONMENT_LOGICAL.APLICATIVOS or environment_logical == ENVIRONMENT_LOGICAL.PORTAL or environment_logical == ENVIRONMENT_LOGICAL.HOMOLOGACAO: if group_l3 == "CORE/DENSIDADE": script = True elif divison_dc == DIVISON_DC.BE: if environment_logical == ENVIRONMENT_LOGICAL.PRODUCAO or environment_logical == ENVIRONMENT_LOGICAL.HOMOLOGACAO: if group_l3 == "CORE/DENSIDADE": script = True return script def chdir(type_path, network, path=None): try: if type_path == PATH_TYPES.ACL: path = "%s%s/%s" % (PATH_ACL, network, path) elif type_path == PATH_TYPES.TEMPLATE: path = "%s%s/%s" % (PATH_ACL, network, PATH_ACL_TEMPLATES) os.chdir(path) except Exception, e: logger.error(e) raise Exception(e) def check_name_file(acl_file_name, extention=True): acl = "" for caracter in acl_file_name: if ((caracter == ".") or (caracter == " ")): pass else: acl += caracter if extention == True: acl = acl + EXTENTION_FILE return acl def check_name_file_bkp(acl_file_name): acl = "" for caracter in acl_file_name: if ((caracter == ".") or (caracter == " ")): pass else: acl += caracter return acl + "_bkp_" + strftime("%Y%m%d%H%M%S") + EXTENTION_FILE def path_acl(environment_logical, divison_dc, acl_path=None): path = divison_dc if environment_logical == ENVIRONMENT_LOGICAL.HOMOLOGACAO: if divison_dc == DIVISON_DC.FE: path = DIVISON_DC.DEV_QA_FE else: path = DIVISON_DC.DEV_QA elif environment_logical == ENVIRONMENT_LOGICAL.PRODUCAO: if divison_dc == replace_to_correct(DIVISON_DC.BE_POP_SP): path = replace_to_correct(DIVISON_DC.BE_POP_SP) elif divison_dc == replace_to_correct(DIVISON_DC.FE_POP_SP): path = replace_to_correct(DIVISON_DC.FE_POP_SP) elif environment_logical == 
ENVIRONMENT_LOGICAL.BORDA: if divison_dc == replace_to_correct(DIVISON_DC.BORDA_POP_SP): path = replace_to_correct(DIVISON_DC.BORDA_POP_SP) else: path = divison_dc if path == DIVISON_DC.BORDA: path = "Borda" if acl_path: path = acl_path return path def replace_to_correct(value): return value.replace('_', '-') def checkAclGit(acl_file_name, environment, network, user): try: acl = check_name_file(acl_file_name) path = path_acl(environment["nome_ambiente_logico"], environment["nome_divisao"], environment["acl_path"]) mkdir_divison_dc( environment["nome_divisao"], user, environment["acl_path"]) chdir(PATH_TYPES.ACL, network, path) Git.synchronization() return os.path.exists(acl) except (GITCommandError, Exception), e: logger.error( "Erro quando o usuário %s tentou sincronizar no Git" % (user.get_username())) logger.error(e) raise GITCommandError(e) def getAclGit(acl_file_name, environment, network, user): try: acl = check_name_file(acl_file_name) path = path_acl(environment["nome_ambiente_logico"], environment["nome_divisao"], environment["acl_path"]) mkdir_divison_dc( environment["nome_divisao"], user, environment["acl_path"]) chdir(PATH_TYPES.ACL, network, path) Git.synchronization() content = File.read(acl) return content except (GITCommandError, FileError, Exception), e: logger.error( "Erro quando o usuário %s tentou sincronizar no Git" % (user.get_username())) logger.error(e) raise GITCommandError(e) def alterAclGit(acl_name, acl_content, environment, comment, network, user): try: acl = check_name_file(acl_name) path = path_acl(environment["nome_ambiente_logico"], environment["nome_divisao"], environment["acl_path"]) chdir(PATH_TYPES.ACL, network, path) Git.synchronization() File.write(acl, acl_content) Git.commit(acl, "%s comentou: %s" % (user.get_username(), comment)) Git.push() logger.info("%s alterou no GIT o arquivo: %s Comentário do Usuário: %s" % ( user.get_username(), (path + acl), comment)) except (GITCommandError, FileError, Exception), e: logger.error("Erro quando o usuário %s tentou atualizar o arquivo: %s no Git" % ( user.get_username(), (path + acl))) logger.error(e) raise GITCommandError(e)
Apache License 2.0
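A hedged call sketch for createAclGit above. The environment keys come from the function body; the values, the user object, and the "v4" network string are assumptions made for illustration only:

environment = {
    "nome_ambiente_logico": "PRODUCAO",  # logical environment name
    "nome_divisao": "BE",                # datacenter division
    "acl_path": None,
}
# request.user is a hypothetical object exposing get_username(); "v4" selects the IPv4 tree.
createAclGit("my-acl", environment, "v4", request.user)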
fused-wind/fusedwind
src/fusedwind/fused_helper.py
my_str
python
def my_str(var, parent='', value=False):
    if isinstance(var, VariableTree):
        out = '{' + '<' + parent + '> ' + parent + '|{' + \
            '|'.join([my_str(v, k, value) for k, v in var.items()]) + '}}'
    else:
        if value:
            out = '<' + parent + '> ' + parent + '=' + str(var)
        else:
            out = '<' + parent + '> ' + parent
    return out
String representation for generating graphs
https://github.com/fused-wind/fusedwind/blob/5025b84f8bfb334b33bf172bf1a39e3abcadab15/src/fusedwind/fused_helper.py#L173-L184
from openmdao.main.api import Component, Assembly, VariableTree from openmdao.main.driver import Driver from openmdao.lib.drivers.api import CaseIteratorDriver from openmdao.lib.datatypes.api import VarTree, Float, Instance, Slot, Array, List, Int, Str, Dict base_keys = Assembly.__base_traits__.keys() + Driver.__base_traits__.keys() + CaseIteratorDriver.__base_traits__.keys() from collections import defaultdict import json from numpy import ndarray, array, sort import pandas from openmdao.main.interfaces import Interface, implements from zope.interface import implementer from openmdao.main.api import Component, Assembly from pprint import pprint add2key = lambda key, dico: dict( [(key + '.' + k, v) for k, v in dico.iteritems()]) def flatten(t): out = [] for k, v in t.iteritems(): if isinstance(v, dict): out += add2key(k, flatten(v)).items() else: out.append((k, v)) return dict(out) class dictree(dict): __getattr__ = dict.__getitem__ __setattr__ = dict.__setitem__ def VariableTree(self, cls): obj = cls() for k, v in self.iteritems(): if k in obj.list_vars(): if isinstance(getattr(self, k), dictree): v = getattr(self, k).VariableTree( getattr(obj, k).__class__) setattr(obj, k, v) return obj def serialize(self): new_dic = self.copy() for k, v in self.iteritems(): if isinstance(v, dictree): new_dic[k] = v.serialize() elif isinstance(v, ndarray): new_dic[k] = v.tolist() return new_dic def _repr_json_(self): print json.dumps(self.serialize(), sort_keys=True, indent=2, separators=(',', ': ')) @property def json(self): self._repr_json_() def flatten(self): return flatten(self) def df(self): return pandas.DataFrame(self.flatten()) class defaultdictree(defaultdict, dictree): def __missing__(self, *args, **kwargs): if not args[0][0] == '_': return defaultdict.__missing__(self, *args, **kwargs) tree = lambda *args, **kwargs: defaultdictree(tree, *args, **kwargs) def list_ios(self, iotype=None): if not iotype: return [i for i in self.list_vars() if i not in base_keys] if iotype == 'in': if hasattr(self, 'list_inputs'): return [i for i in self.list_inputs() if i not in base_keys] else: return [] if iotype == 'out': if hasattr(self, 'list_outputs'): return [i for i in self.list_outputs() if i not in base_keys] else: return [] black_list = ['gradient_options'] def my_tree(self, iotype=None): if hasattr(self, 'list_containers'): containers = [f for f in self.list_containers() if f not in black_list and any( [isinstance(getattr(self, f), o) for o in (Component, Driver, Assembly, VariableTree)])] return dictree([i for i in self.items() if i[0] in list_ios(self, iotype)] + [(c, my_tree(getattr(self, c), iotype)) for c in containers]) elif isinstance(self, List): return [my_tree(s, iotype) for s in self] else: return self def _repr_tree_(self): my_tree(self).json def disp(comp): return pprint(dict(comp.items())) def my_call(self, **kwargs): for k, v in kwargs.iteritems(): setattr(self, k, v) self.run() return self
Apache License 2.0
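Two quick examples of the record-label strings my_str produces for a plain (non-VariableTree) value; per the docstring these labels are used when generating graphs:

print(my_str(3.14, parent='x', value=True))  # <x> x=3.14
print(my_str(3.14, parent='x'))              # <x> x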
parallels/artifactory
artifactory.py
_ArtifactoryAccessor.rest_put_stream
python
def rest_put_stream(self, url, stream, headers=None, auth=None, verify=True, cert=None):
    res = requests.put(url, headers=headers, auth=auth, data=stream,
                       verify=verify, cert=cert)
    return res.text, res.status_code
Perform a chunked PUT request to url with optional authentication. This is specifically to upload files.
https://github.com/parallels/artifactory/blob/09ddcc4ae15095eec2347d39774c3f8aca6c4654/artifactory.py#L443-L449
import os import sys import errno import pathlib import collections import requests import re import json import dateutil.parser import hashlib try: import requests.packages.urllib3 as urllib3 except ImportError: import urllib3 try: import configparser except ImportError: import ConfigParser as configparser default_config_path = '~/.artifactory_python.cfg' global_config = None def read_config(config_path=default_config_path): config_path = os.path.expanduser(config_path) if not os.path.isfile(config_path): raise OSError(errno.ENOENT, "Artifactory configuration file not found: '%s'" % config_path) p = configparser.ConfigParser() p.read(config_path) result = {} for section in p.sections(): username = p.get(section, 'username') if p.has_option(section, 'username') else None password = p.get(section, 'password') if p.has_option(section, 'password') else None verify = p.getboolean(section, 'verify') if p.has_option(section, 'verify') else True cert = p.get(section, 'cert') if p.has_option(section, 'cert') else None result[section] = {'username': username, 'password': password, 'verify': verify, 'cert': cert} if result[section]['cert']: result[section]['cert'] = os.path.expanduser(result[section]['cert']) return result def read_global_config(config_path=default_config_path): global global_config if global_config is None: try: global_config = read_config(config_path) except OSError: pass def without_http_prefix(url): if url.startswith('http://'): return url[7:] elif url.startswith('https://'): return url[8:] return url def get_base_url(config, url): if not config: return None for item in config: if url.startswith(item): return item for item in config: if without_http_prefix(url).startswith(without_http_prefix(item)): return item def get_config_entry(config, url): if not config: return None if url in config: return config[url] for item in config: if without_http_prefix(item) == without_http_prefix(url): return config[item] def get_global_config_entry(url): read_global_config() return get_config_entry(global_config, url) def get_global_base_url(url): read_global_config() return get_base_url(global_config, url) def md5sum(filename): md5 = hashlib.md5() with open(filename, 'rb') as f: for chunk in iter(lambda: f.read(128 * md5.block_size), b''): md5.update(chunk) return md5.hexdigest() def sha1sum(filename): sha1 = hashlib.sha1() with open(filename, 'rb') as f: for chunk in iter(lambda: f.read(128 * sha1.block_size), b''): sha1.update(chunk) return sha1.hexdigest() class HTTPResponseWrapper(object): def __init__(self, obj): self.obj = obj def __getattr__(self, attr): if attr in self.__dict__: return self.__dict__[attr] if attr == 'seek': raise AttributeError return getattr(self.obj, attr) def __len__(self): return int(self.getheader('content-length')) def encode_matrix_parameters(parameters): result = [] for param in iter(sorted(parameters)): if isinstance(parameters[param], (list, tuple)): value = (';%s=' % (param)).join(parameters[param]) else: value = parameters[param] result.append("%s=%s" % (param, value)) return ';'.join(result) def escape_chars(s): return "".join(['\\' + ch if ch in '=|,' else ch for ch in s]) def encode_properties(parameters): result = [] for param in iter(sorted(parameters)): if isinstance(parameters[param], (list, tuple)): value = ','.join([escape_chars(x) for x in parameters[param]]) else: value = escape_chars(parameters[param]) result.append("%s=%s" % (param, value)) return '|'.join(result) class _ArtifactoryFlavour(pathlib._Flavour): sep = '/' altsep = '/' has_drv = True 
pathmod = pathlib.posixpath is_supported = (True) def parse_parts(self, parts): drv, root, parsed = super(_ArtifactoryFlavour, self).parse_parts(parts) return drv, root, parsed def splitroot(self, part, sep=sep): drv = '' root = '' base = get_global_base_url(part) if base and without_http_prefix(part).startswith(without_http_prefix(base)): mark = without_http_prefix(base).rstrip(sep)+sep parts = part.split(mark) else: mark = sep+'artifactory'+sep parts = part.split(mark) if len(parts) >= 2: drv = parts[0] + mark.rstrip(sep) rest = sep + mark.join(parts[1:]) elif part.endswith(mark.rstrip(sep)): drv = part rest = '' else: rest = part if not rest: return drv, '', '' if rest == sep: return drv, '', '' if rest.startswith(sep): root, _, part = rest[1:].partition(sep) root = sep + root + sep return drv, root, part def casefold(self, string): return string def casefold_parts(self, parts): return parts def resolve(self, path): return path def is_reserved(self, _): return False def make_uri(self, path): return path _artifactory_flavour = _ArtifactoryFlavour() ArtifactoryFileStat = collections.namedtuple( 'ArtifactoryFileStat', ['ctime', 'mtime', 'created_by', 'modified_by', 'mime_type', 'size', 'sha1', 'md5', 'is_dir', 'children']) class _ArtifactoryAccessor(pathlib._Accessor): def rest_get(self, url, params=None, headers=None, auth=None, verify=True, cert=None): res = requests.get(url, params=params, headers=headers, auth=auth, verify=verify, cert=cert) return res.text, res.status_code def rest_put(self, url, params=None, headers=None, auth=None, verify=True, cert=None): res = requests.put(url, params=params, headers=headers, auth=auth, verify=verify, cert=cert) return res.text, res.status_code def rest_post(self, url, params=None, headers=None, auth=None, verify=True, cert=None): res = requests.post(url, params=params, headers=headers, auth=auth, verify=verify, cert=cert) return res.text, res.status_code def rest_del(self, url, params=None, auth=None, verify=True, cert=None): res = requests.delete(url, params=params, auth=auth, verify=verify, cert=cert) return res.text, res.status_code
MIT License
cyoon1729/multi-agent-reinforcement-learning
MADDPG/agent.py
DDPGAgent.update
python
def update(self, indiv_reward_batch, indiv_obs_batch, global_state_batch,
           global_actions_batch, global_next_state_batch, next_global_actions):
    # Convert the sampled batches to tensors on the agent's device.
    indiv_reward_batch = torch.FloatTensor(indiv_reward_batch).to(self.device)
    indiv_reward_batch = indiv_reward_batch.view(indiv_reward_batch.size(0), 1).to(self.device)
    indiv_obs_batch = torch.FloatTensor(indiv_obs_batch).to(self.device)
    global_state_batch = torch.FloatTensor(global_state_batch).to(self.device)
    global_actions_batch = torch.stack(global_actions_batch).to(self.device)
    global_next_state_batch = torch.FloatTensor(global_next_state_batch).to(self.device)
    next_global_actions = next_global_actions

    # Critic update: minimize the TD error against the target critic.
    self.critic_optimizer.zero_grad()
    curr_Q = self.critic.forward(global_state_batch, global_actions_batch)
    next_Q = self.critic_target.forward(global_next_state_batch, next_global_actions)
    estimated_Q = indiv_reward_batch + self.gamma * next_Q
    critic_loss = self.MSELoss(curr_Q, estimated_Q.detach())
    critic_loss.backward()
    torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
    self.critic_optimizer.step()

    # Actor update: ascend the centralized critic's Q-value, with a small
    # action-magnitude regularizer on the current policy output.
    self.actor_optimizer.zero_grad()
    policy_loss = -self.critic.forward(global_state_batch, global_actions_batch).mean()
    curr_pol_out = self.actor.forward(indiv_obs_batch)
    policy_loss += -(curr_pol_out**2).mean() * 1e-3
    policy_loss.backward()
    torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
    self.actor_optimizer.step()
indiv_reward_batch : only rewards of agent i
indiv_obs_batch : only observations of agent i
global_state_batch : observations of all agents are concatenated
global_actions_batch : actions of all agents are concatenated
global_next_state_batch : next observations of all agents are concatenated
next_global_actions : next actions of all agents are concatenated
https://github.com/cyoon1729/multi-agent-reinforcement-learning/blob/1c59291d2a65906e15f1dc2a6113d1fd18592506/MADDPG/agent.py#L66-L103
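For context, a minimal sketch of how this update step might be driven from a training loop; the replay-buffer layout, variable names, and tensor shapes below are assumptions, not code from the repository.

# Hypothetical call site (all names and shapes are placeholders).
agent.update(
    indiv_reward_batch=rewards_i,               # rewards of agent i, shape (batch,)
    indiv_obs_batch=obs_i,                      # observations of agent i, shape (batch, obs_dim_i)
    global_state_batch=all_obs_concat,          # concatenated observations, shape (batch, sum(obs_dims))
    global_actions_batch=actions_per_agent,     # sequence of per-agent action tensors; stacked inside update()
    global_next_state_batch=all_next_obs_concat,
    next_global_actions=next_actions,           # target-actor actions for the next state
)
# Target networks are typically soft-updated elsewhere using the agent's tau parameter.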
import torch import torch.nn as nn import torch.optim as optim import torch.autograd as autograd import numpy as np from model import CentralizedCritic, Actor class DDPGAgent: def __init__(self, env, agent_id, actor_lr=1e-4, critic_lr=1e-3, gamma=0.99, tau=1e-2): self.env = env self.agent_id = agent_id self.actor_lr = actor_lr self.critic_lr = critic_lr self.gamma = gamma self.tau = tau self.device = "cpu" self.use_cuda = torch.cuda.is_available() if self.use_cuda: self.device = "cuda" self.obs_dim = self.env.observation_space[agent_id].shape[0] self.action_dim = self.env.action_space[agent_id].n self.num_agents = self.env.n self.critic_input_dim = int(np.sum([env.observation_space[agent].shape[0] for agent in range(env.n)])) self.actor_input_dim = self.obs_dim self.critic = CentralizedCritic(self.critic_input_dim, self.action_dim * self.num_agents).to(self.device) self.critic_target = CentralizedCritic(self.critic_input_dim, self.action_dim * self.num_agents).to(self.device) self.actor = Actor(self.actor_input_dim, self.action_dim).to(self.device) self.actor_target = Actor(self.actor_input_dim, self.action_dim).to(self.device) for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()): target_param.data.copy_(param.data) for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()): target_param.data.copy_(param.data) self.MSELoss = nn.MSELoss() self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=critic_lr) self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr) def get_action(self, state): state = autograd.Variable(torch.from_numpy(state).float().squeeze(0)).to(self.device) action = self.actor.forward(state) action = self.onehot_from_logits(action) return action def onehot_from_logits(self, logits, eps=0.0): argmax_acs = (logits == logits.max(0, keepdim=True)[0]).float() if eps == 0.0: return argmax_acs rand_acs = Variable(torch.eye(logits.shape[1])[[np.random.choice( range(logits.shape[1]), size=logits.shape[0])]], requires_grad=False) return torch.stack([argmax_acs[i] if r > eps else rand_acs[i] for i, r in enumerate(torch.rand(logits.shape[0]))])
MIT License
rbuffat/pyidf
pyidf/thermal_zones_and_surfaces.py
ZoneGroup.zone_list_name
python
def zone_list_name(self, value=None):
    self["Zone List Name"] = value
Corresponds to IDD field `Zone List Name`
https://github.com/rbuffat/pyidf/blob/c2f744211572b5e14e29522aac1421ba88addb0e/pyidf/thermal_zones_and_surfaces.py#L870-L872
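A minimal usage sketch, assuming a ZoneGroup object can be constructed directly and later added to an IDF; the object and zone-list names are placeholders.

# Hypothetical usage (names are placeholders).
group = ZoneGroup()
group.name = "Ground Floor Group"
group.zone_list_name = "Ground Floor Zones"   # must match the name of a ZoneList object
group.zone_list_multiplier = 1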
from collections import OrderedDict import logging from pyidf.helper import DataObject logger = logging.getLogger("pyidf") logger.addHandler(logging.NullHandler()) class GlobalGeometryRules(DataObject): _schema = {'extensible-fields': OrderedDict(), 'fields': OrderedDict([(u'starting vertex position', {'name': u'Starting Vertex Position', 'pyname': u'starting_vertex_position', 'required-field': True, 'autosizable': False, 'accepted-values': [u'UpperLeftCorner', u'LowerLeftCorner', u'UpperRightCorner', u'LowerRightCorner'], 'autocalculatable': False, 'type': 'alpha'}), (u'vertex entry direction', {'name': u'Vertex Entry Direction', 'pyname': u'vertex_entry_direction', 'required-field': True, 'autosizable': False, 'accepted-values': [u'Counterclockwise', u'Clockwise'], 'autocalculatable': False, 'type': 'alpha'}), (u'coordinate system', {'name': u'Coordinate System', 'pyname': u'coordinate_system', 'required-field': True, 'autosizable': False, 'accepted-values': [u'Relative', u'World', u'Absolute'], 'autocalculatable': False, 'type': 'alpha'}), (u'daylighting reference point coordinate system', {'name': u'Daylighting Reference Point Coordinate System', 'pyname': u'daylighting_reference_point_coordinate_system', 'default': u'Relative', 'required-field': False, 'autosizable': False, 'accepted-values': [u'Relative', u'World', u'Absolute'], 'autocalculatable': False, 'type': 'alpha'}), (u'rectangular surface coordinate system', {'name': u'Rectangular Surface Coordinate System', 'pyname': u'rectangular_surface_coordinate_system', 'default': u'Relative', 'required-field': False, 'autosizable': False, 'accepted-values': [u'Relative', u'World', u'Absolute'], 'autocalculatable': False, 'type': 'alpha'})]), 'format': None, 'group': u'Thermal Zones and Surfaces', 'min-fields': 0, 'name': u'GlobalGeometryRules', 'pyname': u'GlobalGeometryRules', 'required-object': True, 'unique-object': True} @property def starting_vertex_position(self): return self["Starting Vertex Position"] @starting_vertex_position.setter def starting_vertex_position(self, value=None): self["Starting Vertex Position"] = value @property def vertex_entry_direction(self): return self["Vertex Entry Direction"] @vertex_entry_direction.setter def vertex_entry_direction(self, value=None): self["Vertex Entry Direction"] = value @property def coordinate_system(self): return self["Coordinate System"] @coordinate_system.setter def coordinate_system(self, value=None): self["Coordinate System"] = value @property def daylighting_reference_point_coordinate_system(self): return self["Daylighting Reference Point Coordinate System"] @daylighting_reference_point_coordinate_system.setter def daylighting_reference_point_coordinate_system(self, value="Relative"): self["Daylighting Reference Point Coordinate System"] = value @property def rectangular_surface_coordinate_system(self): return self["Rectangular Surface Coordinate System"] @rectangular_surface_coordinate_system.setter def rectangular_surface_coordinate_system(self, value="Relative"): self["Rectangular Surface Coordinate System"] = value class GeometryTransform(DataObject): _schema = {'extensible-fields': OrderedDict(), 'fields': OrderedDict([(u'plane of transform', {'name': u'Plane of Transform', 'pyname': u'plane_of_transform', 'default': u'XY', 'required-field': True, 'autosizable': False, 'accepted-values': [u'XY'], 'autocalculatable': False, 'type': 'alpha'}), (u'current aspect ratio', {'name': u'Current Aspect Ratio', 'pyname': u'current_aspect_ratio', 'minimum>': 0.0, 'required-field': 
True, 'autosizable': False, 'autocalculatable': False, 'type': 'real'}), (u'new aspect ratio', {'name': u'New Aspect Ratio', 'pyname': u'new_aspect_ratio', 'minimum>': 0.0, 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': 'real'})]), 'format': None, 'group': u'Thermal Zones and Surfaces', 'min-fields': 0, 'name': u'GeometryTransform', 'pyname': u'GeometryTransform', 'required-object': False, 'unique-object': True} @property def plane_of_transform(self): return self["Plane of Transform"] @plane_of_transform.setter def plane_of_transform(self, value="XY"): self["Plane of Transform"] = value @property def current_aspect_ratio(self): return self["Current Aspect Ratio"] @current_aspect_ratio.setter def current_aspect_ratio(self, value=None): self["Current Aspect Ratio"] = value @property def new_aspect_ratio(self): return self["New Aspect Ratio"] @new_aspect_ratio.setter def new_aspect_ratio(self, value=None): self["New Aspect Ratio"] = value class Zone(DataObject): _schema = {'extensible-fields': OrderedDict(), 'fields': OrderedDict([(u'name', {'name': u'Name', 'pyname': u'name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'alpha'}), (u'direction of relative north', {'name': u'Direction of Relative North', 'pyname': u'direction_of_relative_north', 'default': 0.0, 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'deg'}), (u'x origin', {'name': u'X Origin', 'pyname': u'x_origin', 'default': 0.0, 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'm'}), (u'y origin', {'name': u'Y Origin', 'pyname': u'y_origin', 'default': 0.0, 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'm'}), (u'z origin', {'name': u'Z Origin', 'pyname': u'z_origin', 'default': 0.0, 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'm'}), (u'type', {'name': u'Type', 'pyname': u'type', 'default': 1, 'maximum': 1, 'required-field': False, 'autosizable': False, 'minimum': 1, 'autocalculatable': False, 'type': u'integer'}), (u'multiplier', {'name': u'Multiplier', 'pyname': u'multiplier', 'default': 1, 'required-field': False, 'autosizable': False, 'minimum': 1, 'autocalculatable': False, 'type': u'integer'}), (u'ceiling height', {'name': u'Ceiling Height', 'pyname': u'ceiling_height', 'default': 'autocalculate', 'required-field': False, 'autosizable': False, 'autocalculatable': True, 'type': u'real', 'unit': u'm'}), (u'volume', {'name': u'Volume', 'pyname': u'volume', 'default': 'autocalculate', 'required-field': False, 'autosizable': False, 'autocalculatable': True, 'type': u'real', 'unit': u'm3'}), (u'floor area', {'name': u'Floor Area', 'pyname': u'floor_area', 'default': 'autocalculate', 'required-field': False, 'autosizable': False, 'autocalculatable': True, 'type': u'real', 'unit': u'm2'}), (u'zone inside convection algorithm', {'name': u'Zone Inside Convection Algorithm', 'pyname': u'zone_inside_convection_algorithm', 'required-field': False, 'autosizable': False, 'accepted-values': [u'Simple', u'TARP', u'CeilingDiffuser', u'AdaptiveConvectionAlgorithm', u'TrombeWall'], 'autocalculatable': False, 'type': 'alpha'}), (u'zone outside convection algorithm', {'name': u'Zone Outside Convection Algorithm', 'pyname': u'zone_outside_convection_algorithm', 'required-field': False, 'autosizable': False, 'accepted-values': [u'SimpleCombined', u'TARP', u'DOE-2', 
u'MoWiTT', u'AdaptiveConvectionAlgorithm'], 'autocalculatable': False, 'type': 'alpha'}), (u'part of total floor area', {'name': u'Part of Total Floor Area', 'pyname': u'part_of_total_floor_area', 'default': u'Yes', 'required-field': False, 'autosizable': False, 'accepted-values': [u'Yes', u'No'], 'autocalculatable': False, 'type': 'alpha'})]), 'format': u'vertices', 'group': u'Thermal Zones and Surfaces', 'min-fields': 0, 'name': u'Zone', 'pyname': u'Zone', 'required-object': False, 'unique-object': False} @property def name(self): return self["Name"] @name.setter def name(self, value=None): self["Name"] = value @property def direction_of_relative_north(self): return self["Direction of Relative North"] @direction_of_relative_north.setter def direction_of_relative_north(self, value=None): self["Direction of Relative North"] = value @property def x_origin(self): return self["X Origin"] @x_origin.setter def x_origin(self, value=None): self["X Origin"] = value @property def y_origin(self): return self["Y Origin"] @y_origin.setter def y_origin(self, value=None): self["Y Origin"] = value @property def z_origin(self): return self["Z Origin"] @z_origin.setter def z_origin(self, value=None): self["Z Origin"] = value @property def type(self): return self["Type"] @type.setter def type(self, value=1): self["Type"] = value @property def multiplier(self): return self["Multiplier"] @multiplier.setter def multiplier(self, value=1): self["Multiplier"] = value @property def ceiling_height(self): return self["Ceiling Height"] @ceiling_height.setter def ceiling_height(self, value="autocalculate"): self["Ceiling Height"] = value @property def volume(self): return self["Volume"] @volume.setter def volume(self, value="autocalculate"): self["Volume"] = value @property def floor_area(self): return self["Floor Area"] @floor_area.setter def floor_area(self, value="autocalculate"): self["Floor Area"] = value @property def zone_inside_convection_algorithm(self): return self["Zone Inside Convection Algorithm"] @zone_inside_convection_algorithm.setter def zone_inside_convection_algorithm(self, value=None): self["Zone Inside Convection Algorithm"] = value @property def zone_outside_convection_algorithm(self): return self["Zone Outside Convection Algorithm"] @zone_outside_convection_algorithm.setter def zone_outside_convection_algorithm(self, value=None): self["Zone Outside Convection Algorithm"] = value @property def part_of_total_floor_area(self): return self["Part of Total Floor Area"] @part_of_total_floor_area.setter def part_of_total_floor_area(self, value="Yes"): self["Part of Total Floor Area"] = value class ZoneGroup(DataObject): _schema = {'extensible-fields': OrderedDict(), 'fields': OrderedDict([(u'name', {'name': u'Name', 'pyname': u'name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'alpha'}), (u'zone list name', {'name': u'Zone List Name', 'pyname': u'zone_list_name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'object-list'}), (u'zone list multiplier', {'name': u'Zone List Multiplier', 'pyname': u'zone_list_multiplier', 'default': 1, 'required-field': False, 'autosizable': False, 'minimum': 1, 'autocalculatable': False, 'type': u'integer'})]), 'format': None, 'group': u'Thermal Zones and Surfaces', 'min-fields': 2, 'name': u'ZoneGroup', 'pyname': u'ZoneGroup', 'required-object': False, 'unique-object': False} @property def name(self): return self["Name"] @name.setter def name(self, value=None): self["Name"] = value @property def 
zone_list_name(self): return self["Zone List Name"] @zone_list_name.setter
Apache License 2.0
agoragames/leaderboard-python
leaderboard/leaderboard.py
Leaderboard.rank_member_in
python
def rank_member_in(
        self, leaderboard_name, member, score, member_data=None):
    pipeline = self.redis_connection.pipeline()
    # The legacy Redis class and StrictRedis historically take zadd arguments
    # in opposite orders (member/score vs. score/member), hence the branch.
    if isinstance(self.redis_connection, Redis):
        pipeline.zadd(leaderboard_name, member, score)
    else:
        pipeline.zadd(leaderboard_name, score, member)
    if member_data:
        pipeline.hset(
            self._member_data_key(leaderboard_name), member, member_data)
    pipeline.execute()
Rank a member in the named leaderboard.

@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
@param score [float] Member score.
@param member_data [String] Optional member data.
https://github.com/agoragames/leaderboard-python/blob/4bc028164c259c3a31c7e6ccb2a8ecb83cfb1da2/leaderboard/leaderboard.py#L127-L147
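A minimal usage sketch; the Redis connection options, leaderboard names, and member names are placeholders.

# Hypothetical usage (connection options and member names are placeholders).
highscores = Leaderboard('highscores', host='localhost', port=6379, db=0)

# Rank into the leaderboard this instance was created for...
highscores.rank_member('alice', 1200)

# ...or into an explicitly named leaderboard, optionally attaching member data.
highscores.rank_member_in('weekly_highscores', 'bob', 950, member_data='{"team": "red"}')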
from __future__ import division from redis import StrictRedis, Redis, ConnectionPool import math import sys if sys.version_info.major == 3: from itertools import zip_longest else: from itertools import izip_longest as zip_longest def grouper(n, iterable, fillvalue=None): args = [iter(iterable)] * n return zip_longest(fillvalue=fillvalue, *args) class Leaderboard(object): VERSION = '3.7.3' DEFAULT_PAGE_SIZE = 25 DEFAULT_REDIS_HOST = 'localhost' DEFAULT_REDIS_PORT = 6379 DEFAULT_REDIS_DB = 0 DEFAULT_MEMBER_DATA_NAMESPACE = 'member_data' DEFAULT_GLOBAL_MEMBER_DATA = False DEFAULT_POOLS = {} ASC = 'asc' DESC = 'desc' MEMBER_KEY = 'member' MEMBER_DATA_KEY = 'member_data' SCORE_KEY = 'score' RANK_KEY = 'rank' @classmethod def pool(self, host, port, db, pools={}, **options): key = (host, port, db) rval = pools.get(key) if not isinstance(rval, ConnectionPool): rval = ConnectionPool(host=host, port=port, db=db, **options) pools[key] = rval return rval def __init__(self, leaderboard_name, **options): self.leaderboard_name = leaderboard_name self.options = options self.page_size = self.options.pop('page_size', self.DEFAULT_PAGE_SIZE) if self.page_size < 1: self.page_size = self.DEFAULT_PAGE_SIZE self.member_data_namespace = self.options.pop( 'member_data_namespace', self.DEFAULT_MEMBER_DATA_NAMESPACE) self.global_member_data = self.options.pop( 'global_member_data', self.DEFAULT_GLOBAL_MEMBER_DATA) self.order = self.options.pop('order', self.DESC).lower() if not self.order in [self.ASC, self.DESC]: raise ValueError( "%s is not one of [%s]" % (self.order, ",".join([self.ASC, self.DESC]))) redis_connection = self.options.pop('redis_connection', None) if redis_connection: self.redis_connection = redis_connection else: connection = self.options.pop('connection', None) if isinstance(connection, (StrictRedis, Redis)): self.options['connection_pool'] = connection.connection_pool if 'connection_pool' not in self.options: self.options['connection_pool'] = self.pool( self.options.pop('host', self.DEFAULT_REDIS_HOST), self.options.pop('port', self.DEFAULT_REDIS_PORT), self.options.pop('db', self.DEFAULT_REDIS_DB), self.options.pop('pools', self.DEFAULT_POOLS), **self.options ) self.redis_connection = Redis(**self.options) def delete_leaderboard(self): self.delete_leaderboard_named(self.leaderboard_name) def delete_leaderboard_named(self, leaderboard_name): pipeline = self.redis_connection.pipeline() pipeline.delete(leaderboard_name) pipeline.delete(self._member_data_key(leaderboard_name)) pipeline.execute() def rank_member(self, member, score, member_data=None): self.rank_member_in(self.leaderboard_name, member, score, member_data)
MIT License
microsoft/electionguard-python
tests/bench/bench_chaum_pedersen.py
identity
python
def identity(x: int) -> int:
    return x
Placeholder function used just to warm up the parallel mapper prior to benchmarking.
https://github.com/microsoft/electionguard-python/blob/eb19846cd17ae73064586da8f0be11d97c565b43/tests/bench/bench_chaum_pedersen.py#L47-L49
from dataclasses import dataclass from timeit import default_timer as timer from typing import Dict, List, Tuple from statistics import mean, stdev from electionguard.chaum_pedersen import make_disjunctive_chaum_pedersen_zero from electionguard.elgamal import ( elgamal_keypair_from_secret, ElGamalKeyPair, elgamal_encrypt, ) from electionguard.group import ElementModQ, ONE_MOD_Q from electionguard.nonces import Nonces from electionguard.scheduler import Scheduler from electionguard.utils import get_optional @dataclass class BenchInput: keypair: ElGamalKeyPair r: ElementModQ s: ElementModQ def chaum_pedersen_bench(bi: BenchInput) -> Tuple[float, float]: ciphertext = get_optional(elgamal_encrypt(0, bi.r, bi.keypair.public_key)) start1 = timer() proof = make_disjunctive_chaum_pedersen_zero( ciphertext, bi.r, bi.keypair.public_key, ONE_MOD_Q, bi.s ) end1 = timer() valid = proof.is_valid(ciphertext, bi.keypair.public_key, ONE_MOD_Q) end2 = timer() if not valid: raise Exception("Wasn't expecting an invalid proof during a benchmark!") return end1 - start1, end2 - end1
MIT License
cloudant/python-cloudant
src/cloudant/design_document.py
DesignDocument.list_indexes
python
def list_indexes(self):
    return list(self.indexes.keys())
Retrieves a list of available indexes in the locally cached DesignDocument.

:returns: List of index names
https://github.com/cloudant/python-cloudant/blob/3bb26d75fa255802a5f308bbf9cff1ba3b34439b/src/cloudant/design_document.py#L635-L642
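A minimal usage sketch; the Cloudant account, credentials, and database/design-document names are placeholders, and the client construction is assumed from the library's documented API.

# Hypothetical usage (account, credentials, and names are placeholders).
from cloudant.client import Cloudant
from cloudant.design_document import DesignDocument

client = Cloudant('username', 'password', account='example-account', connect=True)
db = client['example-db']

ddoc = DesignDocument(db, 'searches')
ddoc.fetch()                   # refresh the locally cached document from the server
print(ddoc.list_indexes())     # e.g. ['by_title', 'by_author']

client.disconnect()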
from ._2to3 import iteritems_, url_quote_plus, STRTYPE from ._common_util import QUERY_LANGUAGE, codify, response_to_json_dict, assert_document_type_id, DESIGN_PREFIX from .document import Document from .view import View, QueryIndexView from .error import CloudantArgumentError, CloudantDesignDocumentException class DesignDocument(Document): def __init__(self, database, document_id=None, partitioned=False): if document_id: assert_document_type_id(document_id) if document_id and not document_id.startswith(DESIGN_PREFIX): document_id = '{0}{1}'.format(DESIGN_PREFIX, document_id) super(DesignDocument, self).__init__(database, document_id) if partitioned: self.setdefault('options', {'partitioned': True}) else: self.setdefault('options', {'partitioned': False}) self._nested_object_names = frozenset(['views', 'indexes', 'lists', 'shows']) for prop in self._nested_object_names: self.setdefault(prop, dict()) @property def validate_doc_update(self): return self.get('validate_doc_update') @property def filters(self): return self.get('filters') @property def updates(self): return self.get('updates') @property def st_indexes(self): return self.get('st_indexes') @property def lists(self): return self.get('lists') @property def shows(self): return self.get('shows') @property def rewrites(self): return self.get('rewrites') @property def views(self): return self.get('views') @property def indexes(self): return self.get('indexes') def document_partition_url(self, partition_key): return '/'.join(( self._database.database_partition_url(partition_key), '_design', url_quote_plus(self['_id'][8:], safe='') )) def add_view(self, view_name, map_func, reduce_func=None, **kwargs): if self.get_view(view_name) is not None: raise CloudantArgumentError(107, view_name) if self.get('language', None) == QUERY_LANGUAGE: raise CloudantDesignDocumentException(101) view = View(self, view_name, map_func, reduce_func, **kwargs) self.views.__setitem__(view_name, view) def add_search_index(self, index_name, search_func, analyzer=None): if self.get_index(index_name) is not None: raise CloudantArgumentError(108, index_name) if analyzer is not None: search = {'index': codify(search_func), 'analyzer': analyzer} else: search = {'index': codify(search_func)} self.indexes.__setitem__(index_name, search) def add_list_function(self, list_name, list_func): if self.get_list_function(list_name) is not None: raise CloudantArgumentError(109, list_name) self.lists.__setitem__(list_name, codify(list_func)) def add_show_function(self, show_name, show_func): if self.get_show_function(show_name) is not None: raise CloudantArgumentError(110, show_name) self.shows.__setitem__(show_name, show_func) def update_view(self, view_name, map_func, reduce_func=None, **kwargs): view = self.get_view(view_name) if view is None: raise CloudantArgumentError(111, view_name) if isinstance(view, QueryIndexView): raise CloudantDesignDocumentException(102) view = View(self, view_name, map_func, reduce_func, **kwargs) self.views.__setitem__(view_name, view) def update_search_index(self, index_name, search_func, analyzer=None): search = self.get_index(index_name) if search is None: raise CloudantArgumentError(112, index_name) if analyzer is not None: search = {'index': codify(search_func), 'analyzer': analyzer} else: search = {'index': codify(search_func)} self.indexes.__setitem__(index_name, search) def update_list_function(self, list_name, list_func): if self.get_list_function(list_name) is None: raise CloudantArgumentError(113, list_name) 
self.lists.__setitem__(list_name, codify(list_func)) def update_show_function(self, show_name, show_func): if self.get_show_function(show_name) is None: raise CloudantArgumentError(114, show_name) self.shows.__setitem__(show_name, show_func) def delete_view(self, view_name): view = self.get_view(view_name) if view is None: return if isinstance(view, QueryIndexView): raise CloudantDesignDocumentException(103) self.views.__delitem__(view_name) def delete_index(self, index_name): index = self.get_index(index_name) if index is None: return self.indexes.__delitem__(index_name) def delete_list_function(self, list_name): self.lists.__delitem__(list_name) def delete_show_function(self, show_name): if self.get_show_function(show_name) is None: return self.shows.__delitem__(show_name) def fetch(self): super(DesignDocument, self).fetch() if self.views: for view_name, view_def in iteritems_(self.get('views', dict())): if self.get('language', None) != QUERY_LANGUAGE: self['views'][view_name] = View( self, view_name, view_def.pop('map', None), view_def.pop('reduce', None), **view_def ) else: self['views'][view_name] = QueryIndexView( self, view_name, view_def.pop('map', None), view_def.pop('reduce', None), **view_def ) for prop in self._nested_object_names: getattr(self, prop, self.setdefault(prop, dict())) def save(self): if self.views: if self.get('language', None) != QUERY_LANGUAGE: for view_name, view in self.iterviews(): if isinstance(view, QueryIndexView): raise CloudantDesignDocumentException(104, view_name) else: for view_name, view in self.iterviews(): if not isinstance(view, QueryIndexView): raise CloudantDesignDocumentException(105, view_name) if self.indexes: if self.get('language', None) != QUERY_LANGUAGE: for index_name, search in self.iterindexes(): if not isinstance(search['index'], STRTYPE): raise CloudantDesignDocumentException(106, index_name) else: for index_name, index in self.iterindexes(): if not isinstance(index['index'], dict): raise CloudantDesignDocumentException(107, index_name) for prop in self._nested_object_names: if not getattr(self, prop): self.__delitem__(prop) super(DesignDocument, self).save() for prop in self._nested_object_names: getattr(self, prop, self.setdefault(prop, dict())) def __setitem__(self, key, value): if ( key == '_id' and value is not None and not value.startswith('_design/') ): value = '_design/{0}'.format(value) super(DesignDocument, self).__setitem__(key, value) def iterviews(self): for view_name, view in iteritems_(self.views): yield view_name, view def iterindexes(self): for index_name, search_func in iteritems_(self.indexes): yield index_name, search_func def iterlists(self): for list_name, list_func in iteritems_(self.lists): yield list_name, list_func def itershows(self): for show_name, show_func in iteritems_(self.shows): yield show_name, show_func def list_views(self): return list(self.views.keys())
Apache License 2.0
azure/autorest.python
test/vanilla/low-level/Expected/AcceptanceTests/HttpLowLevel/httpinfrastructurelowlevel/rest/http_retry/_request_builders_py3.py
build_options502_request
python
def build_options502_request(**kwargs: Any) -> HttpRequest:
    accept = "application/json"

    url = kwargs.pop("template_url", "/http/retry/502")

    header_parameters = kwargs.pop("headers", {})
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="OPTIONS", url=url, headers=header_parameters, **kwargs)
Return 502 status code, then 200 after retry.

See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow.

:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
https://github.com/azure/autorest.python/blob/90d60a965788e3b4c0809e6686bdc3525acac89c/test/vanilla/low-level/Expected/AcceptanceTests/HttpLowLevel/httpinfrastructurelowlevel/rest/http_retry/_request_builders_py3.py#L140-L160
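A minimal usage sketch; the client object is assumed to be one of the generated low-level clients exposing send_request(), as the quickstart linked above describes.

# Hypothetical usage (construction of `client` is not shown here).
request = build_options502_request()
response = client.send_request(request)   # the service returns 502 first, then 200 after retry
response.raise_for_status()
print(response.status_code)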
from typing import Any, Optional from azure.core.rest import HttpRequest from msrest import Serializer _SERIALIZER = Serializer() def build_head408_request(**kwargs: Any) -> HttpRequest: accept = "application/json" url = kwargs.pop("template_url", "/http/retry/408") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="HEAD", url=url, headers=header_parameters, **kwargs) def build_put500_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) accept = "application/json" url = kwargs.pop("template_url", "/http/retry/500") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="PUT", url=url, headers=header_parameters, json=json, content=content, **kwargs) def build_patch500_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest: content_type = kwargs.pop("content_type", None) accept = "application/json" url = kwargs.pop("template_url", "/http/retry/500") header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="PATCH", url=url, headers=header_parameters, json=json, content=content, **kwargs) def build_get502_request(**kwargs: Any) -> HttpRequest: accept = "application/json" url = kwargs.pop("template_url", "/http/retry/502") header_parameters = kwargs.pop("headers", {}) header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
MIT License
edoburu/django-any-urlfield
any_urlfield/templatetags/any_urlfield_tags.py
WithDictNode.render
python
def render(self, context):
    extra_context = self.context_expr.resolve(context)
    if not isinstance(extra_context, dict):
        raise TemplateSyntaxError("{% withdict %} expects the argument to be a dictionary.")

    with context.push(**extra_context):
        return self.nodelist.render(context)
Render the tag, pushing the resolved dictionary onto the context as an extra layer.
https://github.com/edoburu/django-any-urlfield/blob/b34c2b3aefd0e499a3e401e7048ad19d031c921f/any_urlfield/templatetags/any_urlfield_tags.py#L18-L27
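A minimal template-usage sketch driven from Python; the {% withdict %}/{% endwithdict %} tag wiring is an assumption inferred from this Node class and its error message, and only the templatetags module name comes from the file path.

# Hypothetical usage (tag registration and closing-tag name are assumptions).
from django.template import Context, Template

tpl = Template(
    "{% load any_urlfield_tags %}"
    "{% withdict extra %}{{ title }} / {{ subtitle }}{% endwithdict %}"
)
print(tpl.render(Context({"extra": {"title": "Hello", "subtitle": "World"}})))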
from django.template import Library, Node, TemplateSyntaxError register = Library() class WithDictNode(Node): def __init__(self, nodelist, context_expr): self.nodelist = nodelist self.context_expr = context_expr
Apache License 2.0
pachyderm/python-pachyderm
src/python_pachyderm/experimental/mixin/auth.py
AuthMixin.authenticate_oidc
python
def authenticate_oidc(self, oidc_state: str) -> str:
    return self._req(Service.AUTH, "Authenticate", oidc_state=oidc_state).pach_token
Authenticates a user to the Pachyderm cluster via OIDC.

Parameters
----------
oidc_state : str
    An OIDC state token.

Returns
-------
str
    A token that can be used for making authenticated requests.
https://github.com/pachyderm/python-pachyderm/blob/9dbffba91ac753e7c63c58d71768f53f83789cb9/src/python_pachyderm/experimental/mixin/auth.py#L156-L169
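A minimal usage sketch; the experimental Client import path and the GetOIDCLoginResponse field names (login_url, state) are assumptions, and completing the login in a browser happens outside the snippet.

# Hypothetical usage (import path and response field names are assumptions).
from python_pachyderm.experimental import Client

client = Client()
login = client.get_oidc_login()
print("Complete the login at:", login.login_url)

# After the user finishes the browser login, exchange the OIDC state for a token.
pach_token = client.authenticate_oidc(login.state)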
from typing import Dict, List from python_pachyderm.service import Service, auth_proto class AuthMixin: def activate_auth(self, root_token: str = None) -> str: return self._req(Service.AUTH, "Activate", root_token=root_token).pach_token def deactivate_auth(self) -> None: self._req(Service.AUTH, "Deactivate") def get_auth_configuration(self) -> auth_proto.OIDCConfig: return self._req(Service.AUTH, "GetConfiguration").configuration def set_auth_configuration(self, configuration: auth_proto.OIDCConfig) -> None: self._req(Service.AUTH, "SetConfiguration", configuration=configuration) def get_role_binding( self, resource: auth_proto.Resource ) -> Dict[str, auth_proto.Roles]: return self._req( Service.AUTH, "GetRoleBinding", resource=resource ).binding.entries def modify_role_binding( self, resource: auth_proto.Resource, principal: str, roles: List[str] = None ) -> None: self._req( Service.AUTH, "ModifyRoleBinding", resource=resource, principal=principal, roles=roles, ) def get_oidc_login(self) -> auth_proto.GetOIDCLoginResponse: return self._req(Service.AUTH, "GetOIDCLogin")
Apache License 2.0
laiguokun/funnel-transformer
tensorflow/modeling.py
FunnelTFM.get_mlm_loss
python
def get_mlm_loss(self, target, inputs, is_training, seg_id=None, mapping=None,
                 input_mask=None, use_tpu=False, use_bfloat16=False):
    ret_dict = {}
    net_config = self.net_config

    dtype = tf.float32 if not use_bfloat16 else tf.bfloat16

    # Token (and optional position/segment) embeddings.
    input_embed, word_embed_table, emb_dict = self.input_embedding(
        inputs, is_training, seg_id=seg_id, use_tpu=use_tpu, dtype=dtype)
    ops.update_ret_dict(ret_dict, emb_dict, "emb")

    # Funnel encoder: returns the final (pooled) output and per-block hiddens.
    output, hiddens, enc_dict = self.encoder(
        input_embed,
        is_training,
        seg_id=seg_id,
        input_mask=input_mask)
    ops.update_ret_dict(ret_dict, enc_dict, "enc")

    # Decoder upsamples back to full length when more than one block is used.
    if net_config.n_block > 1:
        output, dec_dict = self.decoder(
            hiddens,
            input_mask=input_mask,
            seg_id=seg_id,
            is_training=is_training)
        ops.update_ret_dict(ret_dict, dec_dict, "dec")

    # Masked-LM loss, tying the softmax weights to the embedding table.
    lm_loss, logits = self.lm_loss(
        output,
        target,
        mapping=mapping,
        lookup_table=word_embed_table,
        return_logits=True,
        use_tpu=use_tpu)
    ret_dict["lm_logits"] = logits
    ret_dict["hiddens"] = hiddens

    return lm_loss, ret_dict
Get mlm pretrain output.
https://github.com/laiguokun/funnel-transformer/blob/1085523bc768e499d8c55edf6af0d70cb1cd27d2/tensorflow/modeling.py#L981-L1021
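A minimal pretraining sketch (TF1 style); the tensor names, shapes, and the way the masked-position mapping is built are assumptions, not code from the repository.

# Hypothetical call site (all tensors are placeholders).
net_config = ModelConfig.init_from_flags()
model = FunnelTFM(net_config)

mlm_loss, ret_dict = model.get_mlm_loss(
    target=target_ids,          # ids of the masked-out tokens, [batch, num_predict]
    inputs=masked_input_ids,    # token ids with masks applied, [batch, seq_len]
    is_training=True,
    seg_id=seg_ids,             # [batch, seq_len]
    mapping=target_mapping,     # one-hot selector of masked positions, [batch, num_predict, seq_len]
    input_mask=input_mask)      # padding mask, [batch, seq_len]

total_loss = tf.reduce_mean(mlm_loss)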
from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os from absl import flags import tensorflow.compat.v1 as tf import ops flags.DEFINE_string("overwrite_keys", default="", help="Comma separated keys to indicate model configs that " "will always be overwritten by the FLAGS values.") flags.DEFINE_string("block_size", default="3_3_3", help="Depth of blocks with potential parameter sharing.") flags.DEFINE_integer("d_model", default=512, help="Dimension of the model.") flags.DEFINE_integer("d_embed", default=512, help="Dimension of the embeddings.") flags.DEFINE_integer("n_head", default=8, help="Number of attention heads.") flags.DEFINE_integer("d_head", default=64, help="Dimension of each attention head.") flags.DEFINE_integer("d_inner", default=2048, help="Dimension of inner hidden size in FFN.") flags.DEFINE_float("dropout", default=0.1, help="Model dropout.") flags.DEFINE_float("dropatt", default=0.1, help="Attention dropout.") flags.DEFINE_float("dropact", default=0.0, help="Activation dropout.") flags.DEFINE_string("ff_activation", default="gelu", help="Activation type used in position-wise feed-forward.") flags.DEFINE_string("rel_attn_type", default="factorized", help="Type of the relative attention.") flags.DEFINE_enum("init", default="truncated_normal", enum_values=["normal", "uniform", "truncated_normal"], help="Initialization method.") flags.DEFINE_float("init_std", default=0.02, help="Initialization std when init is normal.") flags.DEFINE_float("init_range", default=0.1, help="Initialization std when init is uniform.") flags.DEFINE_enum("pooling_type", default="mean", enum_values=["mean", "max"], help="choose from [max, mean].") flags.DEFINE_integer("pooling_size", default=2, help="Kernel size for max and mean pooling.") flags.DEFINE_bool("pool_q_only", default=True, help="Only perform pooling on query") flags.DEFINE_bool("separate_cls", default=True, help="Whether to isolate the [cls]") flags.DEFINE_string("decoder_size", default="2", help="Size configuration of the decoder.") flags.DEFINE_bool("truncate_seq", default=True, help="Truncate the last few tokens according to the max " "stride in the network to make separate [cls] efficient.") FLAGS = flags.FLAGS INF = 1e8 class ModelConfig(object): keys = ["block_size", "vocab_size", "d_embed", "d_model", "n_head", "d_head", "d_inner", "ff_activation", "dropout", "dropatt", "dropact", "init", "init_std", "init_range", "rel_attn_type", "separate_cls", "pooling_type", "pooling_size", "pool_q_only", "decoder_size"] def __init__(self, block_size, vocab_size, d_embed, d_model, n_head, d_head, d_inner, dropout, dropatt, dropact, ff_activation, init="truncated_normal", init_std=0.02, init_range=0.1, rel_attn_type="factorized", separate_cls=True, pooling_type="mean", pooling_size=2, pool_q_only=True, decoder_size="0"): assert vocab_size == FLAGS.vocab_size, "Vocabulary size does not match." 
self.vocab_size = vocab_size self.d_embed = d_embed self.d_model = d_model self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.dropout = dropout self.dropatt = dropatt self.dropact = dropact self.ff_activation = ff_activation self.init = init self.init_std = init_std self.init_range = init_range self.rel_attn_type = rel_attn_type self.block_size = block_size self.block_depth = [] self.block_param_size = [] self.block_repeat_size = [] for cur_block_size in block_size.split("_"): cur_block_size = ModelConfig.parse_depth_string(cur_block_size) self.block_depth.append(cur_block_size[0] * cur_block_size[1]) self.block_param_size.append(cur_block_size[0]) self.block_repeat_size.append(cur_block_size[1]) self.n_block = len(self.block_depth) assert not (self.n_block == 1 and decoder_size != "0"), "Models with only 1 block does NOT need a decoder." self.decoder_size = decoder_size decoder_size = ModelConfig.parse_depth_string(decoder_size) self.decoder_depth = decoder_size[0] * decoder_size[1] self.decoder_param_size = decoder_size[0] self.decoder_repeat_size = decoder_size[1] self.pooling_type = pooling_type self.pooling_size = pooling_size self.pool_q_only = pool_q_only self.separate_cls = separate_cls @staticmethod def parse_depth_string(depth_str): depth_config = depth_str.split("x") if len(depth_config) == 1: depth_config.append(1) assert len(depth_config) == 2, "Require two-element depth config." return list(map(int, depth_config)) @staticmethod def overwrite_args(args): for key in list(set(ModelConfig.keys) & set(args.keys())): overwrite_keys = set(FLAGS.overwrite_keys.split(",")) if key in overwrite_keys: args[key] = getattr(FLAGS, key) else: setattr(FLAGS, key, args[key]) return args @staticmethod def init_from_text(file_path, sep_symbol=None): tf.logging.info("Initialize ModelConfig from text file %s.", file_path) args = {} with tf.io.gfile.GFile(file_path) as f: for line in f: k, v = line.strip().split(sep_symbol) if k in ModelConfig.keys: args[k] = v else: tf.logging.warning("Unused key %s", k) args = ModelConfig.overwrite_args(args) net_config = ModelConfig(**args) return net_config @staticmethod def init_from_json(file_path): tf.logging.info("Initialize ModelConfig from json file %s.", file_path) with tf.io.gfile.GFile(file_path) as f: json_data = json.load(f) if (not getattr(FLAGS, "use_tpu", False) and json_data["rel_attn_type"] == "factorized"): json_data["rel_attn_type"] = "rel_shift" tf.logging.info("Change rel_attn_type to `rel_shift` for non-TPU env.") json_data = ModelConfig.overwrite_args(json_data) net_config = ModelConfig(**json_data) return net_config @staticmethod def init_from_flags(): tf.logging.info("Initialize ModelConfig from FLAGS.") args = {} for key in ModelConfig.keys: args[key] = getattr(FLAGS, key) return ModelConfig(**args) def to_json(self, json_path): tf.logging.info("Save ModelConfig to json file %s.", json_path) json_data = {} for key in ModelConfig.keys: json_data[key] = getattr(self, key) json_dir = os.path.dirname(json_path) if not tf.io.gfile.exists(json_dir): tf.io.gfile.makedirs(json_dir) with tf.io.gfile.GFile(json_path, "w") as f: json.dump(json_data, f, indent=4, sort_keys=True) class FunnelTFM(object): def __init__(self, net_config): self.net_config = net_config self.attn_structures = None def get_initializer(self): net_config = self.net_config if net_config.init == "uniform": initializer = tf.initializers.random_uniform( minval=-net_config.init_range, maxval=net_config.init_range, seed=None) elif net_config.init == "normal": 
initializer = tf.initializers.random_normal( stddev=net_config.init_std, seed=None) elif net_config.init == "truncated_normal": initializer = tf.initializers.truncated_normal( stddev=net_config.init_std, seed=None) else: raise ValueError("Initializer {} not supported".format(net_config.init)) return initializer def get_embedding_table(self, scope="input", dtype=tf.float32): net_config = self.net_config with tf.variable_scope(scope, reuse=True): with tf.variable_scope("word_embedding", reuse=True): lookup_table = tf.get_variable( "lookup_table", [net_config.vocab_size, net_config.d_model], dtype=dtype) return lookup_table def input_embedding(self, inputs, is_training, seg_id=None, pos_id=None, word_embed_table=None, use_tpu=False, scope="input", reuse=tf.AUTO_REUSE, dtype=tf.float32): net_config = self.net_config initializer = self.get_initializer() ret_dict = {} def embed_func(x, pos_id, seg_id): embed, word_embed_table = ops.embedding_lookup( x=x, n_embed=net_config.vocab_size, d_embed=net_config.d_embed, initializer=initializer, use_tpu=use_tpu, dtype=dtype, scope="word_embedding") if net_config.rel_attn_type == "null": if pos_id is None: pos_id = tf.cast(tf.range(tf.shape(x)[-1]), x.dtype) pos_emb, _ = ops.embedding_lookup( x=pos_id, n_embed=512, d_embed=net_config.d_embed, initializer=initializer, use_tpu=use_tpu, dtype=dtype, scope="position_embedding") embed += pos_emb if seg_id is not None: seg_emb, _ = ops.embedding_lookup( x=seg_id % 2, n_embed=2, d_embed=net_config.d_embed, initializer=initializer, use_tpu=use_tpu, dtype=dtype, scope="segment_embedding") embed += seg_emb return embed, word_embed_table with tf.variable_scope(scope, reuse=reuse): word_emb, word_embed_table = embed_func(x=inputs, pos_id=pos_id, seg_id=seg_id) word_emb = ops.layer_norm_op(word_emb, norm_shape=[net_config.d_embed]) output = ops.dropout_op(word_emb, net_config.dropout, training=is_training) return output, word_embed_table, ret_dict def input_projection(self, input_embed): net_config = self.net_config initializer = self.get_initializer() ret_dict = {} output = input_embed if net_config.d_embed != net_config.d_model: tf.logging.info("Project input embedding: %s -> %s", net_config.d_embed, net_config.d_model) output = ops.dense( output, net_config.d_model, inp_shape=net_config.d_embed, initializer=initializer, scope="input_projection") return output, ret_dict def tfmxl_layer(self, q, k, v, pos_enc, seg_mat, attn_mask, is_training, func_mask=None, attn_bias=None): net_config = self.net_config initializer = self.get_initializer() ret_dict = {} output, attn_dict = ops.rel_multihead_attn( q=q, k=k, v=v, pos_enc=pos_enc, seg_mat=seg_mat, attn_mask=attn_mask, attn_bias=attn_bias, d_model=net_config.d_model, n_head=net_config.n_head, d_head=net_config.d_head, dropout=net_config.dropout, dropatt=net_config.dropatt, is_training=is_training, initializer=initializer, func_mask=func_mask, rel_attn_type=net_config.rel_attn_type) output, pffn_dict = ops.positionwise_ffn( inp=output, d_model=net_config.d_model, d_inner=net_config.d_inner, activation_type=net_config.ff_activation, dropout=net_config.dropout, dropact=net_config.dropact, is_training=is_training, initializer=initializer) ops.update_ret_dict(ret_dict, attn_dict, "attn") ops.update_ret_dict(ret_dict, pffn_dict, "pffn") return output, ret_dict def encoder(self, input_embed, is_training, seg_id=None, pos_id=None, input_mask=None, scope="encoder", reuse=tf.AUTO_REUSE): net_config = self.net_config ret_dict = {} with tf.variable_scope(scope, reuse=reuse): output, 
_ = self.input_projection(input_embed) hiddens = [] layer_dict = {} for block_idx in range(net_config.n_block): if block_idx == 0: pos_enc, seg_mat, func_mask = self.init_attn_structures( input_embed, seg_id, pos_id, is_training) else: pool_ret = self.pre_attn_pooling( output, pos_enc, seg_mat, input_mask, func_mask, block_idx, is_training) pooled_out, pos_enc, seg_mat, input_mask, func_mask = pool_ret attn_mask = None if input_mask is None else input_mask[:, None, None] for param_idx in range(net_config.block_param_size[block_idx]): layer_idx = sum(net_config.block_param_size[:block_idx]) + param_idx with tf.variable_scope("layer_{}".format(layer_idx), reuse=reuse): cur_repeat_size = net_config.block_repeat_size[block_idx] for repeat_idx in range(cur_repeat_size): sub_idx = (param_idx * cur_repeat_size + repeat_idx) do_pooling = block_idx > 0 and sub_idx == 0 if do_pooling: if net_config.pool_q_only: q = pooled_out k = v = output else: q = k = v = pooled_out else: q = k = v = output output, layer_dict = self.tfmxl_layer( q=q, k=k, v=v, pos_enc=pos_enc, seg_mat=seg_mat, attn_mask=attn_mask, is_training=is_training, func_mask=func_mask) if do_pooling: pool_ret = self.post_attn_pooling( pos_enc, seg_mat, input_mask, func_mask, block_idx, is_training) pos_enc, seg_mat, input_mask, func_mask = pool_ret attn_mask = None if input_mask is None else input_mask[:, None, None] hiddens.append(output) prefix = "block_{}/layer_{}/repeat_{}".format( block_idx, layer_idx, repeat_idx) ops.update_ret_dict(ret_dict, layer_dict, prefix) return output, hiddens, ret_dict def stride_pool(self, tensor, axis): if tensor is None: return None net_config = self.net_config pool_size = net_config.pooling_size if isinstance(tensor, (tuple, list)): ndims = tensor[0].shape.ndims else: ndims = tensor.shape.ndims axis = axis % ndims slice_list = [] for i in range(ndims): if i == axis: if FLAGS.separate_cls: if FLAGS.truncate_seq: slice_list.append(slice(1, -1, pool_size)) else: slice_list.append(slice(1, None, pool_size)) else: slice_list.append(slice(None, None, pool_size)) break else: slice_list.append(slice(None)) if net_config.separate_cls: cls_slice_list = [] for i in range(ndims): if i == axis: cls_slice_list.append(slice(None, 1)) break else: cls_slice_list.append(slice(None)) def _pool_func(origin): pooled = origin[slice_list] if net_config.separate_cls: pooled = tf.concat([origin[cls_slice_list], pooled], axis=axis) return pooled if isinstance(tensor, (tuple, list)): return list(map(_pool_func, tensor)) else: return _pool_func(tensor) def pool_tensor(self, tensor, mode="mean"): if tensor is None: return None net_config = self.net_config ndims = tensor.shape.ndims pool_size = net_config.pooling_size if net_config.separate_cls: cls_tensor = tensor[:, :1] if FLAGS.truncate_seq: pooled = tensor[:, 1:-1] else: pooled = tensor[:, 1:] else: pooled = tensor if ndims == 2: pooled = pooled[:, :, None] if mode == "mean": pooled = tf.nn.avg_pool1d( pooled, ksize=pool_size, strides=pool_size, data_format="NWC", padding="SAME") elif mode == "max": pooled = tf.nn.max_pool1d( pooled, ksize=pool_size, strides=pool_size, data_format="NWC", padding="SAME") elif mode == "min": pooled = -tf.nn.max_pool1d( -pooled, ksize=pool_size, strides=pool_size, data_format="NWC", padding="SAME") else: raise NotImplementedError if ndims == 2: pooled = tf.squeeze(pooled, 2) if net_config.separate_cls: pooled = tf.concat([cls_tensor, pooled], axis=1) return pooled def rel_shift_pos_enc(self, q_len, q_pow, k_len, k_pow, is_training, dtype): 
net_config = self.net_config pool_size = net_config.pooling_size q_stride = pool_size ** q_pow k_stride = pool_size ** k_pow shift = q_stride // k_stride min_pos_k = 1 - k_stride max_pos_k = min_pos_k + (k_len - 1) * k_stride min_pos_q = 1 - q_stride ref_point = min_pos_q - min_pos_k num_to_remove = shift * q_len max_dist = ref_point + num_to_remove * k_stride min_dist = min_pos_q - max_pos_k rel_pos_id = tf.range(max_dist, min_dist - 1, -k_stride) enc = ops.get_pos_enc_gpu( rel_pos_id, net_config.d_model, net_config.dropout, is_training=is_training, dtype=dtype) pos_enc = (enc, shift) return pos_enc def init_attn_structures(self, hidden, seg_id, pos_id, is_training): net_config = self.net_config if net_config.rel_attn_type == "null": self.attn_structures = (None, None, None) else: if self.attn_structures is None: seq_len = tf.shape(hidden)[1] if net_config.rel_attn_type == "factorized": if pos_id is None: half_len = tf.cast(seq_len // 2, tf.float32) pos_id = tf.range(-half_len, half_len, 1.0) pos_enc = ops.get_pos_enc( pos_id, pos_id, net_config.d_model, net_config.dropout, is_training=is_training, dtype=hidden.dtype) elif net_config.rel_attn_type == "rel_shift": assert pos_id is None seq_len_fp = tf.cast(seq_len, tf.float32) rel_pos_id = tf.range(seq_len_fp, -seq_len_fp, -1.0) enc = ops.get_pos_enc_gpu( rel_pos_id, net_config.d_model, net_config.dropout, is_training=is_training, dtype=hidden.dtype) shift = 1 pos_enc = (enc, shift) else: raise NotImplementedError seg_mat = ops.seg_id_to_mat(seg_id, seg_id) num_real_token = seq_len - 1 func_mask = tf.pad( tf.ones([num_real_token, num_real_token], dtype=hidden.dtype), [[1, 0], [1, 0]]) self.attn_structures = (pos_enc, seg_mat, func_mask) return self.attn_structures def pre_attn_pooling(self, output, pos_enc, seg_mat, input_mask, func_mask, block_idx, is_training): net_config = self.net_config if net_config.pool_q_only: seg_mat = self.stride_pool(seg_mat, 1) output = self.pool_tensor(output, mode=net_config.pooling_type) func_mask = self.stride_pool(func_mask, 0) if pos_enc is not None: if net_config.rel_attn_type == "factorized": pos_enc = self.stride_pool(pos_enc[:2], 0) + pos_enc[2:] elif net_config.rel_attn_type == "rel_shift": pos_enc = self.rel_shift_pos_enc( q_len=tf.shape(func_mask)[0], q_pow=block_idx, k_len=tf.shape(func_mask)[1], k_pow=block_idx-1, is_training=is_training, dtype=func_mask.dtype) else: raise NotImplementedError else: seg_mat = self.stride_pool(seg_mat, 1) seg_mat = self.stride_pool(seg_mat, 2) output = self.pool_tensor(output, mode=net_config.pooling_type) func_mask = self.stride_pool(func_mask, 0) func_mask = self.stride_pool(func_mask, 1) input_mask = self.pool_tensor(input_mask, mode="min") if pos_enc is not None: if net_config.rel_attn_type == "factorized": pos_enc = self.stride_pool(pos_enc, 0) elif net_config.rel_attn_type == "rel_shift": pos_enc = self.rel_shift_pos_enc( q_len=tf.shape(func_mask)[0], q_pow=block_idx, k_len=tf.shape(func_mask)[1], k_pow=block_idx, is_training=is_training, dtype=func_mask.dtype) else: raise NotImplementedError return output, pos_enc, seg_mat, input_mask, func_mask def post_attn_pooling(self, pos_enc, seg_mat, input_mask, func_mask, block_idx, is_training): net_config = self.net_config if net_config.pool_q_only: seg_mat = self.stride_pool(seg_mat, 2) func_mask = self.stride_pool(func_mask, 1) input_mask = self.pool_tensor(input_mask, mode="min") if pos_enc is not None: if net_config.rel_attn_type == "factorized": pos_enc = pos_enc[:2] + self.stride_pool(pos_enc[2:], 0) elif 
net_config.rel_attn_type == "rel_shift": pos_enc = self.rel_shift_pos_enc( q_len=tf.shape(func_mask)[1], q_pow=block_idx, k_len=tf.shape(func_mask)[1], k_pow=block_idx, is_training=is_training, dtype=func_mask.dtype) else: raise NotImplementedError return pos_enc, seg_mat, input_mask, func_mask def upsample(self, output, stride, tgt_len): if stride == 1: return output net_config = self.net_config if net_config.separate_cls: cls_output = output[:, :1] output = output[:, 1:] output = tf.repeat(output, repeats=stride, axis=1) if net_config.separate_cls: if FLAGS.truncate_seq: pad_len = stride - 1 output = tf.pad(output, [[0, 0], [0, pad_len], [0, 0]]) else: output = output[:, :tgt_len - 1] output = tf.concat([cls_output, output], axis=1) return output def bridge_layer(self, hiddens, input_mask, reuse=tf.AUTO_REUSE): net_config = self.net_config ret_dict = {} tgt_len = tf.shape(input_mask)[1] with tf.variable_scope("upsampling_layer", reuse=reuse): upsampled_hids = [] cum_num_layer = 0 for block_idx in range(net_config.n_block): stride = 2 ** block_idx cum_num_layer += (net_config.block_repeat_size[block_idx] * net_config.block_param_size[block_idx]) layer_idx = cum_num_layer - 1 upsampled_hid = self.upsample( hiddens[layer_idx], stride=stride, tgt_len=tgt_len) upsampled_hids.append(upsampled_hid) upsampled_hidden = upsampled_hids[-1] unpooled_hidden = upsampled_hids[0] output = upsampled_hidden + unpooled_hidden return output, ret_dict def decoder(self, hiddens, is_training, input_mask=None, seg_id=None, pos_id=None, scope="decoder", reuse=tf.AUTO_REUSE): net_config = self.net_config ret_dict = {} output, bridge_dict = self.bridge_layer( hiddens, input_mask, reuse=reuse) ops.update_ret_dict(ret_dict, bridge_dict, "bridge") if net_config.decoder_depth == 0: return output, ret_dict pos_enc, seg_mat, func_mask = self.init_attn_structures( output, seg_id, pos_id, is_training) attn_mask = None if input_mask is None else input_mask[:, None, None] n_enc_param_layer = sum(net_config.block_param_size) with tf.variable_scope(scope, reuse=reuse): for param_idx in range(net_config.decoder_param_size): layer_idx = n_enc_param_layer + param_idx with tf.variable_scope("layer_{}".format(layer_idx), reuse=reuse): for repeat_idx in range(net_config.decoder_repeat_size): output, layer_dict = self.tfmxl_layer( q=output, k=output, v=output, pos_enc=pos_enc, seg_mat=seg_mat, attn_mask=attn_mask, is_training=is_training, func_mask=func_mask) ops.update_ret_dict( ret_dict, layer_dict, "layer_{}/repeat_{}".format(layer_idx, repeat_idx)) return output, ret_dict def summarize_sequence(self, hidden, scope="sequnece_summary", reuse=tf.AUTO_REUSE): net_config = self.net_config initializer = self.get_initializer() with tf.variable_scope(scope, reuse=reuse): summary = ops.dense( hidden[:, 0], net_config.d_model, activation=tf.tanh, use_bias=True, initializer=initializer, scope="summary") return summary def extract_hiddens(self, inputs, is_training, seg_id=None, pos_id=None, input_mask=None, use_decoder=False, use_tpu=False, use_bfloat16=False): dtype = tf.float32 if not use_bfloat16 else tf.bfloat16 input_embed, _, _ = self.input_embedding( inputs, is_training, seg_id=seg_id, use_tpu=use_tpu, dtype=dtype) output, hiddens, ret_dict = self.encoder( input_embed, is_training, seg_id=seg_id, pos_id=pos_id, input_mask=input_mask) if use_decoder: output, _ = self.decoder( hiddens, input_mask=input_mask, seg_id=seg_id, pos_id=pos_id, is_training=is_training) return output, hiddens, ret_dict def get_pooled_output(self, inputs, 
is_training, seg_id=None, input_mask=None, use_tpu=False, use_bfloat16=False): output, _, _ = self.extract_hiddens( inputs, is_training, seg_id=seg_id, input_mask=input_mask, use_tpu=use_tpu, use_bfloat16=use_bfloat16) summary = self.summarize_sequence(output) return summary, output def lm_logits(self, hidden, lookup_table=None, mapping=None, scope="lm"): net_config = self.net_config initializer = self.get_initializer() if mapping is not None: hidden = tf.einsum("...id,...ki->...kd", hidden, mapping) with tf.variable_scope("{}_proj".format(scope)): hidden = ops.dense( hidden, out_shape=net_config.d_embed, inp_shape=net_config.d_model, activation=ops.get_activation(net_config.ff_activation), initializer=initializer) hidden = ops.layer_norm_op(hidden, norm_shape=[net_config.d_embed]) with tf.variable_scope("{}_loss".format(scope)): if lookup_table is not None: softmax_w = lookup_table else: softmax_w = tf.get_variable("weight", [net_config.vocab_size, net_config.d_embed], dtype=hidden.dtype, initializer=initializer) softmax_b = tf.get_variable("bias", [net_config.vocab_size], dtype=hidden.dtype, initializer=tf.zeros_initializer()) logits = tf.einsum("...d,nd->...n", hidden, softmax_w) + softmax_b if logits.dtype != tf.float32: logits = tf.cast(logits, tf.float32) return logits def lm_loss(self, hidden, target, lookup_table=None, mapping=None, return_logits=False, use_tpu=False, scope="lm"): net_config = self.net_config logits = self.lm_logits(hidden, lookup_table, mapping, scope) if target.shape.as_list() == logits.shape.as_list(): if use_tpu: target = tf.cast(target, logits.dtype) loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * target, -1) else: loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=target, logits=logits) else: if use_tpu: target = tf.one_hot(target, net_config.vocab_size, dtype=logits.dtype) loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * target, -1) else: loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=logits) if return_logits: return loss, logits else: return loss def classification_loss(self, hidden, labels, n_class, is_training, scope, reuse=tf.AUTO_REUSE, return_logits=False): net_config = self.net_config initializer = self.get_initializer() with tf.variable_scope(scope, reuse=reuse): hidden = ops.dropout_op(hidden, net_config.dropout, training=is_training) logits = ops.dense( hidden, n_class, initializer=initializer, scope="logit") if logits.dtype != tf.float32: logits = tf.cast(logits, tf.float32) one_hot_target = tf.one_hot(labels, n_class, dtype=logits.dtype) loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1) if return_logits: return loss, logits return loss def regression_loss(self, hidden, labels, is_training, scope, reuse=tf.AUTO_REUSE, return_logits=False): net_config = self.net_config initializer = self.get_initializer() with tf.variable_scope(scope, reuse=reuse): hidden = ops.dropout_op(hidden, net_config.dropout, training=is_training) logits = ops.dense( hidden, 1, initializer=initializer, scope="logit") logits = tf.squeeze(logits, axis=-1) if logits.dtype != tf.float32: logits = tf.cast(logits, tf.float32) loss = tf.square(logits - tf.cast(labels, logits.dtype)) if return_logits: return loss, logits return loss
MIT License
awslabs/aws-ec2rescue-linux
ec2rlcore/main.py
Main.bug_report
python
def bug_report(self):
    print("ec2rl {}".format(self.PROGRAM_VERSION))
    print("{}, {}".format(ec2rlcore.prediag.get_distro(), platform.release()))
    print("Python {}, {}".format(platform.python_version(), sys.executable))
    return True
Print version information relevant for inclusion in a bug report and return True.
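A minimal usage sketch (not part of the repository): assuming a fully constructed Main object, the subcommand just prints three lines and returns True. The printed values are illustrative and vary per host; in practice Main is driven by the ec2rl CLI entry point rather than constructed by hand.

    main = Main(debug=False)
    main.bug_report()
    # prints, e.g.:
    #   ec2rl 1.1.6
    #   <distro>, <kernel release>
    #   Python <version>, <path to interpreter>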
https://github.com/awslabs/aws-ec2rescue-linux/blob/8ecf40e7ea0d2563dac057235803fca2221029d2/ec2rlcore/main.py#L701-L706
from __future__ import print_function import datetime import errno import os import platform import re import shutil import sys import logging import ec2rlcore import ec2rlcore.console_out import ec2rlcore.constraint import ec2rlcore.logutil import ec2rlcore.module import ec2rlcore.moduledir import ec2rlcore.options import ec2rlcore.paralleldiagnostics import ec2rlcore.prediag import ec2rlcore.programversion import ec2rlcore.s3upload import requests import yaml class Main(object): subcommands = sorted(["run", "list", "help", "menu-config", "save-config", "upload", "version", "version-check", "software-check", "bug-report"]) __meta_options = ["--config-file", "--url", "--upload-directory"] PROGRAM_VERSION = ec2rlcore.programversion.ProgramVersion("1.1.6") VERSION_ENDPOINT = "https://s3.amazonaws.com/ec2rescuelinux/VERSION" def __init__(self, debug=False, full_init=False): self._write_initialized = False self._full_initialized = False self._modules = None self._prediags = None self._postdiags = None self._modules_need_init = True self._prediags_need_init = True self._postdiags_need_init = True self.pruned_modules = list() self.prune_stats = dict() self.directories = dict() self.logger = ec2rlcore.logutil.LogUtil.get_root_logger() self.console = ec2rlcore.logutil.LogUtil.set_direct_console_logger(logging.INFO) self.options = ec2rlcore.options.Options(subcommands=Main.subcommands) _callp = sys.argv[0] if not os.path.isabs(_callp): _callp = os.path.abspath(_callp) self.directories["CALLPATH"] = os.path.split(_callp)[0] self.directories["LIBDIR"] = os.path.join(self.directories["CALLPATH"], "lib") self.directories["WORKDIR"] = self._get_workdir(self.options.global_args) if "--debug" in sys.argv or debug: self.debug = True self.logger.setLevel(logging.DEBUG) self._setup_write_paths() ec2rlcore.logutil.LogUtil.set_debug_log_handler(os.path.join(self.directories["RUNDIR"], "Debug.log")) self.logger.debug("ec2rlcore.Main.__init__()") else: self.debug = False self.logger.setLevel(logging.INFO) self.subcommand = "default_help" self.constraint = ec2rlcore.constraint.Constraint() if self.options.subcommand: self.subcommand = self.options.subcommand if full_init: self.full_init() def _get_workdir(self, global_args): if "outputdir" in self.options.global_args and os.path.isdir(self.options.global_args["outputdir"]): return self.options.global_args["outputdir"] else: return "/var/tmp/ec2rl" def full_init(self): if self._full_initialized: return True self._setup_write_paths() ec2rlcore.logutil.LogUtil.set_main_log_handler(os.path.join(self.directories["RUNDIR"], "Main.log")) self.logger.debug("Added main log handler at {}".format(os.path.join(self.directories["RUNDIR"], "Main.log"))) ec2rlcore.logutil.LogUtil.set_console_log_handler(logging.WARNING) self.logger.debug("Console logging for warning+ enabled") self._setup_environ() self.logger.debug("Initialized {} 'prediag' module(s)".format(len(self.prediags))) self.logger.debug("Initialized {} 'run' module(s)".format(len(self.modules))) self.logger.debug("Initialized {} 'postdiag' module(s)".format(len(self.postdiags))) for mod in self.modules: self.constraint.update(mod.constraint.with_keys(["domain", "class", "distro", "software", "perfimpact"])) self.logger.debug("my subcommand = {}".format(self.subcommand)) self.logger.debug("my constraints {}".format(self.constraint)) self.logger.debug("my global_args {}".format(self.options.global_args)) self._full_initialized = True def _setup_write_paths(self): if self._write_initialized: return True datetime_str = 
re.sub(":", "_", datetime.datetime.utcnow().isoformat()) self.directories["RUNDIR"] = os.path.join(self.directories["WORKDIR"], datetime_str) self.directories["LOGDIR"] = os.path.join(self.directories["RUNDIR"], "mod_out") self.directories["GATHEREDDIR"] = os.path.join(self.directories["RUNDIR"], "gathered_out") self.directories["SPECDIR"] = datetime_str try: os.mkdir(self.directories["WORKDIR"]) os.chmod(self.directories["WORKDIR"], 0o777) except OSError as err: if err.errno != errno.EEXIST: raise MainDirectoryError(self.directories["WORKDIR"]) try: os.mkdir(self.directories["RUNDIR"], 0o700) except OSError as err: if err.errno != errno.EEXIST: raise MainDirectoryError(self.directories["RUNDIR"]) try: os.mkdir(self.directories["LOGDIR"]) except OSError as err: if err.errno != errno.EEXIST: raise MainDirectoryError(self.directories["LOGDIR"]) try: os.mkdir("{}/prediagnostic".format(self.directories["LOGDIR"])) except OSError as err: if err.errno != errno.EEXIST: raise MainDirectoryError("{}/prediagnostic".format(self.directories["LOGDIR"])) try: os.mkdir("{}/run".format(self.directories["LOGDIR"])) except OSError as err: if err.errno != errno.EEXIST: raise MainDirectoryError("{}/run".format(self.directories["LOGDIR"])) try: os.mkdir("{}/postdiagnostic".format(self.directories["LOGDIR"])) except OSError as err: if err.errno != errno.EEXIST: raise MainDirectoryError("{}/postdiagnostic".format(self.directories["LOGDIR"])) try: os.mkdir(self.directories["GATHEREDDIR"]) except OSError as err: if err.errno != errno.EEXIST: raise MainDirectoryError(self.directories["GATHEREDDIR"]) self._write_initialized = True return True def _setup_environ(self): self.logger.debug("ec2rlcore.Main._setup_environ()") os.environ["EC2RL_LIBDIR"] = self.directories["LIBDIR"] os.environ["EC2RL_WORKDIR"] = self.directories["WORKDIR"] os.environ["EC2RL_RUNDIR"] = self.directories["RUNDIR"] os.environ["EC2RL_LOGDIR"] = self.directories["LOGDIR"] os.environ["EC2RL_GATHEREDDIR"] = self.directories["GATHEREDDIR"] os.environ["EC2RL_SPECDIR"] = self.directories["SPECDIR"] os.environ["EC2RL_CALLPATH"] = self.directories["CALLPATH"] if "perfimpact" in self.options.global_args and self.options.global_args["perfimpact"] == "true": os.environ["EC2RL_PERFIMPACT"] = "True" else: os.environ["EC2RL_PERFIMPACT"] = "False" return True @property def modules(self): if self._modules_need_init: self._modules = ec2rlcore.moduledir.ModuleDir("{}/mod.d".format(self.directories["CALLPATH"])) self._modules_need_init = False if "onlyclasses" in self.options.global_args.keys(): for module_class in self.options.global_args["onlyclasses"].rsplit(","): if module_class and module_class not in self.options.classes_to_run: self.options.classes_to_run.append(module_class) else: self.options.classes_to_run = self.modules.classes if "onlydomains" in self.options.global_args.keys(): for module_domain in self.options.global_args["onlydomains"].rsplit(","): if module_domain and module_domain not in self.options.domains_to_run: self.options.domains_to_run.append(module_domain) else: self.options.domains_to_run = self.modules.domains return self._modules @property def prediags(self): if self._prediags_need_init: self._prediags = ec2rlcore.moduledir.ModuleDir("{}/pre.d".format(self.directories["CALLPATH"])) self._prediags_need_init = False return self._prediags @property def postdiags(self): if self._postdiags_need_init: self._postdiags = ec2rlcore.moduledir.ModuleDir("{}/post.d".format(self.directories["CALLPATH"])) self._postdiags_need_init = False 
self._postdiags_need_init = False return self._postdiags def get_help(self, help_arg=None): with open(self.directories["CALLPATH"] + "/ec2rlcore/help.yaml") as helpfile: helpmessages = yaml.load(helpfile) help_dict = { "list": helpmessages["list_help"], "run": helpmessages["run_help"], "help": helpmessages["help_help"], "menu-config": helpmessages["menu_config_help"], "save-config": helpmessages["save_config_help"], "upload": helpmessages["upload_help"], "version": helpmessages["version_help"], "version-check": helpmessages["version_check_help"], "software-check": helpmessages["software_check_help"], "bug-report": helpmessages["bug_report_help"] } if help_arg in help_dict.keys(): help_message = str(help_dict[help_arg]) elif self.subcommand == "default_help": help_message = helpmessages["help_header"] help_message += "\nec2rl: missing subcommand operand" else: help_message = helpmessages["help_header"] help_message += "\n" for subcommand in Main.subcommands: help_message += str(help_dict[subcommand]) help_message += "\n" help_message += helpmessages["help_footer"] return help_message def list(self): self.logger.debug("ec2rlcore.Main.list()") print("Here is a list of available modules that apply to the current host:\n") print(ec2rlcore.module.Module.list_header) for mod in self.modules: if mod.applicable and set(mod.constraint["domain"]).intersection(self.options.domains_to_run) and set(mod.constraint["class"]).intersection(self.options.classes_to_run): print(mod) print("\nS: Requires sudo/root to run") print("P: Requires --perfimpact=true to run (can potentially cause performance impact)") print("R: Supports remediation if --remediate is given") print("\nClasses refer to the type of task the module performs") print(" Diagnose: success/fail/warn conditions determined by module.") print(" Gather: create a copy of a local file for inspection.") print(" Collect: collect command output") print("\nDomains are defined per module and refer to the general area of investigation for the module.") print("\nTo see module help, you can run:\n") print("ec2rl help [MODULEa ... MODULEx]") print("ec2rl help [--only-modules=MODULEa ... MODULEx] [--only-domains=DOMAINa ... 
DOMAINx]") return True def help(self): self.logger.debug("ec2rlcore.Main.help()") args_to_help = [] output = "" self.full_init() if "onlymodules" in self.options.global_args: args_to_help = [mod_name for mod_name in self.options.global_args["onlymodules"].rsplit(",")] elif "onlydomains" in self.options.global_args: for this_domain in self.options.domains_to_run: if this_domain in self.modules.domain_map.keys(): for module_obj in self.modules.domain_map[this_domain]: args_to_help.append(module_obj.name) elif "onlyclasses" in self.options.global_args: for this_class in self.options.classes_to_run: if this_class in self.modules.class_map.keys(): for module_obj in self.modules.class_map[this_class]: args_to_help.append(module_obj.name) elif len(sys.argv) >= 3: for arg_num in range(2, len(sys.argv)): if sys.argv[arg_num] in self.subcommands or sys.argv[arg_num] in self.modules.name_map.keys(): args_to_help.append(sys.argv[arg_num]) match = False for arg in args_to_help: if match: output += "\n\n" if arg in self.options.subcommand_list: match = True output += self.get_help(arg) elif arg in self.modules.name_map.keys(): match = True output += self.modules.name_map[arg].help if not output: output = self.get_help() print(output) return True def save_config(self): self.full_init() self.logger.debug("ec2rlcore.Main.save_config()") config_file = os.path.join(self.directories["RUNDIR"], "configuration.cfg") self.options.write_config(config_file, self.modules) ec2rlcore.dual_log("\n----------[Configuration File]----------\n") ec2rlcore.dual_log("Configuration file saved:") ec2rlcore.dual_log(config_file) return True def menu_config(self): try: import curses except ImportError: print("ERROR:\tMissing Python module 'curses'.") print("\tPlease install this module and rerun ec2rl") sys.exit(1) import ec2rlcore.menu_item import ec2rlcore.menu_config self.full_init() self.logger.debug("ec2rlcore.Main.menu_config()") if "Global" in self.modules.name_map.keys(): self.modules.remove(self.modules.name_map["Global"]) the_menu = ec2rlcore.menu_config.get_menu_config(self.modules) the_menu() module_menu = the_menu["View all modules"] for module_name in module_menu.get_item_keys(): keys_to_remove = [] for key in module_menu[module_name].get_item_keys(): if (isinstance(module_menu[module_name][key], ec2rlcore.menu_item.TextEntryItem) and not module_menu[module_name][key].get_value()) or isinstance(module_menu[module_name][key], ec2rlcore.menu_item.ExitItem): keys_to_remove.append(key) for key in keys_to_remove: module_menu[module_name].remove(module_menu[module_name][key]) self.options.per_module_args[module_name] = module_menu[module_name].get_items_dict_copy() global_menu = the_menu["Configure global module arguments"] for key in global_menu.get_item_keys(): if isinstance(global_menu[key], (ec2rlcore.menu_item.ToggleItem, ec2rlcore.menu_item.TextEntryItem)) and global_menu[key].get_value(): self.options.global_args[key] = global_menu[key].get_value() def update_only_global_arg(item_name): num_items_to_run = 0 str_items_to_run = "" for item in global_menu["only-{}".format(item_name)].get_items(): if item.get_value() == "True": num_items_to_run += 1 if not str_items_to_run: str_items_to_run = item.row_left else: str_items_to_run = ",".join((str_items_to_run, item.row_left)) if item_name == "modules" and 0 < num_items_to_run < len(self.modules) or item_name != "modules" and num_items_to_run < len(getattr(self.modules, item_name)): self.options.global_args["only{}".format(item_name)] = str_items_to_run else: return 
False return True update_only_global_arg("classes") update_only_global_arg("domains") update_only_global_arg("modules") self.options.global_args["subcommand"] = "run" if the_menu["Run this configuration"].get_value(): self.subcommand = "run" return self() else: return self.save_config() def __call__(self, subcommand=None): self.logger.debug("ec2rlcore.Main.__call__()") if not subcommand and self.subcommand == "default_help": subcommand = "help" elif not subcommand: subcommand = self.subcommand subcommand = subcommand.replace("-", "_") return getattr(self, subcommand)() def version(self): print("ec2rl {}".format(self.PROGRAM_VERSION)) print("Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All rights reserved.") print("This software is distributed under the Apache License, Version 2.0.") print("") print("This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, " "either express or implied.") return True def version_check(self): try: upstream_version = ec2rlcore.programversion.ProgramVersion(requests.get(self.VERSION_ENDPOINT).text.strip()) except requests.exceptions.Timeout: raise MainVersionCheckTimeout() print("Running version: {}".format(self.PROGRAM_VERSION)) print("Upstream version: {}".format(upstream_version)) if upstream_version > self.PROGRAM_VERSION: print("An update is available.") else: print("No update available.") return True def software_check(self): packages_needed = set() for mod in self.modules: for software_constraint in iter(mod.constraint["software"]): if not ec2rlcore.prediag.which(software_constraint): packages_needed.update(set(mod.package)) packages_needed = [package for package in packages_needed if package] if len(packages_needed) > 0: print("One or more software packages required to run all modules are missing.\n" "Information regarding these software packages can be found at the specified URLs below.\n") for package in packages_needed: modules = ",".join([mod.name for mod in self.modules.package_map[package]]) try: package_name, package_link = package.split() except ValueError: raise MainSoftwareCheckPackageParsingFailure(package, modules) print("{}: {}".format("Package-Name", package_name)) print("{}: {}".format("Package-URL", package_link)) print("{}: {}".format("Affected-Modules", modules)) print("") else: print("All test software requirements have been met.") return True
Apache License 2.0
natduca/quickopen
src/query.py
_filter_result_for_exact_matches
python
def _filter_result_for_exact_matches(query_text, base_result):
    res = QueryResult()
    res.debug_info = copy.deepcopy(base_result.debug_info)
    res.truncated = base_result.truncated
    for hit, rank in base_result.hits:
        if _is_exact_match(query_text, hit):
            res.filenames.append(hit)
            res.ranks.append(rank)
    return res
Returns a new QueryResult object containing only filenames that exactly match the provided query.
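An illustrative sketch of the exact-match rule the filter relies on (see _is_exact_match in the context below); the paths are made up. A hit counts as exact when the query is the whole path or a full path-separator-delimited suffix of it.

    _is_exact_match("query.py", "/home/user/src/query.py")      # True  (preceded by os.sep)
    _is_exact_match("query.py", "/home/user/src/myquery.py")    # False (suffix, but not on a boundary)
    _is_exact_match("src/query.py", "/home/user/src/query.py")  # True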
https://github.com/natduca/quickopen/blob/527cda56b867a0b2f47baa9ec4f39459fec746ca/src/query.py#L147-L160
import copy import fixed_size_dict import os import sys import time from basename_ranker import BasenameRanker from query_result import QueryResult from trace_event import * def _is_in_base_path(filename, base_path): expanded_base_path = os.path.expandvars(os.path.expanduser(base_path)) normalized_base_path = os.path.abspath(os.path.realpath(expanded_base_path)) normalized_filename = os.path.abspath(filename) common = os.path.commonprefix([normalized_base_path, normalized_filename]) return common == normalized_base_path def _is_exact_match(query_text, hit): if not hit.endswith(query_text): return False first_idx = hit.rfind(query_text) if first_idx == 0: return True if hit[first_idx - 1] == os.sep: return True return False class DirPriority(object): def __init__(self, dir, priority): self.dir = dir self.priority = priority def _filter_for_base_path(base_result, query): res = QueryResult() res.debug_info = copy.deepcopy(base_result.debug_info) res.truncated = base_result.truncated for hit,rank in base_result.hits: if _is_in_base_path(hit, query.base_path): res.filenames.append(hit) res.ranks.append(rank) return res def _apply_global_rank_adjustment(base_result, indexed_dirs, query): all_open_filenames = [] if query.current_filename: all_open_filenames.append(query.current_filename) all_open_filenames.extend(query.open_filenames) inactive_dirs = set(indexed_dirs) active_dir_orders = {} for open_filename in all_open_filenames: for d in indexed_dirs: if open_filename.startswith(d): if d not in active_dir_orders: active_dir_orders[d] = len(active_dir_orders) inactive_dirs.remove(d) inactive_dirs = list(inactive_dirs) inactive_dirs.sort() for i in inactive_dirs: active_dir_orders[i] = len(active_dir_orders) active_dir_orders = [(x,y) for x,y in active_dir_orders.items()] def get_order(f): for d,order in active_dir_orders: if f.startswith(d): return order return sys.maxint def hit_cmp(x,y): h = get_order(x[0]) - get_order(y[0]) if h != 0: return h j = -cmp(x[1],y[1]) if j != 0: return j x_base = os.path.basename(x[0]) y_base = os.path.basename(y[0]) j = cmp(x_base, y_base) if j != 0: return j return cmp(x[0], y[0]) hits = list(base_result.hits) hits.sort(hit_cmp) new_hits = _rerank(hits) res = QueryResult(new_hits, base_result.truncated) res.debug_info = copy.deepcopy(base_result.debug_info) return res def _rerank(hits): if len(hits) == 0: return [] deltas = [1 for x in range(len(hits))] deltas[0] = 0 for i in range(1, len(hits)): deltas[i] = hits[i][1] - hits[i-1][1] res = [hits[0]] for i in range(1, len(hits)): delta = deltas[i] if delta >= 0: delta = -0.1 res.append((hits[i][0], res[i-1][1] + delta)) return res
Apache License 2.0
haroldmills/vesper
vesper/util/time_difference_formatter.py
TimeDifferenceFormatter.get_min_time_increment
python
def get_min_time_increment(format_string):
    _, format_codes = _parse_format_string(format_string)
    increments = [_FORMAT_CODE_TIME_INCREMENTS[c] for c in format_codes]
    increments = [i for i in increments if i is not None]
    if len(increments) == 0:
        return None
    else:
        return min(increments)
Gets the minimum nonzero increment between two time differences as formatted with the specified format string.
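A hedged sketch of the expected behaviour, derived from the _FORMAT_CODE_TIME_INCREMENTS table in the context below (values are seconds); _parse_format_string itself is not shown here, so these results assume it simply extracts the format codes.

    TimeDifferenceFormatter.get_min_time_increment('%H:%M:%S')  # -> 1     (seconds field is the finest)
    TimeDifferenceFormatter.get_min_time_increment('%d %H')     # -> 3600  (hours are the finest code)
    TimeDifferenceFormatter.get_min_time_increment('%g')        # -> None  (sign code has no increment)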
https://github.com/haroldmills/vesper/blob/2bde3447eeb34b75cc580fbdafe7b26195a31530/vesper/util/time_difference_formatter.py#L47-L67
import itertools import math import re _FORMAT_CODE_RE = re.compile('%([GgdHhMmSsf%]|[1-6]f)') _FORMAT_CODE_TIME_INCREMENTS = { 'G': None, 'g': None, 'd': 24 * 3600, 'H': 3600, 'h': 3600, 'M': 60, 'm': 60, 'S': 1, 's': 1, 'f': .000001, '1f': .1, '2f': .01, '3f': .001, '4f': .0001, '5f': .00001, '6f': .000001, '%': None } _FORMATTING_METHOD_NAMES = { '%': '_format_percent' } class TimeDifferenceFormatter: @staticmethod
MIT License
melinath/django-pipetter
pipetter/__init__.py
autodiscover
python
def autodiscover():
    import copy
    from django.conf import settings
    from django.utils.importlib import import_module
    from django.utils.module_loading import module_has_submodule

    for app in settings.INSTALLED_APPS:
        if app == __package__:
            continue
        mod = import_module(app)
        try:
            before_import_registry = copy.copy(registry._registry)
            import_module('%s.pipettes' % app)
        except:
            registry._registry = before_import_registry
            if module_has_submodule(mod, 'pipettes'):
                raise
Auto-discover INSTALLED_APPS pipettes.py files and force an import on them. This function should be called from a module which is known to run, such as a models.py or urls.py file. Modeled off of django.contrib.admin.autodiscover().
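A hypothetical project wiring, mirroring how django.contrib.admin.autodiscover() is normally called; the project layout is assumed and not taken from the source.

    # myproject/urls.py  (any module that is guaranteed to be imported will do)
    import pipetter

    pipetter.autodiscover()   # imports <app>.pipettes for every app in INSTALLED_APPS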
https://github.com/melinath/django-pipetter/blob/fd21254f64e3538fd6dcd5ddc4d5dc7444f5fafb/pipetter/__init__.py#L78-L106
from pipetter.default_pipettes import default_pipettes from inspect import ismethod import re VERSION = (0, 9) class NotRegistered(Exception): pass class AlreadyRegistered(Exception): pass class PipetteError(Exception): pass PIPETTE_TAG_NAME_RE = re.compile("^[a-zA-Z0-9]\w*$") class PipetteRegistry(object): def __init__(self): self._registry = {} def register(self, pipette, tag_name=None): if not hasattr(pipette, 'get_context') or not ismethod(pipette.get_context): raise PipetteError('%s does not define a get_context method' % pipette) if tag_name is None: tag_name = getattr(pipette, 'tag_name', pipette.__module__.rsplit('.', 1)[-1]) if not PIPETTE_TAG_NAME_RE.match(tag_name): raise PipetteError("%s is not a valid tag name." % tag_name) if tag_name in self._registry: raise AlreadyRegistered('Pipette tag %s was already registered.' % tag_name) if not hasattr(pipette, 'template'): pipette.template = "pipettes/%s.html" % tag_name if not hasattr(pipette, 'takes_context'): pipette.takes_context = False if not hasattr(pipette, 'cache_for'): pipette.cache_for = 5 self._registry[tag_name] = pipette def unregister(self, pipette, tag_name=None): if tag_name is None: tag_name = getattr(pipette, 'tag_name', pipette.__module__.rsplit('.', 1)[-1]) if tag_name not in self._registry: raise NotRegistered('Pipette tag %s is not registered' % tag_name) elif self._registry[tag_name] != pipette: raise NotRegistered('A different pipette is registered as tag %s' % tag_name) self._registry.remove(pipette) def __iter__(self): return self._registry.__iter__() def items(self): return self._registry.items() def __getitem__(self, key): return self._registry[key] registry = PipetteRegistry() for pipette in default_pipettes: registry.register(pipette)
ISC License
docusign/docusign-python-client
docusign_esign/apis/envelopes_api.py
EnvelopesApi.apply_template_to_document_with_http_info
python
def apply_template_to_document_with_http_info(self, account_id, document_id, envelope_id, **kwargs):
    all_params = ['account_id', 'document_id', 'envelope_id', 'preserve_template_recipient', 'document_template_list']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method apply_template_to_document" % key
            )
        params[key] = val
    del params['kwargs']

    if ('account_id' not in params) or (params['account_id'] is None):
        raise ValueError("Missing the required parameter `account_id` when calling `apply_template_to_document`")
    if ('document_id' not in params) or (params['document_id'] is None):
        raise ValueError("Missing the required parameter `document_id` when calling `apply_template_to_document`")
    if ('envelope_id' not in params) or (params['envelope_id'] is None):
        raise ValueError("Missing the required parameter `envelope_id` when calling `apply_template_to_document`")

    collection_formats = {}
    resource_path = '/v2.1/accounts/{accountId}/envelopes/{envelopeId}/documents/{documentId}/templates'.replace('{format}', 'json')
    path_params = {}
    if 'account_id' in params:
        path_params['accountId'] = params['account_id']
    if 'document_id' in params:
        path_params['documentId'] = params['document_id']
    if 'envelope_id' in params:
        path_params['envelopeId'] = params['envelope_id']

    query_params = {}
    if 'preserve_template_recipient' in params:
        query_params['preserve_template_recipient'] = params['preserve_template_recipient']

    header_params = {}
    form_params = []
    local_var_files = {}

    body_params = None
    if 'document_template_list' in params:
        body_params = params['document_template_list']

    header_params['Accept'] = self.api_client.select_header_accept(['application/json'])

    auth_settings = []

    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='DocumentTemplateList',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
Adds templates to a document in an envelope.

Adds templates to a document in the specified envelope.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.

>>> def callback_function(response):
>>>     pprint(response)
>>>
>>> thread = api.apply_template_to_document_with_http_info(account_id, document_id, envelope_id, callback=callback_function)

:param callback function: The callback function for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str document_id: The ID of the document being accessed. (required)
:param str envelope_id: The envelopeId Guid of the envelope being accessed. (required)
:param str preserve_template_recipient:
:param DocumentTemplateList document_template_list:
:return: DocumentTemplateList
         If the method is called asynchronously,
         returns the request thread.
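A hedged synchronous-call sketch built around the apply_template_to_document wrapper shown in the context below; the account, envelope, and document ids are placeholders, the bearer token is assumed to have been obtained separately, and the import paths are inferred from the package layout rather than quoted from the source.

    from docusign_esign import ApiClient, EnvelopesApi
    from docusign_esign.models import DocumentTemplateList

    api_client = ApiClient(host="https://demo.docusign.net/restapi")
    api_client.set_default_header("Authorization", "Bearer <oauth-access-token>")  # token is a placeholder

    envelopes_api = EnvelopesApi(api_client)
    result = envelopes_api.apply_template_to_document(
        account_id="12345678",                               # placeholder
        document_id="1",                                     # placeholder
        envelope_id="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",  # placeholder
        document_template_list=DocumentTemplateList())       # payload left empty for brevity
    print(result)   # the DocumentTemplateList echoed back by the API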
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/apis/envelopes_api.py#L191-L284
from __future__ import absolute_import import sys import os import re from six import iteritems from ..client.configuration import Configuration from ..client.api_client import ApiClient class EnvelopesApi(object): def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def apply_template(self, account_id, envelope_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.apply_template_with_http_info(account_id, envelope_id, **kwargs) else: (data) = self.apply_template_with_http_info(account_id, envelope_id, **kwargs) return data def apply_template_with_http_info(self, account_id, envelope_id, **kwargs): all_params = ['account_id', 'envelope_id', 'preserve_template_recipient', 'document_template_list'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method apply_template" % key ) params[key] = val del params['kwargs'] if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `apply_template`") if ('envelope_id' not in params) or (params['envelope_id'] is None): raise ValueError("Missing the required parameter `envelope_id` when calling `apply_template`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/envelopes/{envelopeId}/templates'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] if 'envelope_id' in params: path_params['envelopeId'] = params['envelope_id'] query_params = {} if 'preserve_template_recipient' in params: query_params['preserve_template_recipient'] = params['preserve_template_recipient'] header_params = {} form_params = [] local_var_files = {} body_params = None if 'document_template_list' in params: body_params = params['document_template_list'] header_params['Accept'] = self.api_client. select_header_accept(['application/json']) auth_settings = [] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DocumentTemplateList', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def apply_template_to_document(self, account_id, document_id, envelope_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.apply_template_to_document_with_http_info(account_id, document_id, envelope_id, **kwargs) else: (data) = self.apply_template_to_document_with_http_info(account_id, document_id, envelope_id, **kwargs) return data
MIT License
aws-samples/amazon-sagemaker-cdk-examples
dask-fargate/.env/lib/python3.6/site-packages/aws_cdk/aws_kms/__init__.py
_IKeyProxy.key_arn
python
def key_arn(self) -> str:
    return jsii.get(self, "keyArn")
The ARN of the key.

attribute:
:attribute:: true
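A small, hypothetical CDK v1 sketch of where this attribute is typically read; the stack and construct names are invented, and key_arn resolves to a deploy-time token rather than a literal string.

    from aws_cdk import core, aws_kms as kms

    class KeyStack(core.Stack):
        def __init__(self, scope, id, **kwargs):
            super().__init__(scope, id, **kwargs)
            key = kms.Key(self, "DataKey")
            # export the key's ARN; the token is resolved at deploy time
            core.CfnOutput(self, "DataKeyArn", value=key.key_arn)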
https://github.com/aws-samples/amazon-sagemaker-cdk-examples/blob/054cc7f776c0d01c5e134197a2246fa2c5b053a0/dask-fargate/.env/lib/python3.6/site-packages/aws_cdk/aws_kms/__init__.py#L646-L652
import abc import datetime import enum import typing import jsii import jsii.compat import publication from jsii.python import classproperty import aws_cdk.aws_iam import aws_cdk.core __jsii_assembly__ = jsii.JSIIAssembly.load("@aws-cdk/aws-kms", "1.18.0", __name__, "aws-kms@1.18.0.jsii.tgz") @jsii.data_type(jsii_type="@aws-cdk/aws-kms.AliasAttributes", jsii_struct_bases=[], name_mapping={'alias_name': 'aliasName', 'alias_target_key': 'aliasTargetKey'}) class AliasAttributes(): def __init__(self, *, alias_name: str, alias_target_key: "IKey"): self._values = { 'alias_name': alias_name, 'alias_target_key': alias_target_key, } @property def alias_name(self) -> str: return self._values.get('alias_name') @property def alias_target_key(self) -> "IKey": return self._values.get('alias_target_key') def __eq__(self, rhs) -> bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs) -> bool: return not (rhs == self) def __repr__(self) -> str: return 'AliasAttributes(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items()) @jsii.data_type(jsii_type="@aws-cdk/aws-kms.AliasProps", jsii_struct_bases=[], name_mapping={'alias_name': 'aliasName', 'target_key': 'targetKey', 'removal_policy': 'removalPolicy'}) class AliasProps(): def __init__(self, *, alias_name: str, target_key: "IKey", removal_policy: typing.Optional[aws_cdk.core.RemovalPolicy]=None): self._values = { 'alias_name': alias_name, 'target_key': target_key, } if removal_policy is not None: self._values["removal_policy"] = removal_policy @property def alias_name(self) -> str: return self._values.get('alias_name') @property def target_key(self) -> "IKey": return self._values.get('target_key') @property def removal_policy(self) -> typing.Optional[aws_cdk.core.RemovalPolicy]: return self._values.get('removal_policy') def __eq__(self, rhs) -> bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs) -> bool: return not (rhs == self) def __repr__(self) -> str: return 'AliasProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items()) @jsii.implements(aws_cdk.core.IInspectable) class CfnAlias(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-kms.CfnAlias"): def __init__(self, scope: aws_cdk.core.Construct, id: str, *, alias_name: str, target_key_id: str) -> None: props = CfnAliasProps(alias_name=alias_name, target_key_id=target_key_id) jsii.create(CfnAlias, self, [scope, id, props]) @jsii.member(jsii_name="inspect") def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None: return jsii.invoke(self, "inspect", [inspector]) @jsii.member(jsii_name="renderProperties") def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]: return jsii.invoke(self, "renderProperties", [props]) @classproperty @jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME") def CFN_RESOURCE_TYPE_NAME(cls) -> str: return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME") @property @jsii.member(jsii_name="cfnProperties") def _cfn_properties(self) -> typing.Mapping[str,typing.Any]: return jsii.get(self, "cfnProperties") @property @jsii.member(jsii_name="aliasName") def alias_name(self) -> str: return jsii.get(self, "aliasName") @alias_name.setter def alias_name(self, value: str): return jsii.set(self, "aliasName", value) @property @jsii.member(jsii_name="targetKeyId") def target_key_id(self) -> str: return jsii.get(self, "targetKeyId") @target_key_id.setter def target_key_id(self, value: str): return jsii.set(self, 
"targetKeyId", value) @jsii.data_type(jsii_type="@aws-cdk/aws-kms.CfnAliasProps", jsii_struct_bases=[], name_mapping={'alias_name': 'aliasName', 'target_key_id': 'targetKeyId'}) class CfnAliasProps(): def __init__(self, *, alias_name: str, target_key_id: str): self._values = { 'alias_name': alias_name, 'target_key_id': target_key_id, } @property def alias_name(self) -> str: return self._values.get('alias_name') @property def target_key_id(self) -> str: return self._values.get('target_key_id') def __eq__(self, rhs) -> bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs) -> bool: return not (rhs == self) def __repr__(self) -> str: return 'CfnAliasProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items()) @jsii.implements(aws_cdk.core.IInspectable) class CfnKey(aws_cdk.core.CfnResource, metaclass=jsii.JSIIMeta, jsii_type="@aws-cdk/aws-kms.CfnKey"): def __init__(self, scope: aws_cdk.core.Construct, id: str, *, key_policy: typing.Any, description: typing.Optional[str]=None, enabled: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, enable_key_rotation: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, key_usage: typing.Optional[str]=None, pending_window_in_days: typing.Optional[jsii.Number]=None, tags: typing.Optional[typing.List[aws_cdk.core.CfnTag]]=None) -> None: props = CfnKeyProps(key_policy=key_policy, description=description, enabled=enabled, enable_key_rotation=enable_key_rotation, key_usage=key_usage, pending_window_in_days=pending_window_in_days, tags=tags) jsii.create(CfnKey, self, [scope, id, props]) @jsii.member(jsii_name="inspect") def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None: return jsii.invoke(self, "inspect", [inspector]) @jsii.member(jsii_name="renderProperties") def _render_properties(self, props: typing.Mapping[str,typing.Any]) -> typing.Mapping[str,typing.Any]: return jsii.invoke(self, "renderProperties", [props]) @classproperty @jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME") def CFN_RESOURCE_TYPE_NAME(cls) -> str: return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME") @property @jsii.member(jsii_name="attrArn") def attr_arn(self) -> str: return jsii.get(self, "attrArn") @property @jsii.member(jsii_name="cfnProperties") def _cfn_properties(self) -> typing.Mapping[str,typing.Any]: return jsii.get(self, "cfnProperties") @property @jsii.member(jsii_name="tags") def tags(self) -> aws_cdk.core.TagManager: return jsii.get(self, "tags") @property @jsii.member(jsii_name="keyPolicy") def key_policy(self) -> typing.Any: return jsii.get(self, "keyPolicy") @key_policy.setter def key_policy(self, value: typing.Any): return jsii.set(self, "keyPolicy", value) @property @jsii.member(jsii_name="description") def description(self) -> typing.Optional[str]: return jsii.get(self, "description") @description.setter def description(self, value: typing.Optional[str]): return jsii.set(self, "description", value) @property @jsii.member(jsii_name="enabled") def enabled(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]: return jsii.get(self, "enabled") @enabled.setter def enabled(self, value: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]): return jsii.set(self, "enabled", value) @property @jsii.member(jsii_name="enableKeyRotation") def enable_key_rotation(self) -> typing.Optional[typing.Union[typing.Optional[bool], 
typing.Optional[aws_cdk.core.IResolvable]]]: return jsii.get(self, "enableKeyRotation") @enable_key_rotation.setter def enable_key_rotation(self, value: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]): return jsii.set(self, "enableKeyRotation", value) @property @jsii.member(jsii_name="keyUsage") def key_usage(self) -> typing.Optional[str]: return jsii.get(self, "keyUsage") @key_usage.setter def key_usage(self, value: typing.Optional[str]): return jsii.set(self, "keyUsage", value) @property @jsii.member(jsii_name="pendingWindowInDays") def pending_window_in_days(self) -> typing.Optional[jsii.Number]: return jsii.get(self, "pendingWindowInDays") @pending_window_in_days.setter def pending_window_in_days(self, value: typing.Optional[jsii.Number]): return jsii.set(self, "pendingWindowInDays", value) @jsii.data_type(jsii_type="@aws-cdk/aws-kms.CfnKeyProps", jsii_struct_bases=[], name_mapping={'key_policy': 'keyPolicy', 'description': 'description', 'enabled': 'enabled', 'enable_key_rotation': 'enableKeyRotation', 'key_usage': 'keyUsage', 'pending_window_in_days': 'pendingWindowInDays', 'tags': 'tags'}) class CfnKeyProps(): def __init__(self, *, key_policy: typing.Any, description: typing.Optional[str]=None, enabled: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, enable_key_rotation: typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]=None, key_usage: typing.Optional[str]=None, pending_window_in_days: typing.Optional[jsii.Number]=None, tags: typing.Optional[typing.List[aws_cdk.core.CfnTag]]=None): self._values = { 'key_policy': key_policy, } if description is not None: self._values["description"] = description if enabled is not None: self._values["enabled"] = enabled if enable_key_rotation is not None: self._values["enable_key_rotation"] = enable_key_rotation if key_usage is not None: self._values["key_usage"] = key_usage if pending_window_in_days is not None: self._values["pending_window_in_days"] = pending_window_in_days if tags is not None: self._values["tags"] = tags @property def key_policy(self) -> typing.Any: return self._values.get('key_policy') @property def description(self) -> typing.Optional[str]: return self._values.get('description') @property def enabled(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]: return self._values.get('enabled') @property def enable_key_rotation(self) -> typing.Optional[typing.Union[typing.Optional[bool], typing.Optional[aws_cdk.core.IResolvable]]]: return self._values.get('enable_key_rotation') @property def key_usage(self) -> typing.Optional[str]: return self._values.get('key_usage') @property def pending_window_in_days(self) -> typing.Optional[jsii.Number]: return self._values.get('pending_window_in_days') @property def tags(self) -> typing.Optional[typing.List[aws_cdk.core.CfnTag]]: return self._values.get('tags') def __eq__(self, rhs) -> bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs) -> bool: return not (rhs == self) def __repr__(self) -> str: return 'CfnKeyProps(%s)' % ', '.join(k + '=' + repr(v) for k, v in self._values.items()) @jsii.interface(jsii_type="@aws-cdk/aws-kms.IKey") class IKey(aws_cdk.core.IResource, jsii.compat.Protocol): @staticmethod def __jsii_proxy_class__(): return _IKeyProxy @property @jsii.member(jsii_name="keyArn") def key_arn(self) -> str: ... 
@property @jsii.member(jsii_name="keyId") def key_id(self) -> str: ... @jsii.member(jsii_name="addAlias") def add_alias(self, alias: str) -> "Alias": ... @jsii.member(jsii_name="addToResourcePolicy") def add_to_resource_policy(self, statement: aws_cdk.aws_iam.PolicyStatement, allow_no_op: typing.Optional[bool]=None) -> None: ... @jsii.member(jsii_name="grant") def grant(self, grantee: aws_cdk.aws_iam.IGrantable, *actions: str) -> aws_cdk.aws_iam.Grant: ... @jsii.member(jsii_name="grantDecrypt") def grant_decrypt(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant: ... @jsii.member(jsii_name="grantEncrypt") def grant_encrypt(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant: ... @jsii.member(jsii_name="grantEncryptDecrypt") def grant_encrypt_decrypt(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant: ... class _IKeyProxy(jsii.proxy_for(aws_cdk.core.IResource)): __jsii_type__ = "@aws-cdk/aws-kms.IKey" @property @jsii.member(jsii_name="keyArn")
Apache License 2.0
yfauser/planespotter
app-server/app/lib/python2.7/site-packages/sqlalchemy/orm/util.py
identity_key
python
def identity_key(*args, **kwargs):
    if args:
        row = None
        largs = len(args)
        if largs == 1:
            class_ = args[0]
            try:
                row = kwargs.pop("row")
            except KeyError:
                ident = kwargs.pop("ident")
        elif largs in (2, 3):
            class_, ident = args
        else:
            raise sa_exc.ArgumentError(
                "expected up to three positional arguments, "
                "got %s" % largs)

        identity_token = kwargs.pop("identity_token", None)
        if kwargs:
            raise sa_exc.ArgumentError("unknown keyword arguments: %s"
                                       % ", ".join(kwargs))
        mapper = class_mapper(class_)
        if row is None:
            return mapper.identity_key_from_primary_key(
                util.to_list(ident), identity_token=identity_token)
        else:
            return mapper.identity_key_from_row(
                row, identity_token=identity_token)
    else:
        instance = kwargs.pop("instance")
        if kwargs:
            raise sa_exc.ArgumentError("unknown keyword arguments: %s"
                                       % ", ".join(kwargs.keys()))
        mapper = object_mapper(instance)
        return mapper.identity_key_from_instance(instance)
Generate "identity key" tuples, as are used as keys in the :attr:`.Session.identity_map` dictionary. This function has several call styles: * ``identity_key(class, ident, identity_token=token)`` This form receives a mapped class and a primary key scalar or tuple as an argument. E.g.:: >>> identity_key(MyClass, (1, 2)) (<class '__main__.MyClass'>, (1, 2), None) :param class: mapped class (must be a positional argument) :param ident: primary key, may be a scalar or tuple argument. ;param identity_token: optional identity token .. versionadded:: 1.2 added identity_token * ``identity_key(instance=instance)`` This form will produce the identity key for a given instance. The instance need not be persistent, only that its primary key attributes are populated (else the key will contain ``None`` for those missing values). E.g.:: >>> instance = MyClass(1, 2) >>> identity_key(instance=instance) (<class '__main__.MyClass'>, (1, 2), None) In this form, the given instance is ultimately run though :meth:`.Mapper.identity_key_from_instance`, which will have the effect of performing a database check for the corresponding row if the object is expired. :param instance: object instance (must be given as a keyword arg) * ``identity_key(class, row=row, identity_token=token)`` This form is similar to the class/tuple form, except is passed a database result row as a :class:`.RowProxy` object. E.g.:: >>> row = engine.execute("select * from table where a=1 and b=2").\ first() >>> identity_key(MyClass, row=row) (<class '__main__.MyClass'>, (1, 2), None) :param class: mapped class (must be a positional argument) :param row: :class:`.RowProxy` row returned by a :class:`.ResultProxy` (must be given as a keyword arg) ;param identity_token: optional identity token .. versionadded:: 1.2 added identity_token
https://github.com/yfauser/planespotter/blob/d400216502b6b5592a4889eb9fa277b2ddb75f9b/app-server/app/lib/python2.7/site-packages/sqlalchemy/orm/util.py#L211-L307
from .. import sql, util, event, exc as sa_exc, inspection from ..sql import expression, util as sql_util, operators from .interfaces import PropComparator, MapperProperty from . import attributes import re from .base import instance_str, state_str, state_class_str, attribute_str, state_attribute_str, object_mapper, object_state, _none_set, _never_set from .base import class_mapper, _class_to_mapper from .base import InspectionAttr from .path_registry import PathRegistry all_cascades = frozenset(("delete", "delete-orphan", "all", "merge", "expunge", "save-update", "refresh-expire", "none")) class CascadeOptions(frozenset): _add_w_all_cascades = all_cascades.difference([ 'all', 'none', 'delete-orphan']) _allowed_cascades = all_cascades __slots__ = ( 'save_update', 'delete', 'refresh_expire', 'merge', 'expunge', 'delete_orphan') def __new__(cls, value_list): if isinstance(value_list, util.string_types) or value_list is None: return cls.from_string(value_list) values = set(value_list) if values.difference(cls._allowed_cascades): raise sa_exc.ArgumentError( "Invalid cascade option(s): %s" % ", ".join([repr(x) for x in sorted(values.difference(cls._allowed_cascades))])) if "all" in values: values.update(cls._add_w_all_cascades) if "none" in values: values.clear() values.discard('all') self = frozenset.__new__(CascadeOptions, values) self.save_update = 'save-update' in values self.delete = 'delete' in values self.refresh_expire = 'refresh-expire' in values self.merge = 'merge' in values self.expunge = 'expunge' in values self.delete_orphan = "delete-orphan" in values if self.delete_orphan and not self.delete: util.warn("The 'delete-orphan' cascade " "option requires 'delete'.") return self def __repr__(self): return "CascadeOptions(%r)" % ( ",".join([x for x in sorted(self)]) ) @classmethod def from_string(cls, arg): values = [ c for c in re.split(r'\s*,\s*', arg or "") if c ] return cls(values) def _validator_events( desc, key, validator, include_removes, include_backrefs): if not include_backrefs: def detect_is_backref(state, initiator): impl = state.manager[key].impl return initiator.impl is not impl if include_removes: def append(state, value, initiator): if ( initiator.op is not attributes.OP_BULK_REPLACE and (include_backrefs or not detect_is_backref(state, initiator)) ): return validator(state.obj(), key, value, False) else: return value def bulk_set(state, values, initiator): if include_backrefs or not detect_is_backref(state, initiator): obj = state.obj() values[:] = [ validator(obj, key, value, False) for value in values] def set_(state, value, oldvalue, initiator): if include_backrefs or not detect_is_backref(state, initiator): return validator(state.obj(), key, value, False) else: return value def remove(state, value, initiator): if include_backrefs or not detect_is_backref(state, initiator): validator(state.obj(), key, value, True) else: def append(state, value, initiator): if ( initiator.op is not attributes.OP_BULK_REPLACE and (include_backrefs or not detect_is_backref(state, initiator)) ): return validator(state.obj(), key, value) else: return value def bulk_set(state, values, initiator): if include_backrefs or not detect_is_backref(state, initiator): obj = state.obj() values[:] = [ validator(obj, key, value) for value in values] def set_(state, value, oldvalue, initiator): if include_backrefs or not detect_is_backref(state, initiator): return validator(state.obj(), key, value) else: return value event.listen(desc, 'append', append, raw=True, retval=True) event.listen(desc, 
'bulk_replace', bulk_set, raw=True) event.listen(desc, 'set', set_, raw=True, retval=True) if include_removes: event.listen(desc, "remove", remove, raw=True, retval=True) def polymorphic_union(table_map, typecolname, aliasname='p_union', cast_nulls=True): colnames = util.OrderedSet() colnamemaps = {} types = {} for key in table_map: table = table_map[key] if isinstance(table, sql.Select): table = table.alias() table_map[key] = table m = {} for c in table.c: colnames.add(c.key) m[c.key] = c types[c.key] = c.type colnamemaps[table] = m def col(name, table): try: return colnamemaps[table][name] except KeyError: if cast_nulls: return sql.cast(sql.null(), types[name]).label(name) else: return sql.type_coerce(sql.null(), types[name]).label(name) result = [] for type, table in table_map.items(): if typecolname is not None: result.append( sql.select([col(name, table) for name in colnames] + [sql.literal_column( sql_util._quote_ddl_expr(type)). label(typecolname)], from_obj=[table])) else: result.append(sql.select([col(name, table) for name in colnames], from_obj=[table])) return sql.union_all(*result).alias(aliasname)
MIT License
vlsida/openram
compiler/modules/hierarchical_predecode.py
hierarchical_predecode.create_input_inverters
python
def create_input_inverters(self):
    self.inv_inst = []
    for inv_num in range(self.number_of_inputs):
        name = "pre_inv_{0}".format(inv_num)
        self.inv_inst.append(self.add_inst(name=name, mod=self.inv))
        self.connect_inst(["in_{0}".format(inv_num),
                           "inbar_{0}".format(inv_num),
                           "vdd", "gnd"])
Create the input inverters to invert input signals for the decode stage.
https://github.com/vlsida/openram/blob/f66aac3264598eeae31225c62b6a4af52412d407/compiler/modules/hierarchical_predecode.py#L138-L147
import debug import design import math from vector import vector from sram_factory import factory from globals import OPTS from tech import layer_properties as layer_props from tech import layer_indices from tech import layer_stacks from tech import preferred_directions from tech import drc class hierarchical_predecode(design.design): def __init__(self, name, input_number, column_decoder=False, height=None): self.number_of_inputs = input_number b = factory.create(module_type=OPTS.bitcell) if not height: self.cell_height = b.height else: self.cell_height = height self.column_decoder = column_decoder self.input_and_rail_pos = [] self.number_of_outputs = int(math.pow(2, self.number_of_inputs)) super().__init__(name) def add_pins(self): for k in range(self.number_of_inputs): self.add_pin("in_{0}".format(k), "INPUT") for i in range(self.number_of_outputs): self.add_pin("out_{0}".format(i), "OUTPUT") self.add_pin("vdd", "POWER") self.add_pin("gnd", "GROUND") def add_modules(self): debug.check(self.number_of_inputs <= 4, "Invalid number of predecode inputs: {}".format(self.number_of_inputs)) if self.column_decoder: and_type = "pand{}".format(self.number_of_inputs) inv_type = "pinv" else: and_type = "and{}_dec".format(self.number_of_inputs) inv_type = "inv_dec" self.and_mod = factory.create(module_type=and_type, height=self.cell_height) self.add_mod(self.and_mod) self.inv = factory.create(module_type=inv_type, height=self.cell_height, size=1) self.add_mod(self.inv) def create_layout(self): self.setup_layout_constraints() self.route_rails() self.place_input_inverters() self.place_and_array() self.route() self.add_boundary() self.DRC_LVS() def setup_layout_constraints(self): self.bus_layer = layer_props.hierarchical_predecode.bus_layer self.bus_directions = layer_props.hierarchical_predecode.bus_directions if self.column_decoder: self.bus_pitch = self.m3_pitch self.bus_space = self.m3_space else: self.bus_pitch = getattr(self, self.bus_layer + "_pitch") self.bus_space = getattr(self, self.bus_layer + "_space") self.bus_space = layer_props.hierarchical_predecode.bus_space_factor * self.bus_space self.input_layer = layer_props.hierarchical_predecode.input_layer self.output_layer = layer_props.hierarchical_predecode.output_layer self.output_layer_pitch = getattr(self, self.output_layer + "_pitch") self.height = self.number_of_outputs * self.and_mod.height self.x_off_inv_1 = (self.number_of_inputs + 1) * self.bus_pitch + self.bus_pitch self.x_off_and = self.x_off_inv_1 + self.inv.width + (2 * self.number_of_inputs + 2) * self.bus_pitch self.width = self.x_off_and + self.and_mod.width def route_rails(self): input_names = ["in_{}".format(x) for x in range(self.number_of_inputs)] offset = vector(self.bus_pitch, self.bus_pitch) self.input_rails = self.create_vertical_bus(layer=self.bus_layer, offset=offset, names=input_names, length=self.height - self.bus_pitch, pitch=self.bus_pitch) invert_names = ["Abar_{}".format(x) for x in range(self.number_of_inputs)] non_invert_names = ["A_{}".format(x) for x in range(self.number_of_inputs)] decode_names = invert_names + non_invert_names offset = vector(self.x_off_inv_1 + self.inv.width + self.bus_pitch, self.bus_pitch) self.decode_rails = self.create_vertical_bus(layer=self.bus_layer, offset=offset, names=decode_names, length=self.height - self.bus_pitch, pitch=self.bus_pitch)
BSD 3-Clause New or Revised License
ucam-smt/sgnmt
cam/sgnmt/predictors/parse.py
ParsePredictor.get_unk_probability
python
def get_unk_probability(self, posterior):
    return self.predictor.get_unk_probability(posterior)
Return unk probability as determined by slave predictor

Returns:
    float, unk prob
https://github.com/ucam-smt/sgnmt/blob/c663ec7b251552e36b6b4f992f0ac21aad87cb7b/cam/sgnmt/predictors/parse.py#L83-L88
import copy import logging from cam.sgnmt import utils from cam.sgnmt.predictors.core import Predictor import numpy as np import collections def load_external_ids(path): logging.info('Loading ids from file {}'.format(path)) with open(path) as f: ids = [int(line.strip()) for line in f] return set(ids) class InternalHypo(object): def __init__(self, score, token_score, predictor_state, word_to_consume): self.score = score self.predictor_state = predictor_state self.word_to_consume = word_to_consume self.norm_score = score self.token_score = token_score self.beam_len = 1 def extend(self, score, predictor_state, word_to_consume): self.score += score self.predictor_state = predictor_state self.word_to_consume = word_to_consume self.beam_len += 1 class ParsePredictor(Predictor): def __init__(self, slave_predictor, normalize_scores=True, beam_size=4, max_internal_len=35, nonterminal_ids=None): super(ParsePredictor, self).__init__() self.predictor = slave_predictor self.normalize_scores = normalize_scores self.beam_size = beam_size self.max_internal_len = max_internal_len self.nonterminals = load_external_ids(nonterminal_ids) self.nonterminals.discard(utils.EOS_ID) self.nonterminals.discard(utils.UNK_ID) self.tok_to_hypo = {}
Apache License 2.0
rapid7/vm-console-client-python
rapid7vmconsole/models/service.py
Service.__init__
python
def __init__(self, configurations=None, databases=None, family=None, links=None, name=None, port=None, product=None, protocol=None, user_groups=None, users=None, vendor=None, version=None, web_applications=None):
    self._configurations = None
    self._databases = None
    self._family = None
    self._links = None
    self._name = None
    self._port = None
    self._product = None
    self._protocol = None
    self._user_groups = None
    self._users = None
    self._vendor = None
    self._version = None
    self._web_applications = None
    self.discriminator = None

    if configurations is not None:
        self.configurations = configurations
    if databases is not None:
        self.databases = databases
    if family is not None:
        self.family = family
    if links is not None:
        self.links = links
    if name is not None:
        self.name = name
    self.port = port
    if product is not None:
        self.product = product
    self.protocol = protocol
    if user_groups is not None:
        self.user_groups = user_groups
    if users is not None:
        self.users = users
    if vendor is not None:
        self.vendor = vendor
    if version is not None:
        self.version = version
    if web_applications is not None:
        self.web_applications = web_applications
Service - a model defined in Swagger
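A hedged construction example based on the swagger_types table in the context below; the values are invented, and port/protocol are shown because the constructor assigns them unconditionally (the other fields are optional).

    svc = Service(
        name="ssh",
        port=22,
        protocol="tcp",
        product="OpenSSH",
        vendor="OpenBSD",
        version="7.4")
    print(svc.port, svc.protocol)   # 22 tcp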
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/service.py#L63-L104
import pprint import re import six class Service(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'configurations': 'list[Configuration]', 'databases': 'list[Database]', 'family': 'str', 'links': 'list[Link]', 'name': 'str', 'port': 'int', 'product': 'str', 'protocol': 'str', 'user_groups': 'list[GroupAccount]', 'users': 'list[UserAccount]', 'vendor': 'str', 'version': 'str', 'web_applications': 'list[WebApplication]' } attribute_map = { 'configurations': 'configurations', 'databases': 'databases', 'family': 'family', 'links': 'links', 'name': 'name', 'port': 'port', 'product': 'product', 'protocol': 'protocol', 'user_groups': 'userGroups', 'users': 'users', 'vendor': 'vendor', 'version': 'version', 'web_applications': 'webApplications' }
MIT License
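A hedged construction sketch for the Service model in the entry above, assuming the rapid7vmconsole package is installed. Only port and protocol are set unconditionally by __init__; the other field values used here are illustrative.

from rapid7vmconsole.models.service import Service

# Build a service record the way the Swagger-generated model expects.
svc = Service(name="ssh", port=22, protocol="tcp", product="OpenSSH", version="7.4")
print(svc.port, svc.protocol)  # -> 22 tcp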
hazyresearch/fonduer
src/fonduer/utils/visualizer.py
Visualizer.display_words
python
def display_words(
    self,
    sentences: List[Sentence],
    target: Optional[str] = None,
    pdf_file: Optional[str] = None,
) -> DisplayHandle:
    if not pdf_file:
        pdf_file = os.path.join(self.pdf_path, sentences[0].document.name + ".pdf")
    boxes = []
    for sentence in sentences:
        for i, word in enumerate(sentence.words):
            if target is None or word == target:
                boxes.append(
                    Bbox(
                        sentence.page[i],
                        sentence.top[i],
                        sentence.bottom[i],
                        sentence.left[i],
                        sentence.right[i],
                    )
                )
    imgs = self.display_boxes(pdf_file, boxes)
    return display(*imgs)
Display the bounding boxes of words.

Display the bounding boxes corresponding to words on the pdf.
https://github.com/hazyresearch/fonduer/blob/c9fd6b91998cd708ab95aeee3dfaf47b9e549ffd/src/fonduer/utils/visualizer.py#L93-L119
import logging import os import subprocess import warnings from builtins import object from collections import defaultdict from typing import DefaultDict, List, Optional, Tuple from bs4 import BeautifulSoup from IPython.display import DisplayHandle, display from wand.color import Color from wand.drawing import Drawing from wand.image import Image from fonduer.candidates.models import Candidate, SpanMention from fonduer.parser.models import Sentence from fonduer.utils.utils_visual import Bbox logger = logging.getLogger(__name__) class Visualizer(object): def __init__(self, pdf_path: str) -> None: self.pdf_path = pdf_path def display_boxes( self, pdf_file: str, boxes: List[Bbox], alternate_colors: bool = False, ) -> List[Image]: imgs = [] with Color("blue") as blue, Color("red") as red, Color( "rgba(0, 0, 0, 0.0)" ) as transparent: colors = [blue, red] boxes_per_page: DefaultDict[int, int] = defaultdict(int) boxes_by_page: DefaultDict[ int, List[Tuple[int, int, int, int]] ] = defaultdict(list) for i, (page, top, bottom, left, right) in enumerate(boxes): boxes_per_page[page] += 1 boxes_by_page[page].append((top, bottom, left, right)) for i, page_num in enumerate(boxes_per_page.keys()): with Drawing() as draw: img = pdf_to_img(pdf_file, page_num) draw.fill_color = transparent for j, (top, bottom, left, right) in enumerate( boxes_by_page[page_num] ): draw.stroke_color = ( colors[j % 2] if alternate_colors else colors[0] ) draw.rectangle(left=left, top=top, right=right, bottom=bottom) draw(img) imgs.append(img) return imgs def display_candidates( self, candidates: List[Candidate], pdf_file: Optional[str] = None ) -> DisplayHandle: if not pdf_file: pdf_file = os.path.join(self.pdf_path, candidates[0].document.name) if os.path.isfile(pdf_file + ".pdf"): pdf_file += ".pdf" elif os.path.isfile(pdf_file + ".PDF"): pdf_file += ".PDF" else: logger.error("display_candidates failed: pdf file missing.") boxes = [m.context.get_bbox() for c in candidates for m in c.get_mentions()] imgs = self.display_boxes(pdf_file, boxes, alternate_colors=True) return display(*imgs)
MIT License
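A hedged usage sketch for Visualizer.display_words above. It assumes a Fonduer parse has already produced Sentence objects and that ./pdf/ holds the source PDFs; both the directory and the `doc` document object are placeholders, not part of the dataset entry.

from fonduer.utils.visualizer import Visualizer

vis = Visualizer("./pdf/")                       # directory containing the source PDFs (assumed)
sentences = list(doc.sentences)                  # `doc` is an already-parsed Fonduer Document (assumed)
vis.display_words(sentences, target="resistor")  # highlight every occurrence of the word "resistor"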
biolink/ontobio
ontobio/io/assocwriter.py
AssocWriter.write_assoc
python
def write_assoc(self, assoc):
    vals = self.as_tsv(assoc)
    if vals != []:
        self._write_row(vals)
Write a single association to a line in the output file
https://github.com/biolink/ontobio/blob/da9c5ff912785ee4ab98a8a39585562ecd2bdef5/ontobio/io/assocwriter.py#L83-L90
import re import datetime import json import logging from typing import List, Union from ontobio import ecomap from ontobio.io import parser_version_regex from ontobio.model import association logger = logging.getLogger(__name__) external_taxon = re.compile("taxon:([0-9]+)") internal_taxon = re.compile("NCBITaxon:([0-9]+)") def _str(v): if v is None: return "" else: return str(v) class AssocWriterConfig(): pass class AssocWriter(): def _split_prefix(self, ref): id = ref['id'] [prefix, local_id] = id.split(':', maxsplit=1) return prefix, local_id def _write_row(self, vals): line = self.tsv_as_string(vals) if self.file: self.file.write(line+"\n") else: print(line) def tsv_as_string(self, vals) -> str: return "\t".join([_str(v) for v in vals]) def _write(self, line): if self.file: self.file.write(line) else: print(line) def normalize_taxon(self, taxon): global internal_taxon global external_taxon if taxon == None: return "" if external_taxon.match(taxon): return internal_taxon match = internal_taxon.match(taxon) if match: taxon_id = match.group(1) return "taxon:{num}".format(num=taxon_id) return taxon def as_tsv(self, assoc: Union[association.GoAssociation, dict]) -> List[str]: pass
BSD 3-Clause New or Revised License
michaelgale/cq-kit
cqkit/cq_files.py
StepFileExporter._find_header_tokens
python
def _find_header_tokens(self):
    with open(self.filename, "r") as fp:
        self._flines = fp.readlines()
    self._filemap = {}
    for i, line in enumerate(self._flines, 1):
        t = LineToken.get_header_token(line)
        if t is not None:
            self._filemap[t] = i
            if t == LineToken.DATA:
                break
Fill a local dictionary with line locations of important file tokens.
https://github.com/michaelgale/cq-kit/blob/e44b54d75e2687fa29cf1ee0f181008521befc3c/cqkit/cq_files.py#L329-L339
import datetime import decimal import os import os.path from datetime import date, datetime, time from enum import Enum import pyparsing import cadquery as cq try: from OCC.Core.STEPControl import ( STEPControl_Writer, STEPControl_AsIs, STEPControl_ManifoldSolidBrep, ) from OCC.Core.IGESControl import * from OCC.Core.Interface import * from OCC.Extend.DataExchange import * from OCC.Core.BRepMesh import BRepMesh_IncrementalMesh from OCC.Core.StlAPI import StlAPI_Writer OCCT_VERSION = "6.9" except: from OCP.STEPControl import ( STEPControl_Writer, STEPControl_AsIs, STEPControl_ManifoldSolidBrep, ) from OCP.IGESControl import * from OCP.Interface import * from OCP.BRepMesh import BRepMesh_IncrementalMesh from OCP.StlAPI import StlAPI_Writer Interface_Static_SetIVal = Interface_Static.SetIVal_s Interface_Static_SetCVal = Interface_Static.SetCVal_s OCCT_VERSION = "7.4" class suppress_stdout_stderr(object): def __init__(self): self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)] self.save_fds = [os.dup(1), os.dup(2)] def __enter__(self): os.dup2(self.null_fds[0], 1) os.dup2(self.null_fds[1], 2) def __exit__(self, *_): os.dup2(self.save_fds[0], 1) os.dup2(self.save_fds[1], 2) for fd in self.null_fds + self.save_fds: os.close(fd) def better_float_str(x, tolerance=12, pre_strip=True): if pre_strip: xs = x.replace("(", "").replace(")", "").replace(";", "") else: xs = x ns = str(decimal.Decimal(xs.strip()).quantize(decimal.Decimal(10) ** -tolerance)) estr = "0E-%d" % tolerance ns = ns.replace(estr, "0.") if "E" not in ns: ns = ns.rstrip("0") return ns def replace_delimited_floats(x, token, subtoken, tolerance): xt0 = x.split(token) ls = [] for xs in xt0: xt1 = xs.split(subtoken) for s in xt1: if "." in s: try: ns = better_float_str(s, tolerance=tolerance, pre_strip=False) ls.append(ns) ls.append(subtoken) continue except: pass ls.append(s) ls.append(subtoken) ls.pop(-1) ls.append(token) ls.pop(-1) return "".join(ls) def better_float_line(x, tolerance): s = replace_delimited_floats(x, "(", ",", tolerance=tolerance) s = replace_delimited_floats(s, ")", ",", tolerance=tolerance) return s class LineToken(Enum): PRODUCT = 1 CARTESIAN_POINT = 2 DIRECTION = 3 HEADER = 4 ENDSEC = 5 DATA = 6 FILE_NAME = 7 FILE_DESCRIPTION = 8 FILE_SCHEMA = 9 def __str__(self): return self.name @classmethod def get_line_token(cls, line): for name, member in LineToken.__members__.items(): if line[: len(name)].upper() == name: return member if str(name + "(") in line: return member return None @classmethod def get_header_token(cls, line): for name, member in LineToken.__members__.items(): if line[: len(name)].upper() == name: if member.value >= LineToken.HEADER.value: return member return None @classmethod def get_data_token(cls, line): for name, member in LineToken.__members__.items(): if line[: len(name)].upper() == name: if member.value < LineToken.HEADER.value: return member if str(name + "(") in line: if member.value < LineToken.HEADER.value: return member return None class StepFileExporter: def __init__(self, shape=None, filename=None, title=None, **kwargs): if shape is None: raise ValueError("StepFileExporter requires a valid CQ shape object") if filename is None: raise ValueError( "StepFileExporter requires a file path/name for exported object" ) self.shape = shape self.filename = filename _, self.tail = os.path.split(self.filename) if title is not None: self.title = title else: self.title = self.tail if len(self.tail) > 5: if self.tail.upper().endswith(".STEP"): self.title = self.tail[:-5] self.tolerance = 
12 self.write_pcurves = False self.precision_mode = 1 self.add_meta_data = True self.metadata = { "author": "", "email": "", "organization": "", "preprocessor": "Open CASCADE STEP processor %s" % (OCCT_VERSION), "origin": "python-cadquery", "authorization": "", } self._filemap = {} self._flines = [] def export(self): writer = STEPControl_Writer() pcurves = 1 if self.write_pcurves else 0 Interface_Static_SetIVal("write.surfacecurve.mode", pcurves) Interface_Static_SetIVal("write.precision.mode", self.precision_mode) with suppress_stdout_stderr(): writer.Transfer(self.shape.val().wrapped, STEPControl_AsIs) writer.Write(self.filename) if self.add_meta_data: self._final_export()
MIT License
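A hedged usage sketch for the StepFileExporter class in the entry above, assuming cadquery (with its OCC/OCP bindings) and cq-kit are installed. The box geometry and output filename are illustrative only.

import cadquery as cq
from cqkit.cq_files import StepFileExporter

shape = cq.Workplane("XY").box(10, 20, 5)  # a simple solid to export
exporter = StepFileExporter(shape=shape, filename="box.step", title="demo box")
exporter.export()  # writes box.step, then rewrites its STEP header metadata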
chmln/sublime-text-theme-switcher-menu
theme_switcher.py
menu_cache_path
python
def menu_cache_path():
    return os.path.join(sublime.cache_path(), __package__)
Return absolute path for plugin's main menu cache dir.
https://github.com/chmln/sublime-text-theme-switcher-menu/blob/b32e5cb03bd9206733a044c313e5af2292b96dcb/theme_switcher.py#L9-L11
import sublime
import sublime_plugin

import os

_HAVE_ST_UI = int(sublime.version()) >= 3127
ISC License
aws-samples/aws-device-farm-appium-python-tests-for-android-sample-app
tests/pages/web_page.py
WebPage.go_to_url
python
def go_to_url(self, url):
    nav_bar = self.driver.find_element_by_android_uiautomator(self.NAV_BAR_SELECTOR)
    sleep(self.KEYBOARD_ANIMATION_DELAY)
    nav_bar.send_keys(url + '\n')
    sleep(self.WEBSITE_LOAD_TIME)
Inputs url and presses enter.
https://github.com/aws-samples/aws-device-farm-appium-python-tests-for-android-sample-app/blob/86182ec2fae531f7376fc4b7261529700d67eb0f/tests/pages/web_page.py#L33-L38
from time import sleep

from selenium.common.exceptions import NoSuchElementException

from tests.pages.base_pages.base_page import BasePage


class WebPage(BasePage):
    NAV_BAR_SELECTOR = 'new UiSelector().textContains("http://www.amazon.com")'
    FOCUSED_WEB_VIEW_SELECTOR = 'new UiSelector().focused(true).descriptionContains("aws")'
    KEYBOARD_ANIMATION_DELAY = 1
    WEBSITE_LOAD_TIME = 7

    def tap_screen_center(self):
        window_size = self.driver.get_window_size()
        mid_x = window_size['width'] / 2
        mid_y = window_size['height'] / 2
        self.driver.tap([(mid_x, mid_y)])
Apache License 2.0
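A hedged usage sketch for WebPage.go_to_url above. `appium_driver` stands in for an already-configured Appium session against the sample Android app, and the BasePage constructor signature is assumed; neither is shown in the dataset entry.

from tests.pages.web_page import WebPage

page = WebPage(appium_driver)            # construction signature assumed from BasePage
page.go_to_url("http://www.amazon.com")  # types the URL into the nav bar and presses enter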
commvault/cvpysdk
cvpysdk/identity_management.py
IdentityManagementApp.app_key
python
def app_key(self):
    return self._app_key
Treats the app key as a read-only attribute.
https://github.com/commvault/cvpysdk/blob/66df30e6e31d619812b7756cb4f7e130b220a08f/cvpysdk/identity_management.py#L663-L665
import time from past.builtins import basestring from .exception import SDKException class IdentityManagementApps(object): def __init__(self, commcell_object): self._commcell_object = commcell_object self._cvpysdk_object = commcell_object._cvpysdk_object self._update_response_ = commcell_object._update_response_ self._APPS = commcell_object._services['IDENTITY_APPS'] self._apps = None self.refresh() def __str__(self): representation_string = "{:^5}\t{:^50}\n\n".format('S. No.', 'App') for index, app in enumerate(self._apps): sub_str = '{:^5}\t{:30}\n'.format(index + 1, app) representation_string += sub_str return representation_string.strip() def __repr__(self): return "IdentityManagementApps class instance for Commcell: '{0}'".format( self._commcell_object.commserv_name ) def __len__(self): return len(self.all_apps) def _get_apps(self): flag, response = self._cvpysdk_object.make_request( 'GET', self._APPS ) if flag: apps = {} if response.json() and 'clientThirdPartyApps' in response.json(): response_value = response.json()['clientThirdPartyApps'] for app in response_value: apps[app['appName'].lower()] = { 'appKey': app['appKey'], 'appType': app['appType'], 'appDescription': app['appDescription'], 'flags': app['flags'], 'isEnabled': app['isEnabled'] } return apps else: response_string = self._update_response_(response.text) raise SDKException('Response', '101', response_string) def get(self, app_name): if not isinstance(app_name, basestring): raise SDKException('IdentityManagement', '101') else: app_name = app_name.lower() if self.has_identity_app(app_name): return IdentityManagementApp( self._commcell_object, app_name, self._apps[app_name] ) raise SDKException('IdentityManagement', '102') @property def get_local_identity_app(self): if self._apps: for app in self._apps: if self._apps[app]['appType'] == 4: return self.get(app) @property def get_commcell_identity_apps(self): commcell_apps = [] if self._apps: for app in self._apps: if self._apps[app]['appType'] == 3: commcell_apps.append(self.get(app)) return commcell_apps @property def all_apps(self): return self._apps def delete_identity_app(self, app_name): draft_json = self._apps.get(app_name) if draft_json: req_json = { 'opType': 2, 'clientThirdPartyApps': [ draft_json ] } else: raise SDKException('IdentityManagement', '102') flag, response = self._cvpysdk_object.make_request( 'POST', self._APPS, req_json ) if flag: if response.json() and 'error' in response.json(): if response.json()['error']['errorCode'] == 0: self.refresh() else: raise SDKException( 'Response', '101', response.json()['error']['warningMessage'] ) else: response_string = self._update_response_(response.text) raise SDKException('Response', '101', response_string) def configure_local_identity_app(self, user_list=None): third_party_json = { 'opType': 1, 'clientThirdPartyApps': [ { 'appType': 4, 'isEnabled': True, 'assocTree': [ { 'userId': self._commcell_object.users.all_users[user_name], '_type_': 13 } for user_name in user_list ] } ] } flag, response = self._cvpysdk_object.make_request( 'POST', self._APPS, third_party_json ) if flag: if response.json() and 'error' in response.json(): if response.json()['error']['errorCode'] == 0: self.refresh() return self.get_local_identity_app else: raise SDKException( 'IdentityManagement', '103', ' - error {0}'.format(response.json()['error']['errorString']) ) else: response_string = self._update_response_(response.text) raise SDKException('', '101', response_string) def configure_commcell_app(self, idp_props, app_name, 
app_display_name, app_description='', user_assoc_list=None, user_mappings=None): third_party_json = { 'opType': 1, 'clientThirdPartyApps': [ { 'appName': app_name, 'appDisplayName': app_display_name, 'appDescription': app_description, 'flags': 0, 'appType': 3, 'isEnabled': True, 'UserMappings': { 'opType': 2, 'userslist': [ { 'userfromToken': spuser, "localuser": { "userId": self._commcell_object.users.all_users[ user_mappings[spuser] ] } } for spuser in user_mappings ] }, 'props': { 'nameValues': idp_props }, 'assocTree': [ { 'userId': self._commcell_object.users.all_users[user_name], '_type_': 13 } for user_name in user_assoc_list ] } ] } flag, response = self._cvpysdk_object.make_request( 'POST', self._APPS, third_party_json ) if flag: if response.json() and 'error' in response.json(): if response.json()['error']['errorCode'] == 0: self.refresh() return self.get_commcell_identity_apps else: raise SDKException( 'IdentityManagement', '103', ' - error {0}'.format(response.json()['error']['errorString']) ) else: response_string = self._update_response_(response.text) raise SDKException('', '101', response_string) def configure_openid_app(self, appname, props, user_to_be_added): third_party_json = { "App_SetClientThirdPartyAppPropReq":{ "opType": 1, "clientThirdPartyApps": [ { "appName": appname, "flags": 0, "appType": 5, "isEnabled": 1, "props": { "nameValues": props }, "assocTree": [ { "_type_": 13, "userName": user_name } for user_name in user_to_be_added ] } ] } } response_json = self._commcell_object.qoperation_execute(third_party_json) if response_json.get('errorCode', 0) != 0: raise SDKException( 'IdentityManagement', '103', 'Error: "{}"'.format(response_json['errorMessage']) ) else: self.refresh() def has_identity_app(self, app_name): if not isinstance(app_name, basestring): raise SDKException('IdentityManagement', '102') return self._apps and app_name.lower() in self._apps def refresh(self): self._apps = self._get_apps() class IdentityManagementApp(object): def __init__(self, commcell_object, app_name, app_dict=None): self._commcell_object = commcell_object self._cvpysdk_object = commcell_object._cvpysdk_object self._update_response_ = commcell_object._update_response_ self._app_name = app_name self._app_description = None self._flags = None self._app_type = None self._app_type_dict = { 1: 'Regular', 2: 'SAML', 3: 'CommCell', 4: 'Local Identity', 5: 'OpenId Connect' } self._is_enabled = None self._app_displayname = None self._app_dict = app_dict if app_dict: self._app_key = app_dict['appKey'] else: self._app_key = self._get_app_key() self._APPS = commcell_object._services['IDENTITY_APPS'] self.refresh() def __repr__(self): representation_string = 'IdentityManagementApp class instance for app: \ "{0}", of Commcell: "{1}"' return representation_string.format( self._app_name, self._commcell_object.commserv_name ) def _get_app_key(self): apps = IdentityManagementApps(self._commcell_object) return apps.get(self.app_name).app_key def _get_app_details(self): if self._app_dict: return self._app_dict flag, response = self._cvpysdk_object.make_request( 'GET', self._APPS ) if flag: if response.json() and 'clientThirdPartyApps' in response.json(): response_value = response.json()['clientThirdPartyApps'] for app in response_value: if app['appKey'] == self._app_key: self._app_description = app.get('appDescription') self._flags = app.get('flags') self._app_type = self._app_type_dict[app.get('appType')] self._is_enabled = app.get('isEnabled') return app else: raise 
SDKException('IdentityManagement', '101') else: response_string = self._update_response_(response.text) raise SDKException('Response', '101', response_string) def get_app_props(self): req_xml = """<App_GetClientThirdPartyAppPropReq propLevel='30'> <appKeys val='{0}'/> </App_GetClientThirdPartyAppPropReq>""".format(self.app_key) response = self._commcell_object._qoperation_execute(req_xml) if 'clientThirdPartyApps' in response: return response['clientThirdPartyApps'][0]['props']['nameValues'] else: raise SDKException('IdentityManagement', '102') def refresh(self): self._properties = self._get_app_details() @property def app_name(self): return self._app_name @property
Apache License 2.0
klavinslab/coral
coral/reaction/_central_dogma.py
coding_sequence
python
def coding_sequence(rna):
    if isinstance(rna, coral.DNA):
        rna = transcribe(rna)

    codons_left = len(rna) // 3
    start_codon = coral.RNA('aug')
    stop_codons = [coral.RNA('uag'), coral.RNA('uga'), coral.RNA('uaa')]
    start = None
    stop = None
    valid = [None, None]
    index = 0
    while codons_left:
        codon = rna[index:index + 3]
        if valid[0] is None:
            if codon in start_codon:
                start = index
                valid[0] = True
        else:
            if codon in stop_codons:
                stop = index + 3
                valid[1] = True
                break
        index += 3
        codons_left -= 1

    if valid[0] is None:
        raise ValueError('Sequence has no start codon.')
    elif stop is None:
        raise ValueError('Sequence has no stop codon.')

    coding_rna = rna[start:stop]
    return coding_rna
Extract coding sequence from an RNA template. :param seq: Sequence from which to extract a coding sequence. :type seq: coral.RNA :param material: Type of sequence ('dna' or 'rna') :type material: str :returns: The first coding sequence (start codon -> stop codon) matched from 5' to 3'. :rtype: coral.RNA :raises: ValueError if rna argument has no start codon. ValueError if rna argument has no stop codon in-frame with the first start codon.
https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/reaction/_central_dogma.py#L42-L86
import coral

from . import utils


def transcribe(dna):
    return utils.convert_sequence(dna, 'rna')


def translate(rna):
    return utils.convert_sequence(rna, 'peptide')


def reverse_transcribe(rna):
    return utils.convert_sequence(rna, 'dna')
MIT License
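A hedged usage sketch for coding_sequence above, assuming the coral package is installed and re-exports the function from coral.reaction (the module in the entry is a private _central_dogma module). The toy sequence contains a start codon, one lysine codon, and a stop codon.

import coral

dna = coral.DNA('atgaaataa')               # atg (start) + aaa + taa (stop)
cds = coral.reaction.coding_sequence(dna)  # DNA input is transcribed to RNA first
print(cds)                                 # expected: the RNA 'augaaauaa'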
arangodb-community/pyarango
pyArango/connection.py
Connection.getEndpointURL
python
def getEndpointURL(self):
    if self.loadBalancing == "round-robin":
        url = self.arangoURL[self.currentURLId]
        self.currentURLId = (self.currentURLId + 1) % len(self.arangoURL)
        return url
    elif self.loadBalancing == "random":
        import random
        return random.choice(self.arangoURL)
Return an endpoint URL applying the load balancing strategy.
https://github.com/arangodb-community/pyarango/blob/db758bf6ffab47fee02bec3f960f87065b28bc33/pyArango/connection.py#L217-L225
import uuid import json as json_mod from datetime import datetime import requests import base64 import tempfile import shutil from .action import ConnectionAction from .database import Database, DBHandle from .theExceptions import CreationError, ConnectionError from .users import Users from .ca_certificate import CA_Certificate class JsonHook(object): def __init__(self, ret): self.ret = ret self.ret.json_originalFct = self.ret.json def __call__(self, *args, **kwargs): try: return self.ret.json_originalFct(*args, **kwargs) except Exception as e: print( "Unable to get json for request: %s. Content: %s" % (self.ret.url, self.ret.content) ) raise e class AikidoSession(object): class Holder(object): def __init__(self, fct, auth, max_conflict_retries=5, verify=True): self.fct = fct self.auth = auth self.max_conflict_retries = max_conflict_retries if not isinstance(verify, bool) and not isinstance(verify, CA_Certificate) and not not isinstance(verify, str) : raise ValueError("'verify' argument can only be of type: bool, CA_Certificate or str ") self.verify = verify def __call__(self, *args, **kwargs): if self.auth: kwargs["auth"] = self.auth if isinstance(self.verify, CA_Certificate): kwargs["verify"] = self.verify.get_file_path() else : kwargs["verify"] = self.verify try: status_code = 1200 retry = 0 while status_code == 1200 and retry < self.max_conflict_retries : ret = self.fct(*args, **kwargs) status_code = ret.status_code retry += 1 except: print ("===\nUnable to establish connection, perhaps arango is not running.\n===") raise if len(ret.content) < 1: raise ConnectionError("Empty server response", ret.url, ret.status_code, ret.content) elif ret.status_code == 401: raise ConnectionError("Unauthorized access, you must supply a (username, password) with the correct credentials", ret.url, ret.status_code, ret.content) ret.json = JsonHook(ret) return ret def __init__(self, username, password, verify=True, max_conflict_retries=5, max_retries=5, single_session=True, log_requests=False): if username: self.auth = (username, password) else: self.auth = None self.verify = verify self.max_retries = max_retries self.log_requests = log_requests self.max_conflict_retries = max_conflict_retries self.session = None if single_session: self.session = self._make_session() if log_requests: self.log = {} self.log["nb_request"] = 0 self.log["requests"] = {} def _make_session(self): session = requests.Session() http = requests.adapters.HTTPAdapter(max_retries=self.max_retries) https = requests.adapters.HTTPAdapter(max_retries=self.max_retries) session.mount('http://', http) session.mount('https://', https) return session def __getattr__(self, request_function_name): if self.session is not None: session = self.session else : session = self._make_session() try: request_function = getattr(session, request_function_name) except AttributeError: raise AttributeError("Attribute '%s' not found (no Aikido move available)" % request_function_name) auth = object.__getattribute__(self, "auth") verify = object.__getattribute__(self, "verify") if self.log_requests: log = object.__getattribute__(self, "log") log["nb_request"] += 1 log["requests"][request_function.__name__] += 1 return AikidoSession.Holder(request_function, auth, max_conflict_retries=self.max_conflict_retries, verify=verify) def disconnect(self): pass class Connection(object): LOAD_BLANCING_METHODS = {'round-robin', 'random'} def __init__(self, arangoURL = 'http://127.0.0.1:8529', username = None, password = None, verify = True, verbose = False, statsdClient = 
None, reportFileName = None, loadBalancing = "round-robin", use_grequests = False, use_jwt_authentication=False, use_lock_for_reseting_jwt=True, max_retries=5, max_conflict_retries=5 ): if loadBalancing not in Connection.LOAD_BLANCING_METHODS: raise ValueError("loadBalancing should be one of : %s, got %s" % (Connection.LOAD_BLANCING_METHODS, loadBalancing) ) self.loadBalancing = loadBalancing self.currentURLId = 0 self.username = username self.use_grequests = use_grequests self.use_jwt_authentication = use_jwt_authentication self.use_lock_for_reseting_jwt = use_lock_for_reseting_jwt self.max_retries = max_retries self.max_conflict_retries = max_conflict_retries self.action = ConnectionAction(self) self.databases = {} self.verbose = verbose if isinstance(arangoURL, str): self.arangoURL = [arangoURL] else: self.arangoURL = arangoURL for i, url in enumerate(self.arangoURL): if url[-1] == "/": self.arangoURL[i] = url[:-1] self.identifier = None self.startTime = None self.session = None self.resetSession(username, password, verify) self.users = Users(self) if reportFileName != None: self.reportFile = open(reportFileName, 'a') else: self.reportFile = None self.statsdc = statsdClient self.reload()
Apache License 2.0
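A standalone sketch of the round-robin endpoint selection used by Connection.getEndpointURL above. It reproduces only the index arithmetic, so it runs without a live ArangoDB server; the URLs are placeholders.

urls = ["http://db1:8529", "http://db2:8529", "http://db3:8529"]
current = 0

def next_endpoint():
    global current
    url = urls[current]
    current = (current + 1) % len(urls)  # same wrap-around arithmetic as the method above
    return url

print([next_endpoint() for _ in range(5)])
# -> cycles db1, db2, db3, db1, db2 through the configured endpoints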
aspose-words-cloud/aspose-words-cloud-python
asposewordscloud/models/ps_save_options_data.py
PsSaveOptionsData.page_count
python
def page_count(self, page_count):
    self._page_count = page_count
Sets the page_count of this PsSaveOptionsData.
Gets or sets the number of pages to render.  # noqa: E501

:param page_count: The page_count of this PsSaveOptionsData.  # noqa: E501
:type: int
https://github.com/aspose-words-cloud/aspose-words-cloud-python/blob/abf8fccfed40aa2b09c6cdcaf3f2723e1f412d85/asposewordscloud/models/ps_save_options_data.py#L632-L640
import pprint import re import datetime import six import json class PsSaveOptionsData(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'allow_embedding_post_script_fonts': 'bool', 'custom_time_zone_info_data': 'TimeZoneInfoData', 'dml3_d_effects_rendering_mode': 'str', 'dml_effects_rendering_mode': 'str', 'dml_rendering_mode': 'str', 'file_name': 'str', 'flat_opc_xml_mapping_only': 'bool', 'iml_rendering_mode': 'str', 'save_format': 'str', 'update_created_time_property': 'bool', 'update_fields': 'bool', 'update_last_printed_property': 'bool', 'update_last_saved_time_property': 'bool', 'update_sdt_content': 'bool', 'zip_output': 'bool', 'color_mode': 'str', 'jpeg_quality': 'int', 'metafile_rendering_options': 'MetafileRenderingOptionsData', 'numeral_format': 'str', 'optimize_output': 'bool', 'page_count': 'int', 'page_index': 'int', 'use_book_fold_printing_settings': 'bool' } attribute_map = { 'allow_embedding_post_script_fonts': 'AllowEmbeddingPostScriptFonts', 'custom_time_zone_info_data': 'CustomTimeZoneInfoData', 'dml3_d_effects_rendering_mode': 'Dml3DEffectsRenderingMode', 'dml_effects_rendering_mode': 'DmlEffectsRenderingMode', 'dml_rendering_mode': 'DmlRenderingMode', 'file_name': 'FileName', 'flat_opc_xml_mapping_only': 'FlatOpcXmlMappingOnly', 'iml_rendering_mode': 'ImlRenderingMode', 'save_format': 'SaveFormat', 'update_created_time_property': 'UpdateCreatedTimeProperty', 'update_fields': 'UpdateFields', 'update_last_printed_property': 'UpdateLastPrintedProperty', 'update_last_saved_time_property': 'UpdateLastSavedTimeProperty', 'update_sdt_content': 'UpdateSdtContent', 'zip_output': 'ZipOutput', 'color_mode': 'ColorMode', 'jpeg_quality': 'JpegQuality', 'metafile_rendering_options': 'MetafileRenderingOptions', 'numeral_format': 'NumeralFormat', 'optimize_output': 'OptimizeOutput', 'page_count': 'PageCount', 'page_index': 'PageIndex', 'use_book_fold_printing_settings': 'UseBookFoldPrintingSettings' } def __init__(self, allow_embedding_post_script_fonts=None, custom_time_zone_info_data=None, dml3_d_effects_rendering_mode=None, dml_effects_rendering_mode=None, dml_rendering_mode=None, file_name=None, flat_opc_xml_mapping_only=None, iml_rendering_mode=None, save_format=None, update_created_time_property=None, update_fields=None, update_last_printed_property=None, update_last_saved_time_property=None, update_sdt_content=None, zip_output=None, color_mode=None, jpeg_quality=None, metafile_rendering_options=None, numeral_format=None, optimize_output=None, page_count=None, page_index=None, use_book_fold_printing_settings=None): self._allow_embedding_post_script_fonts = None self._custom_time_zone_info_data = None self._dml3_d_effects_rendering_mode = None self._dml_effects_rendering_mode = None self._dml_rendering_mode = None self._file_name = None self._flat_opc_xml_mapping_only = None self._iml_rendering_mode = None self._save_format = None self._update_created_time_property = None self._update_fields = None self._update_last_printed_property = None self._update_last_saved_time_property = None self._update_sdt_content = None self._zip_output = None self._color_mode = None self._jpeg_quality = None self._metafile_rendering_options = None self._numeral_format = None self._optimize_output = None self._page_count = None self._page_index = None self._use_book_fold_printing_settings = None self.discriminator = None if 
allow_embedding_post_script_fonts is not None: self.allow_embedding_post_script_fonts = allow_embedding_post_script_fonts if custom_time_zone_info_data is not None: self.custom_time_zone_info_data = custom_time_zone_info_data if dml3_d_effects_rendering_mode is not None: self.dml3_d_effects_rendering_mode = dml3_d_effects_rendering_mode if dml_effects_rendering_mode is not None: self.dml_effects_rendering_mode = dml_effects_rendering_mode if dml_rendering_mode is not None: self.dml_rendering_mode = dml_rendering_mode if file_name is not None: self.file_name = file_name if flat_opc_xml_mapping_only is not None: self.flat_opc_xml_mapping_only = flat_opc_xml_mapping_only if iml_rendering_mode is not None: self.iml_rendering_mode = iml_rendering_mode if save_format is not None: self.save_format = save_format if update_created_time_property is not None: self.update_created_time_property = update_created_time_property if update_fields is not None: self.update_fields = update_fields if update_last_printed_property is not None: self.update_last_printed_property = update_last_printed_property if update_last_saved_time_property is not None: self.update_last_saved_time_property = update_last_saved_time_property if update_sdt_content is not None: self.update_sdt_content = update_sdt_content if zip_output is not None: self.zip_output = zip_output if color_mode is not None: self.color_mode = color_mode if jpeg_quality is not None: self.jpeg_quality = jpeg_quality if metafile_rendering_options is not None: self.metafile_rendering_options = metafile_rendering_options if numeral_format is not None: self.numeral_format = numeral_format if optimize_output is not None: self.optimize_output = optimize_output if page_count is not None: self.page_count = page_count if page_index is not None: self.page_index = page_index if use_book_fold_printing_settings is not None: self.use_book_fold_printing_settings = use_book_fold_printing_settings @property def allow_embedding_post_script_fonts(self): return self._allow_embedding_post_script_fonts @allow_embedding_post_script_fonts.setter def allow_embedding_post_script_fonts(self, allow_embedding_post_script_fonts): self._allow_embedding_post_script_fonts = allow_embedding_post_script_fonts @property def custom_time_zone_info_data(self): return self._custom_time_zone_info_data @custom_time_zone_info_data.setter def custom_time_zone_info_data(self, custom_time_zone_info_data): self._custom_time_zone_info_data = custom_time_zone_info_data @property def dml3_d_effects_rendering_mode(self): return self._dml3_d_effects_rendering_mode @dml3_d_effects_rendering_mode.setter def dml3_d_effects_rendering_mode(self, dml3_d_effects_rendering_mode): allowed_values = ["Basic", "Advanced"] if not dml3_d_effects_rendering_mode.isdigit(): if dml3_d_effects_rendering_mode not in allowed_values: raise ValueError( "Invalid value for `dml3_d_effects_rendering_mode` ({0}), must be one of {1}" .format(dml3_d_effects_rendering_mode, allowed_values)) self._dml3_d_effects_rendering_mode = dml3_d_effects_rendering_mode else: self._dml3_d_effects_rendering_mode = allowed_values[int(dml3_d_effects_rendering_mode) if six.PY3 else long(dml3_d_effects_rendering_mode)] @property def dml_effects_rendering_mode(self): return self._dml_effects_rendering_mode @dml_effects_rendering_mode.setter def dml_effects_rendering_mode(self, dml_effects_rendering_mode): self._dml_effects_rendering_mode = dml_effects_rendering_mode @property def dml_rendering_mode(self): return self._dml_rendering_mode 
@dml_rendering_mode.setter def dml_rendering_mode(self, dml_rendering_mode): self._dml_rendering_mode = dml_rendering_mode @property def file_name(self): return self._file_name @file_name.setter def file_name(self, file_name): self._file_name = file_name @property def flat_opc_xml_mapping_only(self): return self._flat_opc_xml_mapping_only @flat_opc_xml_mapping_only.setter def flat_opc_xml_mapping_only(self, flat_opc_xml_mapping_only): self._flat_opc_xml_mapping_only = flat_opc_xml_mapping_only @property def iml_rendering_mode(self): return self._iml_rendering_mode @iml_rendering_mode.setter def iml_rendering_mode(self, iml_rendering_mode): self._iml_rendering_mode = iml_rendering_mode @property def save_format(self): return self._save_format @save_format.setter def save_format(self, save_format): self._save_format = save_format @property def update_created_time_property(self): return self._update_created_time_property @update_created_time_property.setter def update_created_time_property(self, update_created_time_property): self._update_created_time_property = update_created_time_property @property def update_fields(self): return self._update_fields @update_fields.setter def update_fields(self, update_fields): self._update_fields = update_fields @property def update_last_printed_property(self): return self._update_last_printed_property @update_last_printed_property.setter def update_last_printed_property(self, update_last_printed_property): self._update_last_printed_property = update_last_printed_property @property def update_last_saved_time_property(self): return self._update_last_saved_time_property @update_last_saved_time_property.setter def update_last_saved_time_property(self, update_last_saved_time_property): self._update_last_saved_time_property = update_last_saved_time_property @property def update_sdt_content(self): return self._update_sdt_content @update_sdt_content.setter def update_sdt_content(self, update_sdt_content): self._update_sdt_content = update_sdt_content @property def zip_output(self): return self._zip_output @zip_output.setter def zip_output(self, zip_output): self._zip_output = zip_output @property def color_mode(self): return self._color_mode @color_mode.setter def color_mode(self, color_mode): self._color_mode = color_mode @property def jpeg_quality(self): return self._jpeg_quality @jpeg_quality.setter def jpeg_quality(self, jpeg_quality): self._jpeg_quality = jpeg_quality @property def metafile_rendering_options(self): return self._metafile_rendering_options @metafile_rendering_options.setter def metafile_rendering_options(self, metafile_rendering_options): self._metafile_rendering_options = metafile_rendering_options @property def numeral_format(self): return self._numeral_format @numeral_format.setter def numeral_format(self, numeral_format): self._numeral_format = numeral_format @property def optimize_output(self): return self._optimize_output @optimize_output.setter def optimize_output(self, optimize_output): self._optimize_output = optimize_output @property def page_count(self): return self._page_count @page_count.setter
MIT License
girder/girder
plugins/jobs/girder_jobs/models/job.py
Job.findWithPermissions
python
def findWithPermissions(self, query=None, offset=0, limit=0, timeout=None,
                        fields=None, sort=None, user=None, level=AccessType.READ,
                        types=None, statuses=None, jobUser=None, parentJob=None,
                        **kwargs):
    if query is None:
        query = {}

    if jobUser == 'all':
        pass
    elif jobUser == 'none' or jobUser is None:
        query['userId'] = None
    else:
        query['userId'] = jobUser['_id']

    if types is not None:
        query['type'] = {'$in': types}
    if statuses is not None:
        query['status'] = {'$in': statuses}

    if parentJob:
        query['parentId'] = parentJob['_id']

    return super().findWithPermissions(
        query, offset=offset, limit=limit, timeout=timeout, fields=fields,
        sort=sort, user=user, level=level, **kwargs)
Search the list of jobs.

:param query: The search query (see general MongoDB docs for "find()")
:type query: dict
:param offset: The offset into the results
:type offset: int
:param limit: Maximum number of documents to return
:type limit: int
:param timeout: Cursor timeout in ms. Default is no timeout.
:type timeout: int
:param fields: A mask for filtering result documents by key, or None to
    return the full document, passed to MongoDB find() as the `projection`
    param.
:type fields: `str, list of strings or tuple of strings for fields to be
    included from the document, or dict for an inclusion or exclusion
    projection`.
:param sort: The sort order.
:type sort: List of (key, order) tuples.
:param user: The user to check policies against.
:type user: dict or None
:param level: The access level. Explicitly passing None skips doing
    permissions checks.
:type level: AccessType
:param types: job type filter.
:type types: array of type string, or None.
:param statuses: job status filter.
:type statuses: array of status integer, or None.
:param jobUser: The user who owns the job.
:type jobUser: dict, 'all', 'none', or None.
:param parentJob: Parent Job.
:returns: A pymongo Cursor or CommandCursor. If a CommandCursor, it has
    been augmented with a count function.
https://github.com/girder/girder/blob/baf9a08e804cf6da5c4735aeca7ecc4f02e1e54f/plugins/jobs/girder_jobs/models/job.py#L74-L127
import datetime from bson import json_util from girder import events from girder.constants import AccessType, SortDir from girder.exceptions import ValidationException from girder.models.model_base import AccessControlledModel from girder.models.notification import Notification from girder.models.token import Token from girder.models.user import User from ..constants import JobStatus, JOB_HANDLER_LOCAL class Job(AccessControlledModel): def initialize(self): self.name = 'job' compoundSearchIndex = ( ('userId', SortDir.ASCENDING), ('created', SortDir.DESCENDING), ('type', SortDir.ASCENDING), ('status', SortDir.ASCENDING) ) self.ensureIndices([(compoundSearchIndex, {}), 'created', 'parentId', 'celeryTaskId']) self.exposeFields(level=AccessType.READ, fields={ 'title', 'type', 'created', 'interval', 'when', 'status', 'progress', 'log', 'meta', '_id', 'public', 'parentId', 'asynchronous', 'updated', 'timestamps', 'handler', 'jobInfoSpec'}) self.exposeFields(level=AccessType.SITE_ADMIN, fields={'args', 'kwargs'}) def validate(self, job): self._validateStatus(job['status']) return job def _validateStatus(self, status): if not JobStatus.isValid(status): raise ValidationException( 'Invalid job status %s.' % status, field='status') def _validateChild(self, parentJob, childJob): if str(parentJob['_id']) == str(childJob['_id']): raise ValidationException('Child Id cannot be equal to Parent Id') if childJob['parentId']: raise ValidationException('Cannot overwrite the Parent Id') def list(self, user=None, types=None, statuses=None, limit=0, offset=0, sort=None, currentUser=None, parentJob=None): return self.findWithPermissions( offset=offset, limit=limit, sort=sort, user=currentUser, types=types, statuses=statuses, jobUser=user, parentJob=parentJob)
Apache License 2.0
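A hedged usage sketch for Job.findWithPermissions above. It assumes the code runs inside a configured Girder server process and that `admin_user` is an existing user document; both are placeholders, not part of the entry.

from girder_jobs.models.job import Job
from girder_jobs.constants import JobStatus

jobs = Job().findWithPermissions(
    user=admin_user,              # permissions are checked against this user (assumed to exist)
    jobUser='all',                # do not filter on the job owner
    statuses=[JobStatus.QUEUED],  # only queued jobs
    limit=10,
)
for job in jobs:
    print(job['title'], job['status'])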
openpathsampling/openpathsampling
openpathsampling/pathmover.py
PathMover.move
python
def move(self, sample_set):
    return paths.EmptyMoveChange()
Run the generation starting with the initial sample_set specified. Parameters ---------- sample_set : SampleSet the initially used sampleset Returns ------- samples : MoveChange the MoveChange instance describing the change from the old to the new SampleSet
https://github.com/openpathsampling/openpathsampling/blob/dcee878247bdf627aae96ccbc65d2857d5d155b9/openpathsampling/pathmover.py#L448-L465
import abc import logging import numpy as np import random import openpathsampling as paths from openpathsampling.netcdfplus import StorableNamedObject, StorableObject from openpathsampling.pathmover_inout import InOutSet, InOut from openpathsampling.rng import default_rng from .ops_logging import initialization_logging from .treelogic import TreeMixin from openpathsampling.deprecations import deprecate, has_deprecations from openpathsampling.deprecations import (SAMPLE_DETAILS, MOVE_DETAILS, NEW_SNAPSHOT_KWARG_SELECTOR) from future.utils import with_metaclass logger = logging.getLogger(__name__) init_log = logging.getLogger('openpathsampling.initialization') def make_list_of_pairs(inlist): if inlist is None: return None _ = len(inlist) try: _ = len(inlist[0]) list_of_lists = True except TypeError: list_of_lists = False if list_of_lists: for elem in inlist: assert len(elem) == 2, "List of lists: inner list length != 2" outlist = inlist else: assert len(inlist) % 2 == 0, "Flattened list: length not divisible by 2" outlist = [ [a, b] for (a, b) in zip( inlist[slice(0, None, 2)], inlist[slice(1, None, 2)]) ] return outlist class SampleNaNError(Exception): def __init__(self, message, trial_sample, details): super(SampleNaNError, self).__init__(message) self.trial_sample = trial_sample self.details = details class SampleMaxLengthError(Exception): def __init__(self, message, trial_sample, details): super(SampleMaxLengthError, self).__init__(message) self.trial_sample = trial_sample self.details = details class MoveChangeNaNError(Exception): pass class PathMover(with_metaclass(abc.ABCMeta, TreeMixin, StorableNamedObject)): def __init__(self): StorableNamedObject.__init__(self) self._rng = default_rng() self._in_ensembles = None self._out_ensembles = None self._len = None self._inout = None self._trust_candidate = False _is_ensemble_change_mover = None @property def is_ensemble_change_mover(self): if self._is_ensemble_change_mover is None: return False else: return self._is_ensemble_change_mover _is_canonical = None @property def is_canonical(self): return self._is_canonical @property def default_name(self): return self.__class__.__name__[:-5] @property def _subnodes(self): return self.submovers @property def identifier(self): return self @staticmethod def _default_match(original, test): if isinstance(test, paths.PathMover): return original is test elif issubclass(test, paths.PathMover): return original.__class__ is test else: return False @property def submovers(self): return [] @staticmethod def _flatten(ensembles): if type(ensembles) is list: return [s for ens in ensembles for s in PathMover._flatten(ens)] else: return [ensembles] def move_replica_state(self, replica_states): return self.in_out.move(replica_states) def sub_replica_state(self, replica_states): return [replica_states] * len(self.submovers) def _generate_in_out(self): if len(self.output_ensembles) == 0: return { InOutSet([]) } elif (len(self.input_ensembles) == len(self.output_ensembles) == 1): in_ens = self.input_ensembles[0] out_ens = self.output_ensembles[0] return InOutSet([InOut([((in_ens, out_ens, 0), 1)])]) else: raise NotImplementedError( 'Please implement the in-out-matrix for this mover.') @property def in_out(self): if self._inout is None: self._inout = self._generate_in_out() return self._inout def _ensemble_signature(self, as_set=False): inp = tuple(self.input_ensembles) out = tuple(self.output_ensembles) if as_set: inp = set(inp) out = set(out) return inp, out @property def ensemble_signature(self): return 
self._ensemble_signature() @property def ensemble_signature_set(self): return self._ensemble_signature(as_set=True) @property def input_ensembles(self): if self._in_ensembles is None: ensembles = self._get_in_ensembles() self._in_ensembles = list(set(self._flatten(ensembles))) return self._in_ensembles @property def output_ensembles(self): if self._out_ensembles is None: ensembles = self._get_out_ensembles() self._out_ensembles = list(set(self._flatten(ensembles))) return self._out_ensembles def _get_in_ensembles(self): return [] def _get_out_ensembles(self): return self._get_in_ensembles() @staticmethod def legal_sample_set(sample_set, ensembles=None, replicas='all'): mover_replicas = sample_set.replica_list() if replicas == 'all': selected_replicas = sample_set.replica_list() else: selected_replicas = replicas reps = list(set(mover_replicas) & set(selected_replicas)) rep_samples = [] for rep in reps: rep_samples.extend(sample_set.all_from_replica(rep)) if ensembles is None: ensembles = 'all' if ensembles == 'all': legal_samples = rep_samples else: ens_samples = [] if type(ensembles) is not list: ensembles = [ensembles] for ens in ensembles: ens_samples.extend(sample_set.all_from_ensemble(ens)) legal_samples = list(set(rep_samples) & set(ens_samples)) return legal_samples @staticmethod def select_sample(sample_set, ensembles=None, replicas=None): if replicas is None: replicas = 'all' logger.debug( "replicas: " + str(replicas) + " ensembles: " + repr(ensembles)) legal = PathMover.legal_sample_set(sample_set, ensembles, replicas) for sample in legal: logger.debug( "legal: (" + str(sample.replica) + "," + str(sample.trajectory) + "," + repr(sample.ensemble) + ")") selected = random.choice(legal) logger.debug( "selected sample: (" + str(selected.replica) + "," + str(selected.trajectory) + "," + repr(selected.ensemble) + ")") return selected @abc.abstractmethod
MIT License
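A hedged sketch of the contract PathMover.move above defines: a concrete mover takes the current SampleSet and returns a MoveChange. The NullMover name and its do-nothing behaviour are illustrative only, not part of openpathsampling.

import openpathsampling as paths


class NullMover(paths.PathMover):
    """Toy mover that never proposes anything."""

    def move(self, sample_set):
        # Report "nothing happened", exactly like the abstract base above;
        # a real mover would build new Samples and wrap them in a MoveChange.
        return paths.EmptyMoveChange()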
robmadole/jig
src/jig/tests/noseplugin.py
TestSetup.options
python
def options(self, parser, env):
    parser.add_option(
        '--unicodenazi',
        default=False,
        action='store_true',
        help='Turn unicode-nazi on'
    )
Required by Nose to add options.
https://github.com/robmadole/jig/blob/6596e15afb0bb7f69850a71d9071440ba101f539/src/jig/tests/noseplugin.py#L106-L113
from os import listdir, mkdir from os.path import join, realpath, isdir, expanduser from tempfile import mkdtemp from shutil import rmtree from subprocess import call, Popen, PIPE from nose.plugins.base import Plugin def _create_git_repo_property(repo_harness_dir): from jig.conf import JIG_DIR_NAME def getter(self): try: return self._gitrepodir except AttributeError: pass try: rhd = realpath(repo_harness_dir) if not isdir(rhd): mkdir(rhd) repo = mkdtemp(dir=rhd) except: raise TestSetupError( 'Tried to create a directory to hold ' 'the test repositories and could not.') retcode = call( ['git', 'init', repo], stdin=PIPE, stdout=PIPE, stderr=PIPE) exclude_file = join(repo, '.git', 'info', 'exclude') with open(exclude_file, 'w') as fh: fh.write(JIG_DIR_NAME) if retcode != 0: raise TestSetupError( 'Could not initialize a Git repository to ' 'run tests, is Git installed?') Popen( ['git', 'config', '--local', 'user.email', 'no+reply@jig'], stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=repo ).wait() Popen( ['git', 'config', '--local', 'user.name', 'Jig'], stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=repo ).wait() self._gitrepodir = repo return repo def setter(self, value): self._gitrepodir = value def deleter(self): try: rmtree(self._gitrepodir) except (AttributeError, OSError): pass try: delattr(self, '_gitrepodir') except AttributeError: pass return property(getter, setter, deleter) class TestSetupError(Exception): pass class TestSetup(Plugin): enabled = True name = 'jig' score = 500 def __init__(self): super(TestSetup, self).__init__() self.repo_harness_dir = mkdtemp()
BSD 2-Clause Simplified License
fidelity/stoke
stoke/status.py
StokeStatus.grad_accum
python
def grad_accum(self):
    return self._status.get("grad_accum")
Shortcut to get grad accumulation
https://github.com/fidelity/stoke/blob/5aae84bcafe1890d50a3d9e2e5366a367e31729d/stoke/status.py#L377-L379
import os from enum import Enum from typing import List, Optional, Union import attr import torch from stoke.configs import ( AMPConfig, ApexConfig, ClipGradConfig, ClipGradNormConfig, DDPConfig, DeepspeedConfig, DeepspeedFP16Config, FairscaleFSDPConfig, FairscaleOSSConfig, FairscaleSDDPConfig, HorovodConfig, ) from stoke.extensions import _FairscaleFSDPConfig class DistributedOptions(Enum): horovod = "horovod" ddp = "ddp" deepspeed = "deepspeed" class FP16Options(Enum): apex_O1 = "apex_O1" apex_O2 = "apex_O2" amp = "amp" deepspeed = "deepspeed" class _MissingLocalRankException(Exception): pass class StokeStatus: def __init__( self, batch_size_per_device: int, grad_accum: Optional[int], grad_clip: Optional[Union[ClipGradConfig, ClipGradNormConfig]], gpu: bool, fp16: Optional[FP16Options], distributed: Optional[DistributedOptions], fairscale_oss: bool, fairscale_sddp: bool, fairscale_fsdp: bool, configs: Optional[ List[ Union[ AMPConfig, ApexConfig, DDPConfig, DeepspeedConfig, FairscaleOSSConfig, FairscaleSDDPConfig, FairscaleFSDPConfig, HorovodConfig, ] ] ], ): self._key_list = [ "AMPConfig", "ApexConfig", "DDPConfig", "DeepspeedConfig", "FairscaleOSSConfig", "FairscaleSDDPConfig", "FairscaleFSDPConfig" "HorovodConfig", ] self._configs = self._set_configs(configs=configs) self._status = { "cuda": torch.cuda.is_available(), "nccl": torch.distributed.is_nccl_available(), "batch_size": batch_size_per_device, "grad_accum": grad_accum if grad_accum is not None else 1, "grad_clip": grad_clip, "gpu": gpu, "distributed": distributed, "zero": self._configs.get("DeepspeedConfig").zero_optimization.stage if self._configs.get("DeepspeedConfig") else None, "oss": fairscale_oss, "sharded": fairscale_sddp, "fully_sharded": fairscale_fsdp, "world_size": -1, } self._status.update({"fp16": self._set_fp16(fp16=fp16)}) self._check_all_raised_combinations() def _check_all_raised_combinations(self): if self.gpu and not self.cuda: raise ValueError("Stoke -- GPU(s) cannot be used as CUDA is not available") if self.is_fairscale and ( self.is_distributed_deepspeed or self.is_fp16_deepspeed ): raise ValueError( f"Stoke -- Cannot use both fairscale extensions " f"(currently: oss: {self.oss}, sddp: {self.sharded}) " f"and deepspeed (currently: distributed: {self.is_distributed_deepspeed}, " f"fp16: {self.is_fp16_deepspeed})" ) if ( not self.cuda or not self.gpu or not self.nccl ) and self.distributed is not None: raise ValueError( f"Stoke -- Distributed requires CUDA (currently: {self.cuda}), GPU (currently: {self.gpu}), " f"and NCCL (currently: {self.nccl})" ) if not self.cuda and (self.fp16 is not None): raise ValueError(f"Stoke -- FP16 training requires CUDA availability") if ( not self.cuda or not self.gpu or not self.nccl or not self.is_distributed_ddp ) and self.is_fairscale: raise ValueError( f"Stoke -- Fairscale extensions (currently: oss: {self.oss}, sddp: {self.sharded}) " f"requires CUDA (currently: {self.cuda}), " f"GPU (currently: {self.gpu}), " f"DDP (currently: {self.is_distributed_ddp}) and NCCL (currently: {self.nccl})" ) if self.sharded and not self.oss: raise ValueError( f"Stoke -- Fairscale SDDP requires OSS (currently: oss: {self.oss}, sddp: {self.sharded})" ) if (self.sharded or self.oss) and self.fully_sharded: raise ValueError( f"Stoke -- Fairscale FSDP does not require SDDP or OSS as it manages OSS itself" f"(currently: oss: {self.oss}, sddp: {self.sharded}. 
fsdp: {self.fully_sharded})" ) if self.is_fairscale and self.is_fp16_apex: raise ValueError( f"Stoke -- Fairscale does not currently support APEX (currently: {self.is_fp16_apex}) " f"for mixed precision" ) if (self.oss or self.fully_sharded) and isinstance( self.grad_clip, ClipGradConfig ): raise ValueError( f"Stoke -- Fairscale OSS and FSDP do not currently support torch.nn.utils.clip_grad_value_ " f"(currently: {type(self.grad_clip).__name__})" ) if self.is_fp16_deepspeed and not self.is_distributed_deepspeed: raise ValueError( f"Stoke -- Deepspeed FP16 (currently: {self.is_fp16_deepspeed}) requires the use of " f"Deepspeed distributed (currently: {self.is_distributed_deepspeed})" ) if ( self.is_distributed_deepspeed and self.fp16 is not None and not self.is_fp16_deepspeed ): raise ValueError( f"Stoke -- Deepspeed distributed (currently: {self.is_distributed_deepspeed}) only " f"supports its own internal FP16 implementation (currently: {self.fp16})" ) if ( self.is_distributed_deepspeed and self.zero > 0 and not self.is_fp16_deepspeed ): raise ValueError( f"Stoke -- Deepspeed ZeRO extension (currently: Stage-{self.zero}) requires Deepspeed" f"FP16 extension (currently: {self.is_fp16_deepspeed})" ) def _set_fp16(self, fp16: Optional[FP16Options]): if self._status.get("cuda") and (fp16 is not None): if fp16 == "apex_O1" or fp16 == "apex_O2": try: from apex import amp except ImportError as e: print( e, ": Stoke -- apex cannot be imported -- please install (https://github.com/NVIDIA/apex)", ) return fp16 else: return None def _set_configs(self, configs): if configs is not None: config_dict = {type(val).__name__: val for val in configs} else: config_dict = {} none_dict = {val: None for val in self._key_list if val not in config_dict} config_dict.update(none_dict) return config_dict def set_post_init_values(self, world_size: int): self._status.update({"world_size": world_size}) @property def status(self): return self._status @property def batch_size(self): return self._status.get("batch_size") @property def effective_batch_size(self): return self.batch_size * self.grad_accum * self._status.get("world_size") @property def grad_clip(self): return self._status.get("grad_clip") @property
Apache License 2.0
vinairesearch/blur-kernel-space-exploring
models/backbones/skip/util.py
get_activation
python
def get_activation(act_fun="LeakyReLU"):
    if isinstance(act_fun, str):
        if act_fun == "LeakyReLU":
            return nn.LeakyReLU(0.2, inplace=True)
        elif act_fun == "Swish":
            return Swish()
        elif act_fun == "ELU":
            return nn.ELU()
        elif act_fun == "none":
            return nn.Sequential()
        else:
            assert False
    else:
        return act_fun()
Either string defining an activation function or module (e.g. nn.ReLU)
https://github.com/vinairesearch/blur-kernel-space-exploring/blob/619c9b3b33961ef9311399d7cbbf92050a0c6b51/models/backbones/skip/util.py#L49-L65
import torch.nn as nn

from .downsampler import Downsampler


class Swish(nn.Module):
    def __init__(self):
        super(Swish, self).__init__()
        self.s = nn.Sigmoid()

    def forward(self, x):
        return x * self.s(x)


def get_conv(in_f, out_f, kernel_size, stride=1, bias=True, pad="zero", downsample_mode="stride"):
    downsampler = None
    if stride != 1 and downsample_mode != "stride":
        if downsample_mode == "avg":
            downsampler = nn.AvgPool2d(stride, stride)
        elif downsample_mode == "max":
            downsampler = nn.MaxPool2d(stride, stride)
        elif downsample_mode in ["lanczos2", "lanczos3"]:
            downsampler = Downsampler(
                n_planes=out_f, factor=stride, kernel_type=downsample_mode, phase=0.5, preserve_size=True
            )
        else:
            assert False
        stride = 1

    padder = None
    to_pad = int((kernel_size - 1) / 2)
    if pad == "reflection":
        padder = nn.ReflectionPad2d(to_pad)
        to_pad = 0

    convolver = nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)
    layers = filter(lambda x: x is not None, [padder, convolver, downsampler])
    return nn.Sequential(*layers)
Apache License 2.0
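A hedged usage sketch for get_activation above, assuming PyTorch is installed and the repository is on PYTHONPATH so the entry's module path imports directly. The "Swish" option would additionally rely on the Swish module defined in the same file.

import torch
from models.backbones.skip.util import get_activation

act = get_activation("LeakyReLU")  # returns nn.LeakyReLU(0.2, inplace=True)
y = act(torch.randn(2, 3))         # negative entries are scaled by 0.2
print(y.shape)                     # -> torch.Size([2, 3])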
bb-ricardo/netbox-sync
module/netbox/object_classes.py
NetBoxObject.add_tags
python
def add_tags(self, tags_to_add):
    self.update_tags(tags_to_add)
Add tag(s) to object

Parameters
----------
tags_to_add: (str, list, dict, NBTag)
    tags to parse and add to current list of object tags

Returns
-------
None
https://github.com/bb-ricardo/netbox-sync/blob/6268a84cbd9b82525700293ddd558d16eb010a7d/module/netbox/object_classes.py#L637-L651
import json from ipaddress import ip_network, IPv4Network, IPv6Network from module.common.misc import grab, do_error_exit from module.common.logging import get_logger log = get_logger() class NetBoxObject: default_attributes = { "data": None, "is_new": True, "nb_id": 0, "updated_items": list(), "unset_items": list(), "source": None, } inventory = None def __init__(self, data=None, read_from_netbox=False, inventory=None, source=None): for attr_key, attr_value in self.default_attributes.items(): if isinstance(attr_value, (list, dict, set)): setattr(self, attr_key, attr_value.copy()) else: setattr(self, attr_key, attr_value) self.inventory = inventory self.data = dict() for key, data_type in self.data_model.items(): if data_type in NBObjectList.__subclasses__(): self.data[key] = data_type() self.update(data=data, read_from_netbox=read_from_netbox, source=source) def __repr__(self): return "<%s instance '%s' at %s>" % (self.__class__.__name__, self.get_display_name(), id(self)) def to_dict(self): out = dict() for key in dir(self): value = getattr(self, key) if "__" in key: continue if callable(value) is True: continue if key in ["inventory", "default_attributes", "data_model_relation"]: continue if key == "source": value = getattr(value, "name", None) if key == "data_model": data_model = dict() for data_key, data_value in value.items(): if isinstance(data_value, list): new_data_value = list() for possible_option in data_value: if type(possible_option) == type: new_data_value.append(str(possible_option)) else: new_data_value.append(possible_option) data_value = new_data_value if type(data_value) == type: data_value = str(data_value) data_model[data_key] = data_value value = data_model if key == "data": data = dict() for data_key, data_value in value.items(): if isinstance(data_value, (NetBoxObject, IPv4Network, IPv6Network)): data_value = repr(data_value) elif isinstance(data_value, NBObjectList): data_value = [repr(x) for x in data_value] data[data_key] = data_value value = data out[key] = value return out def __str__(self): return json.dumps(self.to_dict(), sort_keys=True, indent=4) @staticmethod def format_slug(text=None, max_len=50): if text is None or len(text) == 0: raise AttributeError("Argument 'text' can't be None or empty!") permitted_chars = ( "abcdefghijklmnopqrstuvwxyz" "0123456789" "_-" ) for sep in [" ", ",", "."]: text = text.replace(sep, "-") text = "".join([c for c in text.lower() if c in permitted_chars]) return text[0:max_len] def update(self, data=None, read_from_netbox=False, source=None): if data is None: return if not isinstance(data, dict): raise AttributeError("Argument 'data' needs to be a dict!") if data.get("id") is not None: self.nb_id = data.get("id") if read_from_netbox is True: self.is_new = False self.data = data self.updated_items = list() self.unset_items = list() return if source is not None: self.source = source display_name = self.get_display_name(data) if display_name is None: display_name = self.get_display_name() log.debug2(f"Parsing '{self.name}' data structure: {display_name}") parsed_data = dict() for key, value in data.items(): if key not in self.data_model.keys(): log.error(f"Found undefined data model key '{key}' for object '{self.__class__.__name__}'") continue if value is None: log.info(f"Found unset key '{key}' while parsing {display_name}. 
Skipping This key") continue defined_value_type = self.data_model.get(key) if key.startswith("primary_ip"): defined_value_type = NBIPAddress if isinstance(defined_value_type, int): if not isinstance(value, str): log.error(f"Invalid data type for '{self.__class__.__name__}.{key}' (must be str), got: '{value}'") continue value = value[0:defined_value_type] if key == "slug": value = self.format_slug(text=value, max_len=defined_value_type) else: value = value[0:defined_value_type] if isinstance(defined_value_type, list): if isinstance(value, NetBoxObject): if type(value) not in defined_value_type: log.error(f"Invalid data type for '{key}' (must be one of {defined_value_type}), " f"got: '{type(value)}'") continue elif value not in defined_value_type: log.error(f"Invalid data type for '{key}' (must be one of {defined_value_type}), got: '{value}'") continue type_check_failed = False for valid_type in [bool, str, int]: if defined_value_type == valid_type and not isinstance(value, valid_type): log.error(f"Invalid data type for '{key}' (must be {valid_type.__name__}), got: '{value}'") type_check_failed = True break if type_check_failed is True: continue if defined_value_type == NBTagList: value = self.compile_tags(value) if defined_value_type == NBVLANList: value = self.compile_vlans(value) if defined_value_type in NetBoxObject.__subclasses__(): if not isinstance(value, NetBoxObject): value = self.inventory.add_update_object(defined_value_type, data=value) if value.source is None: value.source = source parsed_data[key] = value if "slug" in self.data_model.keys() and parsed_data.get("slug") is None and parsed_data.get(self.primary_key) is not None: parsed_data["slug"] = self.format_slug(text=parsed_data.get(self.primary_key), max_len=self.data_model.get("slug")) for key, new_value in parsed_data.items(): current_value = self.data.get(key) if current_value == new_value: continue if isinstance(current_value, (NetBoxObject, NBObjectList)): current_value_str = str(current_value.get_display_name()) elif isinstance(self.data_model.get(key), list) and isinstance(current_value, dict): current_value_str = str(current_value.get("value")) elif key.startswith("primary_ip") and isinstance(current_value, dict): current_value_str = str(current_value.get("address")) else: current_value_str = str(current_value).replace("\r", "") if isinstance(new_value, (NetBoxObject, NBObjectList)): new_value_str = str(new_value.get_display_name()) else: new_value_str = str(new_value).replace("\r", "") if current_value is not None and self.data_model.get(key) in [int, float] and isinstance(new_value, (int, float)) and float(current_value) == float(new_value): continue if current_value_str == new_value_str: continue self.data[key] = new_value self.updated_items.append(key) if self.is_new is False: new_value_str = new_value_str.replace("\n", " ") log.info(f"{self.name.capitalize()} '{display_name}' attribute '{key}' changed from " f"'{current_value_str}' to '{new_value_str}'") self.resolve_relations() def get_display_name(self, data=None, including_second_key=False): this_data_set = data if data is None: this_data_set = self.data if this_data_set is None: return None my_name = this_data_set.get(self.primary_key) secondary_key = getattr(self, "secondary_key", None) enforce_secondary_key = getattr(self, "enforce_secondary_key", False) if my_name is not None and secondary_key is not None and (enforce_secondary_key is True or including_second_key is True): secondary_key_value = this_data_set.get(secondary_key) org_secondary_key_value = 
str(secondary_key_value) if isinstance(secondary_key_value, NetBoxObject): secondary_key_value = secondary_key_value.get_display_name() if isinstance(secondary_key_value, dict): secondary_key_value = self.get_display_name(data=secondary_key_value) if secondary_key_value is None: log.error(f"Unable to determine second key '{secondary_key}' for {self.name} '{my_name}', " f"got: {org_secondary_key_value}") log.error("This could cause serious errors and lead to wrongly assigned object relations!!!") my_name = f"{my_name} ({secondary_key_value})" return my_name def resolve_relations(self): for key, data_type in self.data_model.items(): if self.data.get(key) is None: continue if key.startswith("primary_ip"): data_type = NBIPAddress if data_type not in NetBoxObject.__subclasses__() + NBObjectList.__subclasses__(): continue data_value = self.data.get(key) if data_type in NBObjectList.__subclasses__(): resolved_object_list = data_type() for item in data_value: if isinstance(item, data_type.member_type): item_object = item else: item_object = self.inventory.get_by_data(data_type.member_type, data=item) if item_object is not None: resolved_object_list.append(item_object) resolved_data = resolved_object_list else: if data_value is None: continue if isinstance(data_value, NetBoxObject): resolved_data = data_value else: data_to_find = None if isinstance(data_value, int): data_to_find = {"id": data_value} elif isinstance(data_value, dict): data_to_find = data_value resolved_data = self.inventory.get_by_data(data_type, data=data_to_find) if resolved_data is not None: self.data[key] = resolved_data else: log.error(f"Problems resolving relation '{key}' for object '{self.get_display_name()}' and " f"value '{data_value}'") def get_dependencies(self): r = [x for x in self.data_model.values() if x in NetBoxObject.__subclasses__()] r.extend([x.member_type for x in self.data_model.values() if x in NBObjectList.__subclasses__()]) return r def get_tags(self): return [x.get_display_name() for x in self.data.get("tags", list())] def compile_tags(self, tags, remove=False): if tags is None or NBTagList not in self.data_model.values(): return sanitized_tag_strings = list() log.debug2(f"Compiling TAG list") new_tag_list = NBTagList() def extract_tags(this_tags): if isinstance(this_tags, NBTag): sanitized_tag_strings.append(this_tags.get_display_name()) elif isinstance(this_tags, str): sanitized_tag_strings.append(this_tags) elif isinstance(this_tags, dict) and this_tags.get("name") is not None: sanitized_tag_strings.append(this_tags.get("name")) if isinstance(tags, list): for tag in tags: extract_tags(tag) else: extract_tags(tags) current_tag_strings = self.get_tags() new_tags = list() removed_tags = list() for tag_name in sanitized_tag_strings: if tag_name not in current_tag_strings and remove is False: tag = self.inventory.add_update_object(NBTag, data={"name": tag_name}) new_tags.append(tag) if tag_name in current_tag_strings and remove is True: tag = self.inventory.get_by_data(NBTag, data={"name": tag_name}) removed_tags.append(tag) current_tags = grab(self, "data.tags", fallback=NBTagList()) if len(new_tags) > 0: for tag in new_tags + current_tags: new_tag_list.append(tag) elif len(removed_tags) > 0: for tag in current_tags: if tag not in removed_tags: new_tag_list.append(tag) else: new_tag_list = current_tags return new_tag_list def update_tags(self, tags, remove=False): if tags is None or NBTagList not in self.data_model.values(): return action = "Adding" if remove is False else "Removing" log.debug2(f"{action} 
Tags: {tags}") current_tags = grab(self, "data.tags", fallback=NBTagList()) new_tags = self.compile_tags(tags, remove=remove) if str(current_tags.get_display_name()) != str(new_tags.get_display_name()): self.data["tags"] = new_tags self.updated_items.append("tags") log.info(f"{self.name.capitalize()} '{self.get_display_name()}' attribute 'tags' changed from " f"'{current_tags.get_display_name()}' to '{new_tags.get_display_name()}'")
MIT License
adaptivepele/adaptivepele
AdaptivePELE/spawning/spawning.py
EpsilonDegeneracyCalculator.divideProcessorsMetricProportional
python
def divideProcessorsMetricProportional(self, clusters, trajToDistribute):
    metrics = self.getMetrics(clusters)
    if isinstance(metrics, list):
        metrics = np.array(metrics)
    if self.parameters.condition == blockNames.SpawningParams.minValue:
        shiftValue = np.max(metrics)
    else:
        shiftValue = np.min(metrics)
    shiftedMetrics = np.subtract(metrics, shiftValue)
    bestClusters = shiftedMetrics.argsort()
    if self.parameters.condition == blockNames.SpawningParams.minValue:
        shiftedMetrics[bestClusters[self.parameters.nclusters:]] = 0
    else:
        shiftedMetrics[bestClusters[:-self.parameters.nclusters]] = 0
    metricWeights = self.parameters.metricWeights
    if metricWeights == blockNames.SpawningParams.linear:
        if abs(shiftedMetrics.sum()) < 1e-8:
            weights = np.ones(len(metrics))/len(metrics)
        else:
            weights = (1.*shiftedMetrics)/sum(shiftedMetrics)
    elif metricWeights == blockNames.SpawningParams.boltzmann:
        T = self.parameters.temperature
        kbT = 0.001987*T
        if abs(shiftedMetrics.sum()) < 1e-8:
            weights = np.ones(len(metrics))/len(metrics)
        else:
            weights = np.exp(-shiftedMetrics/kbT)
            weights /= sum(weights)
    else:
        raise ValueError("No appropiate value for the metricWeights "
                         "was found, please specify a correct value. The "
                         "default value of the metrics weighting is linear")
    return self.divideTrajAccordingToWeights(weights, trajToDistribute)
Distribute the trajectories among the clusters according to their metric.

:param clusters: Existing clusters
:type clusters: :py:class:`.Clusters`
:param weights: Weight of each cluster
:type weights: np.Array
:param trajToDistribute: Number of processors to distribute
:type trajToDistribute: int

:returns: list -- List with the number of processors allocated to each cluster
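A minimal, standalone sketch of the weighting step this method performs (the linear and Boltzmann branches), using made-up metric values; apart from numpy, every name here is an illustrative assumption rather than the AdaptivePELE API:

import numpy as np

# Hypothetical cluster metrics (e.g. binding energies); lower is better here.
metrics = np.array([-5.0, -3.5, -1.0, 0.2])
nclusters = 2            # only the best nclusters keep a non-zero shifted metric
temperature = 1000.0     # used by the Boltzmann weighting

# Shift so the best (minimum-condition) cluster has the largest magnitude.
shifted = metrics - metrics.max()
order = shifted.argsort()
shifted[order[nclusters:]] = 0          # zero out everything but the best clusters

# Linear weighting: proportional to the shifted metric.
if abs(shifted.sum()) > 1e-8:
    linear_weights = shifted / shifted.sum()
else:
    linear_weights = np.ones(len(metrics)) / len(metrics)

# Boltzmann weighting: exp(-metric / kT), with k in kcal/(mol*K).
kbT = 0.001987 * temperature
boltzmann_weights = np.exp(-shifted / kbT)
boltzmann_weights /= boltzmann_weights.sum()

print(linear_weights, boltzmann_weights)

The resulting weights would then be passed to divideTrajAccordingToWeights to convert fractions into integer processor counts.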
https://github.com/adaptivepele/adaptivepele/blob/b7c908a53a2ba9ec19fa81a517377cc365176036/AdaptivePELE/spawning/spawning.py#L722-L780
from __future__ import absolute_import, division, print_function, unicode_literals from builtins import range import os import sys import math import glob import random import numpy as np import scipy.optimize as optim from abc import abstractmethod from scipy.linalg import lu, solve from AdaptivePELE.constants import blockNames from AdaptivePELE.constants import constants from AdaptivePELE.utilities import utilities from AdaptivePELE.spawning import spawningTypes from AdaptivePELE.spawning import densitycalculator try: basestring except NameError: basestring = str PYEMMA = True try: import pyemma.msm as msm from AdaptivePELE.freeEnergies import computeDeltaG as computedG except ImportError: PYEMMA = False MATPLOTLIB = True try: import matplotlib.pyplot as plt except ImportError: MATPLOTLIB = False if MATPLOTLIB: try: plt.style.use("ggplot") except AttributeError: pass def reward(x, rews): return -(x[:, np.newaxis]*rews).sum() def return_sign(i, m, n, r): if i <= n-m: return 1 elif i <= r: return 0 else: return -1 def getSizes(clusters): sizes = np.zeros(len(clusters)) for i, cluster in enumerate(clusters): sizes[i] = cluster.elements return sizes def calculateContactsVar(deltaR, epsMax): if deltaR < 0.1: return 0 elif deltaR > 1.0: return epsMax * 0.09 else: return epsMax * 0.09 * deltaR class SpawningAlgorithmBuilder: def build(self, spawningBlock): spawningParams = SpawningParams() spawningParams.buildSpawningParameters(spawningBlock) spawningCalculatorBuilder = SpawningBuilder() spawningCalculator = spawningCalculatorBuilder.buildSpawningCalculator(spawningBlock, spawningParams) return spawningCalculator class SpawningBuilder: def buildSpawningCalculator(self, spawningBlock, spawningParams): densityBuilder = densitycalculator.DensityCalculatorBuilder() densityCalculator = densityBuilder.build(spawningBlock) spawningTypeString = spawningBlock[blockNames.StringSpawningTypes.type] if spawningTypeString == blockNames.StringSpawningTypes.sameWeight: spawningCalculator = SameWeightDegeneracyCalculator(spawningParams) elif spawningTypeString == blockNames.StringSpawningTypes.independent: spawningCalculator = IndependentRunsCalculator(spawningParams) elif spawningTypeString == blockNames.StringSpawningTypes.independentMetric: spawningCalculator = IndependentMetricCalculator(spawningParams) elif spawningTypeString == blockNames.StringSpawningTypes.inverselyProportional: spawningCalculator = InverselyProportionalToPopulationCalculator(spawningParams, densityCalculator) elif spawningTypeString == blockNames.StringSpawningTypes.epsilon: spawningCalculator = EpsilonDegeneracyCalculator(spawningParams, densityCalculator) elif spawningTypeString == blockNames.StringSpawningTypes.fast: spawningCalculator = FASTDegeneracyCalculator(spawningParams, densityCalculator) elif spawningTypeString == blockNames.StringSpawningTypes.variableEpsilon: spawningCalculator = VariableEpsilonDegeneracyCalculator(spawningParams, densityCalculator) elif spawningTypeString == blockNames.StringSpawningTypes.UCB: spawningCalculator = UCBCalculator(spawningParams, densityCalculator) elif spawningTypeString == blockNames.StringSpawningTypes.REAP: spawningCalculator = REAPCalculator(spawningParams) elif spawningTypeString == blockNames.StringSpawningTypes.null: spawningCalculator = NullSpawningCalculator(spawningParams) elif spawningTypeString == blockNames.StringSpawningTypes.ProbabilityMSMCalculator: spawningCalculator = ProbabilityMSMCalculator(spawningParams) elif spawningTypeString == 
blockNames.StringSpawningTypes.MetastabilityMSMCalculator: spawningCalculator = MetastabilityMSMCalculator(spawningParams) elif spawningTypeString == blockNames.StringSpawningTypes.UncertaintyMSMCalculator: spawningCalculator = UncertaintyMSMCalculator(spawningParams) elif spawningTypeString == blockNames.StringSpawningTypes.IndependentMSMCalculator: spawningCalculator = IndependentMSMCalculator(spawningParams) else: sys.exit("Unknown spawning type! Choices are: " + str(spawningTypes.SPAWNING_TYPE_TO_STRING_DICTIONARY.values())) return spawningCalculator class SpawningParams: def __init__(self): self.epsilon = None self.temperature = None self.threshold = None self.reportFilename = None self.reportCol = None self.decrement = None self.varEpsilonType = None self.maxEpsilon = None self.minEpsilon = None self.variationWindow = None self.maxEpsilonWindow = None self.metricWeights = None self.alpha = None self.nclusters = None self.period = None self.metricInd = None self.condition = blockNames.SpawningParams.minValue self.lagtime = None self.minPos = None self.sasaColumn = None self.filter_value = None self.filter_col = None self.filterByMetric = None def buildSpawningParameters(self, spawningBlock): spawningParamsBlock = spawningBlock[blockNames.SpawningParams.params] spawningType = spawningBlock[blockNames.StringSpawningTypes.type] if spawningType not in spawningTypes.MSMSpawning: self.reportFilename = spawningParamsBlock[blockNames.SpawningParams.report_filename] if spawningType == blockNames.StringSpawningTypes.independent: return self.condition = spawningParamsBlock.get(blockNames.SpawningParams.condition, blockNames.SpawningParams.minValue) self.filterByMetric = spawningParamsBlock.get(blockNames.SpawningParams.filterByMetric, False) self.filter_col = spawningParamsBlock.get(blockNames.SpawningParams.filter_col) if self.filter_col is None and self.filterByMetric: raise utilities.RequiredParameterMissingException("Column not specified for cluster filtering") if self.filter_col is not None: self.filter_col -= 1 self.filter_value = spawningParamsBlock.get(blockNames.SpawningParams.filter_value) if self.filter_value is None and self.filterByMetric: raise utilities.RequiredParameterMissingException("Filtering value not specified for cluster filtering") self.reportCol = spawningParamsBlock.get(blockNames.SpawningParams.report_col) if self.reportCol is not None: self.reportCol -= 1 if spawningType == blockNames.StringSpawningTypes.epsilon or spawningType == blockNames.StringSpawningTypes.variableEpsilon: self.epsilon = spawningParamsBlock[blockNames.SpawningParams.epsilon] self.nclusters = spawningParamsBlock.get(blockNames.SpawningParams.nclusters, 5) self.metricWeights = spawningParamsBlock.get(blockNames.SpawningParams.metricWeights, blockNames.SpawningParams.linear) self.condition = spawningParamsBlock.get(blockNames.SpawningParams.condition, blockNames.SpawningParams.minValue) if spawningType == blockNames.StringSpawningTypes.epsilon or spawningType == blockNames.StringSpawningTypes.variableEpsilon or spawningType == blockNames.StringSpawningTypes.fast or spawningType == blockNames.StringSpawningTypes.simulatedAnnealing or spawningType == blockNames.StringSpawningTypes.UCB or spawningType == blockNames.StringSpawningTypes.REAP: self.temperature = spawningParamsBlock.get(blockNames.SpawningParams.temperature, 1000) self.reportCol = spawningParamsBlock[blockNames.SpawningParams.report_col]-1 if spawningType == blockNames.StringSpawningTypes.variableEpsilon: self.varEpsilonType = 
spawningParamsBlock[blockNames.SpawningParams.varEpsilonType] self.maxEpsilon = spawningParamsBlock[blockNames.SpawningParams.maxEpsilon] if self.varEpsilonType == blockNames.VariableEpsilonTypes.linearVariation: self.minEpsilon = spawningParamsBlock.get(blockNames.SpawningParams.minEpsilon, self.epsilon) self.variationWindow = spawningParamsBlock[blockNames.SpawningParams.variationWindow] self.maxEpsilonWindow = spawningParamsBlock[blockNames.SpawningParams.maxEpsilonWindow] self.period = spawningParamsBlock.get(blockNames.SpawningParams.period, self.variationWindow) self.period += np.sign(np.abs(self.variationWindow-self.period)) if spawningType == blockNames.StringSpawningTypes.UCB: self.alpha = spawningParamsBlock.get(blockNames.SpawningParams.alpha, 8.0) if spawningType == blockNames.StringSpawningTypes.REAP: self.metricInd = spawningParamsBlock.get(blockNames.SpawningParams.metricsInd, -1) if spawningType == blockNames.StringSpawningTypes.independentMetric: self.reportCol = spawningParamsBlock[blockNames.SpawningParams.report_col]-1 self.condition = spawningParamsBlock.get(blockNames.SpawningParams.condition, blockNames.SpawningParams.minValue) if spawningType in spawningTypes.MSMSpawning: self.lagtime = spawningParamsBlock[blockNames.SpawningParams.lagtime] self.condition = spawningParamsBlock.get(blockNames.SpawningParams.condition, blockNames.SpawningParams.minValue) self.minPos = spawningParamsBlock.get(blockNames.SpawningParams.minPos) self.sasaColumn = spawningParamsBlock.get(blockNames.SpawningParams.SASA_column) if self.sasaColumn is not None: self.sasaColumn -= 1 class SpawningCalculator: def __init__(self): self.type = "BaseClass" @abstractmethod def calculate(self, clusters, trajToDivide, currentEpoch=None, outputPathConstants=None): pass @abstractmethod def log(self): pass @abstractmethod def createPlots(self, outputPathConstants, currentEpoch, clusters): pass def writeSpawningInitialStructures(self, outputPathConstants, degeneracyOfRepresentatives, clustering, iteration, topologies=None): tmpInitialStructuresTemplate = outputPathConstants.tmpInitialStructuresTemplate counts = 0 procMapping = [] for i, cluster in enumerate(clustering.clusters.clusters): for _ in range(int(degeneracyOfRepresentatives[i])): outputFilename = tmpInitialStructuresTemplate % (iteration, counts) print('Writing to ', outputFilename, 'cluster', i) procMapping.append(cluster.writeSpawningStructure(outputFilename)) counts += 1 print("counts & cluster centers", counts, np.where(np.array(degeneracyOfRepresentatives) > 0)[0].size) return counts, procMapping def divideTrajAccordingToWeights(self, weights, trajToDistribute): degeneracy = [] for i, weight in enumerate(weights): degeneracy.append(int(weight*trajToDistribute)) decimalPart = [] decimalPart = [math.modf(weight*trajToDistribute)[0] for weight in weights] sortedDecimals = np.argsort(decimalPart) sortedDecimals = sortedDecimals[::-1] leftProcessors = trajToDistribute-sum(degeneracy) for i in range(leftProcessors): degeneracy[sortedDecimals[i]] += 1 return degeneracy def divideProportionalToArray(self, array, trajToDistribute): if isinstance(array, list): array = np.array(array) weights = array/sum(array) return self.divideTrajAccordingToWeights(weights, trajToDistribute) def divideInverselyProportionalToArray(self, array, trajToDistribute): if isinstance(array, list): array = np.array(array) weights = 1./array weights[weights == np.inf] = 0 if weights.any(): weights /= sum(weights) else: weights[:] = 1./weights.shape[0] return 
self.divideTrajAccordingToWeights(weights, trajToDistribute) def getMetrics(self, clusters): metrics = np.zeros(len(clusters)) for i, cluster in enumerate(clusters): metrics[i] = cluster.getMetric() return metrics def shouldWriteStructures(self): return True class DensitySpawningCalculator(SpawningCalculator): def __init__(self, densityCalculator=densitycalculator.NullDensityCalculator()): SpawningCalculator.__init__(self) self.type = "BaseDensityClass" self.densityCalculator = densityCalculator def calculateDensities(self, clusters): densities = np.zeros(len(clusters)) for i, cluster in enumerate(clusters): contacts = cluster.getContacts() cluster.density = self.densityCalculator.calculate(contacts, cluster.contactThreshold) densities[i] = cluster.density return densities class IndependentRunsCalculator(SpawningCalculator): def __init__(self, parameters): SpawningCalculator.__init__(self) self.type = spawningTypes.SPAWNING_TYPES.independent self.parameters = parameters def writeSpawningInitialStructures(self, outputPathConstants, degeneracyOfRepresentatives, clustering, iteration, topologies=None): procMapping = [] trajWildcard = os.path.join(outputPathConstants.epochOutputPathTempletized, constants.trajectoryBasename) trajectories = utilities.getReportList(trajWildcard % (iteration-1)) for num, trajectory in enumerate(trajectories): snapshots = utilities.getSnapshots(trajectory) lastSnapshot = snapshots[-1] nSnapshots = len(snapshots) del snapshots numTraj = utilities.getReportNum(trajectory) outputFilename = outputPathConstants.tmpInitialStructuresTemplate % (iteration, num) procMapping.append((iteration-1, numTraj, nSnapshots-1)) if isinstance(lastSnapshot, basestring): with open(outputFilename, 'w') as f: f.write(lastSnapshot) else: utilities.write_mdtraj_object_PDB(lastSnapshot, outputFilename, topologies.getTopology(iteration-1, numTraj)) return len(trajectories), procMapping class IndependentMetricCalculator(SpawningCalculator): def __init__(self, parameters): SpawningCalculator.__init__(self) self.type = spawningTypes.SPAWNING_TYPES.independentMetric self.parameters = parameters def writeSpawningInitialStructures(self, outputPathConstants, degeneracyOfRepresentatives, clustering, iteration, topologies=None): procMapping = [] trajWildcard = os.path.join(outputPathConstants.epochOutputPathTempletized, constants.trajectoryBasename) trajectories = utilities.getReportList(trajWildcard % (iteration-1)) for num in range(len(trajectories)): reportFilename = os.path.join(outputPathConstants.epochOutputPathTempletized % (iteration-1), "%s_%d" % (self.parameters.reportFilename, num+1)) metric_array = np.genfromtxt(reportFilename, missing_values="--", filling_values=0) if len(metric_array.shape) < 2: metric_array = metric_array[np.newaxis, :] trajectory = utilities.getReportList("%s_%d.*" % (trajWildcard % (iteration-1), num+1)) assert len(trajectory) == 1, "Too many trajectories found in IndependentMetricCalculator" trajectory = trajectory[0] if self.parameters.condition == blockNames.SpawningParams.minValue: snapshot_ind = np.argmin(metric_array[:, self.parameters.reportCol]) else: snapshot_ind = np.argmax(metric_array[:, self.parameters.reportCol]) snapshots = utilities.getSnapshots(trajectory) snapshot = snapshots[snapshot_ind] del snapshots numTraj = int(os.path.splitext(trajectory.rsplit("_", 1)[-1])[0]) outputFilename = outputPathConstants.tmpInitialStructuresTemplate % (iteration, num) procMapping.append((iteration-1, numTraj, snapshot_ind)) if isinstance(snapshot, basestring): with 
open(outputFilename, 'w') as f: f.write(snapshot) else: utilities.write_mdtraj_object_PDB(snapshot, outputFilename, topologies.getTopology(iteration-1, numTraj)) return len(trajectories), procMapping class SameWeightDegeneracyCalculator(SpawningCalculator): def __init__(self, parameters): SpawningCalculator.__init__(self) self.type = spawningTypes.SPAWNING_TYPES.sameWeight self.parameters = parameters def calculate(self, clusters, trajToDistribute, currentEpoch=None, outputPathConstants=None): numClusters = len(clusters) trajToDistribute = min(trajToDistribute, numClusters) samples = random.sample(range(numClusters), trajToDistribute) degeneracy = [0] * len(clusters) for sample in samples: degeneracy[sample] = 1 return degeneracy def log(self): pass class InverselyProportionalToPopulationCalculator(DensitySpawningCalculator): def __init__(self, parameters, densityCalculator=densitycalculator.NullDensityCalculator()): DensitySpawningCalculator.__init__(self, densityCalculator) self.type = spawningTypes.SPAWNING_TYPES.inverselyProportional self.parameters = parameters def log(self): pass def calculate(self, clusters, trajToDistribute, currentEpoch=None, outputPathConstants=None): sizes = getSizes(clusters) densities = self.calculateDensities(clusters) if densities.any(): weights = sizes/densities else: weights = sizes argweights = weights.argsort() weights_trimmed = np.zeros(len(sizes)) + 1e6 weights_trimmed[argweights[:trajToDistribute]] = weights[argweights[:trajToDistribute]] return self.divideInverselyProportionalToArray(weights_trimmed, trajToDistribute) class EpsilonDegeneracyCalculator(DensitySpawningCalculator): def __init__(self, parameters, densityCalculator=densitycalculator.NullDensityCalculator()): DensitySpawningCalculator.__init__(self, densityCalculator) self.inverselyProportionalCalculator = InverselyProportionalToPopulationCalculator(parameters, densityCalculator) self.type = spawningTypes.SPAWNING_TYPES.epsilon self.degeneracyInverselyProportional = None self.degeneracyMetricProportional = None self.degeneracyTotal = None self.parameters = parameters def log(self): if self.degeneracyTotal is not None: print("[SpawningLog] Total: %s" % str(self.degeneracyTotal)) if self.degeneracyInverselyProportional is not None: print("[SpawningLog] Inversely prop: %s" % str(self.degeneracyInverselyProportional)) if self.degeneracyMetricProportional is not None: print("[SpawningLog] Metric prop: %s" % str(self.degeneracyMetricProportional)) def calculate(self, clusters, trajToDistribute, currentEpoch=None, outputPathConstants=None): trajToMetricProportional = int(self.parameters.epsilon * trajToDistribute) trajToInverselyProportional = trajToDistribute - trajToMetricProportional self.degeneracyInverselyProportional = self.inverselyProportionalCalculator.calculate(clusters, trajToInverselyProportional) self.degeneracyMetricProportional = self.divideProcessorsMetricProportional(clusters, trajToMetricProportional) self.degeneracyTotal = np.array(self.degeneracyInverselyProportional) + np.array(self.degeneracyMetricProportional) return self.degeneracyTotal.tolist()
MIT License
iovation/launchkey-python
launchkey/clients/service.py
ServiceClient.authorization_request
python
def authorization_request(self, user, context=None, policy=None, title=None,
                          ttl=None, push_title=None, push_body=None,
                          denial_reasons=None):
    kwargs = {'username': user}
    if context is not None:
        kwargs['context'] = context
    if title is not None:
        kwargs['title'] = title
    if ttl is not None:
        kwargs['ttl'] = ttl
    if push_title is not None:
        kwargs['push_title'] = push_title
    if push_body is not None:
        kwargs['push_body'] = push_body
    if policy is not None:
        if not isinstance(policy, AuthPolicy):
            raise InvalidParameters(
                "Please verify the input policy is a "
                "launchkey.entities.service.AuthPolicy class")
        kwargs['policy'] = policy.get_policy()
    if denial_reasons is not None:
        if not isinstance(denial_reasons, (list, set)):
            raise InvalidParameters(
                "Please ensure that input denial_reasons are a list of "
                "launchkey.entities.service.DenialReason classes.")
        parsed_reasons = []
        for reason in denial_reasons:
            if not isinstance(reason, DenialReason):
                raise InvalidParameters(
                    "Please verify that denial_reasons are "
                    "launchkey.entities.service.DenialReason classes.")
            parsed_reasons.append(
                {"id": reason.denial_id, "reason": reason.reason,
                 "fraud": reason.fraud}
            )
        kwargs['denial_reasons'] = parsed_reasons
    response = self._transport.post("/service/v3/auths", self._subject,
                                    **kwargs)
    data = self._validate_response(response, AuthorizeValidator)
    return AuthorizationRequest(data.get('auth_request'),
                                data.get('push_package'),
                                data.get('device_ids'))
Authorize a transaction for the provided user. This method would be utilized if you are using this as a secondary factor for user login or authorizing a single transaction within your application. This will NOT begin a user session.

:param user: LaunchKey Username, User Push ID, or Directory User ID for the End User
:param context: Arbitrary string of data up to 400 characters to be presented to the End User during authorization to provide context regarding the individual request
:param policy: Authorization policy override for this authorization. The policy can only increase the security level of any existing policy in the Service Profile. It can never reduce the security level of the Service Profile's policy.
:param title: String of data up to 200 characters to be presented to the End User during authorization as the title of the individual authorization request
:param ttl: Time for this authorization request to be valid. If no value is provided, the system default will be used.
:param push_title: Title that will appear in the mobile authenticator's push message. This feature is only available for Directory Services that have push credentials configured.
:param push_body: Body that will appear in the mobile authenticator's push message. This feature is only available for Directory Services that have push credentials configured.
:param denial_reasons: List of denial reasons to present to the user if they deny the request. This list must include at least two items. At least one of the items must have a fraud value of false and at least one of the items must have a fraud value of true. If no denial_reasons are given the defaults will be used. If a list is provided and denial context inquiry is not enabled for the Directory, this request will error. This feature is only available for Directory Services.
:raise: launchkey.exceptions.InvalidParameters - Input parameters were not correct
:raise: launchkey.exceptions.InvalidPolicyInput - Input policy was not valid
:raise: launchkey.exceptions.PolicyFailure - Auth creation failed due to user not passing policy
:raise: launchkey.exceptions.EntityNotFound - Username was invalid or the user does not have any valid devices
:raise: launchkey.exceptions.RateLimited - Too many authorization requests have been created for this user
:raise: launchkey.exceptions.InvalidPolicy - The input policy is not valid. It should be a launchkey.clients.service.AuthPolicy. Please wait and try again.
:raise: launchkey.exceptions.AuthorizationInProgress - Authorization request already exists for the requesting user. That request either needs to be responded to, expire out, or be canceled with cancel_authorization_request().
:return AuthorizationResponse: Unique identifier for tracking status of the authorization request
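A hedged usage sketch of this call. It assumes a ServiceClient instance has already been obtained, and that DenialReason accepts the denial_id/reason/fraud fields the method reads above; the constructor keywords, user name and reason texts are illustrative assumptions, not confirmed API details:

from launchkey.entities.service import DenialReason

# `service_client` is assumed to be an already-constructed ServiceClient.
denial_reasons = [
    DenialReason(denial_id="1", reason="It was not me", fraud=False),   # benign denial
    DenialReason(denial_id="2", reason="I suspect fraud", fraud=True),  # fraudulent denial
]

auth = service_client.authorization_request(
    "example-user",                    # LaunchKey username / push ID / directory user ID
    context="Approve wire transfer",   # shown to the end user, <= 400 chars
    title="Wire transfer",             # <= 200 chars
    ttl=120,                           # seconds the request stays valid
    denial_reasons=denial_reasons,
)
print(auth.auth_request)               # identifier used to track the authorization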
https://github.com/iovation/launchkey-python/blob/4e835da3f761e539a62289eda74ced12c1f5d36e/launchkey/clients/service.py#L81-L175
import warnings from json import loads from launchkey.exceptions import InvalidParameters, UnableToDecryptWebhookRequest, UnexpectedAuthorizationResponse, UnexpectedAPIResponse, UnexpectedWebhookRequest, XiovJWTValidationFailure, XiovJWTDecryptionFailure from launchkey.utils.shared import XiovJWTService, deprecated from launchkey.entities.validation import AuthorizationResponseValidator, AuthorizeSSEValidator, AuthorizeValidator, ServiceTOTPVerificationValidator from launchkey.entities.service import AuthPolicy, AuthorizationResponse, SessionEndRequest, AuthorizationRequest, AdvancedAuthorizationResponse, DenialReason from .base import BaseClient, api_call class ServiceClient(BaseClient): def __init__(self, subject_id, transport): super(ServiceClient, self).__init__('svc', subject_id, transport) self.x_iov_jwt_service = XiovJWTService(self._transport, self._subject) @api_call def authorize(self, user, context=None, policy=None, title=None, ttl=None, push_title=None, push_body=None): warnings.warn('This method has been deprecated and will be removed' ' in a future major release!', DeprecationWarning) auth = self.authorization_request(user, context, policy, title, ttl, push_title, push_body) return auth.auth_request @api_call
MIT License
trungdong/prov
src/prov/model.py
ProvActivity.wasEndedBy
python
def wasEndedBy(self, trigger, ender=None, time=None, attributes=None):
    self._bundle.end(self, trigger, ender, time, other_attributes=attributes)
    return self
Creates a new end record for this activity.

:param trigger: Entity triggering the end of this activity.
:param ender: Optionally extra activity to state a qualified end through which the trigger entity for the end is generated (default: None).
:param time: Optional time for the end (default: None). Either a :py:class:`datetime.datetime` object or a string that can be parsed by :py:func:`dateutil.parser`.
:param attributes: Optional other attributes as a dictionary or list of tuples to be added to the record optionally (default: None).
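A small, hedged sketch of how this chainable helper is typically used from a ProvDocument; the namespace, identifiers and timestamp are illustrative, and only wasEndedBy itself comes from the record above:

import datetime
from prov.model import ProvDocument

doc = ProvDocument()
doc.add_namespace("ex", "http://example.org/")

activity = doc.activity("ex:compile-report")
trigger = doc.entity("ex:stop-signal")

# Record that `trigger` ended the activity at a given time; the method
# returns the activity itself, so further calls can be chained.
activity.wasEndedBy(trigger, time=datetime.datetime(2020, 1, 1, 12, 0))

print(doc.get_provn())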
https://github.com/trungdong/prov/blob/15d33469305b7c8a8834a5da76b8ce71330077d6/src/prov/model.py#L785-L799
from collections import defaultdict from copy import deepcopy import datetime import io import itertools import logging import os import shutil import tempfile from urllib.parse import urlparse import dateutil.parser from prov import Error, serializers from prov.constants import * from prov.identifier import Identifier, QualifiedName, Namespace __author__ = "Trung Dong Huynh" __email__ = "trungdong@donggiang.com" logger = logging.getLogger(__name__) def _ensure_datetime(value): if isinstance(value, str): return dateutil.parser.parse(value) else: return value def parse_xsd_datetime(value): try: return dateutil.parser.parse(value) except ValueError: pass return None def parse_boolean(value): if value.lower() in ("false", "0"): return False elif value.lower() in ("true", "1"): return True else: return None DATATYPE_PARSERS = { datetime.datetime: parse_xsd_datetime, } XSD_DATATYPE_PARSERS = { XSD_STRING: str, XSD_DOUBLE: float, XSD_LONG: int, XSD_INT: int, XSD_BOOLEAN: parse_boolean, XSD_DATETIME: parse_xsd_datetime, XSD_ANYURI: Identifier, } def parse_xsd_types(value, datatype): return ( XSD_DATATYPE_PARSERS[datatype](value) if datatype in XSD_DATATYPE_PARSERS else None ) def first(a_set): return next(iter(a_set), None) def _ensure_multiline_string_triple_quoted(value): s = str(value) s = s.replace('"', '\\"') if "\n" in s: return '"""%s"""' % s else: return '"%s"' % s def encoding_provn_value(value): if isinstance(value, str): return _ensure_multiline_string_triple_quoted(value) elif isinstance(value, datetime.datetime): return '"{0}" %% xsd:dateTime'.format(value.isoformat()) elif isinstance(value, float): return '"%g" %%%% xsd:float' % value elif isinstance(value, bool): return '"%i" %%%% xsd:boolean' % value else: return str(value) class Literal(object): def __init__(self, value, datatype=None, langtag=None): self._value = str(value) if langtag: if datatype is None: logger.debug( "Assuming prov:InternationalizedString as the type of " '"%s"@%s' % (value, langtag) ) datatype = PROV["InternationalizedString"] elif datatype != PROV["InternationalizedString"]: logger.warning( 'Invalid data type (%s) for "%s"@%s, overridden as ' "prov:InternationalizedString." 
% (datatype, value, langtag) ) datatype = PROV["InternationalizedString"] self._datatype = datatype self._langtag = str(langtag) if langtag is not None else None def __str__(self): return self.provn_representation() def __repr__(self): return "<Literal: %s>" % self.provn_representation() def __eq__(self, other): return ( ( self._value == other.value and self._datatype == other.datatype and self._langtag == other.langtag ) if isinstance(other, Literal) else False ) def __ne__(self, other): return not (self == other) def __hash__(self): return hash((self._value, self._datatype, self._langtag)) @property def value(self): return self._value @property def datatype(self): return self._datatype @property def langtag(self): return self._langtag def has_no_langtag(self): return self._langtag is None def provn_representation(self): if self._langtag: return "%s@%s" % ( _ensure_multiline_string_triple_quoted(self._value), str(self._langtag), ) else: return "%s %%%% %s" % ( _ensure_multiline_string_triple_quoted(self._value), str(self._datatype), ) class ProvException(Error): pass class ProvWarning(Warning): pass class ProvExceptionInvalidQualifiedName(ProvException): qname = None def __init__(self, qname): self.qname = qname def __str__(self): return "Invalid Qualified Name: %s" % self.qname class ProvElementIdentifierRequired(ProvException): def __str__(self): return ( "An identifier is missing. All PROV elements require a valid " "identifier." ) class ProvRecord(object): FORMAL_ATTRIBUTES = () _prov_type = None def __init__(self, bundle, identifier, attributes=None): self._bundle = bundle self._identifier = identifier self._attributes = defaultdict(set) if attributes: self.add_attributes(attributes) def __hash__(self): return hash((self.get_type(), self._identifier, frozenset(self.attributes))) def copy(self): return PROV_REC_CLS[self.get_type()]( self._bundle, self.identifier, self.attributes ) def get_type(self): return self._prov_type def get_asserted_types(self): return self._attributes[PROV_TYPE] def add_asserted_type(self, type_identifier): self._attributes[PROV_TYPE].add(type_identifier) def get_attribute(self, attr_name): attr_name = self._bundle.valid_qualified_name(attr_name) return self._attributes[attr_name] @property def identifier(self): return self._identifier @property def attributes(self): return [ (attr_name, value) for attr_name, values in self._attributes.items() for value in values ] @property def args(self): return tuple( first(self._attributes[attr_name]) for attr_name in self.FORMAL_ATTRIBUTES ) @property def formal_attributes(self): return tuple( (attr_name, first(self._attributes[attr_name])) for attr_name in self.FORMAL_ATTRIBUTES ) @property def extra_attributes(self): return [ (attr_name, attr_value) for attr_name, attr_value in self.attributes if attr_name not in self.FORMAL_ATTRIBUTES ] @property def bundle(self): return self._bundle @property def label(self): return ( first(self._attributes[PROV_LABEL]) if self._attributes[PROV_LABEL] else self._identifier ) @property def value(self): return self._attributes[PROV_VALUE] def _auto_literal_conversion(self, literal): if isinstance(literal, ProvRecord): literal = literal.identifier if isinstance(literal, str): return str(literal) elif isinstance(literal, QualifiedName): return self._bundle.valid_qualified_name(literal) elif isinstance(literal, Literal) and literal.has_no_langtag(): if literal.datatype: value = parse_xsd_types(literal.value, literal.datatype) else: value = self._auto_literal_conversion(literal.value) if 
value is not None: return value return literal def add_attributes(self, attributes): if attributes: if isinstance(attributes, dict): attributes = attributes.items() if PROV_ATTR_COLLECTION in [_i[0] for _i in attributes]: is_collection = True else: is_collection = False for attr_name, original_value in attributes: if original_value is None: continue attr = self._bundle.valid_qualified_name(attr_name) if attr is None: raise ProvExceptionInvalidQualifiedName(attr_name) if attr in PROV_ATTRIBUTE_QNAMES: qname = ( original_value.identifier if isinstance(original_value, ProvRecord) else original_value ) value = self._bundle.valid_qualified_name(qname) elif attr in PROV_ATTRIBUTE_LITERALS: value = ( original_value if isinstance(original_value, datetime.datetime) else parse_xsd_datetime(original_value) ) else: value = self._auto_literal_conversion(original_value) if value is None: raise ProvException( "Invalid value for attribute %s: %s" % (attr, original_value) ) if ( not is_collection and attr in PROV_ATTRIBUTES and self._attributes[attr] ): existing_value = first(self._attributes[attr]) is_not_same_value = True try: is_not_same_value = value != existing_value except TypeError: pass if is_not_same_value: raise ProvException( "Cannot have more than one value for attribute %s" % attr ) else: continue self._attributes[attr].add(value) def __eq__(self, other): if not isinstance(other, ProvRecord): return False if self.get_type() != other.get_type(): return False if self._identifier and not (self._identifier == other._identifier): return False return set(self.attributes) == set(other.attributes) def __str__(self): return self.get_provn() def get_provn(self): items = [] relation_id = "" if self._identifier: identifier = str(self._identifier) if self.is_element(): items.append(identifier) else: relation_id = identifier + "; " for attr in self.FORMAL_ATTRIBUTES: if attr in self._attributes and self._attributes[attr]: value = first(self._attributes[attr]) items.append( value.isoformat() if isinstance(value, datetime.datetime) else str(value) ) else: items.append("-") extra = [] for attr in self._attributes: if attr not in self.FORMAL_ATTRIBUTES: for value in self._attributes[attr]: try: provn_represenation = value.provn_representation() except AttributeError: provn_represenation = encoding_provn_value(value) extra.append("%s=%s" % (str(attr), provn_represenation)) if extra: items.append("[%s]" % ", ".join(extra)) prov_n = "%s(%s%s)" % ( PROV_N_MAP[self.get_type()], relation_id, ", ".join(items), ) return prov_n def is_element(self): return False def is_relation(self): return False class ProvElement(ProvRecord): def __init__(self, bundle, identifier, attributes=None): if identifier is None: raise ProvElementIdentifierRequired() super(ProvElement, self).__init__(bundle, identifier, attributes) def is_element(self): return True def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self._identifier) class ProvRelation(ProvRecord): def is_relation(self): return True def __repr__(self): identifier = " %s" % self._identifier if self._identifier else "" element_1, element_2 = [qname for _, qname in self.formal_attributes[:2]] return "<%s:%s (%s, %s)>" % ( self.__class__.__name__, identifier, element_1, element_2, ) class ProvEntity(ProvElement): _prov_type = PROV_ENTITY def wasGeneratedBy(self, activity, time=None, attributes=None): self._bundle.generation(self, activity, time, other_attributes=attributes) return self def wasInvalidatedBy(self, activity, time=None, attributes=None): 
self._bundle.invalidation(self, activity, time, other_attributes=attributes) return self def wasDerivedFrom( self, usedEntity, activity=None, generation=None, usage=None, attributes=None ): self._bundle.derivation( self, usedEntity, activity, generation, usage, other_attributes=attributes ) return self def wasAttributedTo(self, agent, attributes=None): self._bundle.attribution(self, agent, other_attributes=attributes) return self def alternateOf(self, alternate2): self._bundle.alternate(self, alternate2) return self def specializationOf(self, generalEntity): self._bundle.specialization(self, generalEntity) return self def hadMember(self, entity): self._bundle.membership(self, entity) return self class ProvActivity(ProvElement): FORMAL_ATTRIBUTES = (PROV_ATTR_STARTTIME, PROV_ATTR_ENDTIME) _prov_type = PROV_ACTIVITY def set_time(self, startTime=None, endTime=None): if startTime is not None: self._attributes[PROV_ATTR_STARTTIME] = {startTime} if endTime is not None: self._attributes[PROV_ATTR_ENDTIME] = {endTime} def get_startTime(self): values = self._attributes[PROV_ATTR_STARTTIME] return first(values) if values else None def get_endTime(self): values = self._attributes[PROV_ATTR_ENDTIME] return first(values) if values else None def used(self, entity, time=None, attributes=None): self._bundle.usage(self, entity, time, other_attributes=attributes) return self def wasInformedBy(self, informant, attributes=None): self._bundle.communication(self, informant, other_attributes=attributes) return self def wasStartedBy(self, trigger, starter=None, time=None, attributes=None): self._bundle.start(self, trigger, starter, time, other_attributes=attributes) return self
MIT License
mpeven/ntu_rgb
opengl_viewer/opengl_viewer.py
OpenGlViewer.view
python
def view(self):
    glutInit()
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(width, height)
    glutInitWindowPosition(0, 0)
    glutCreateWindow("Optical Flow Viewer")
    glutMouseFunc(self.mouse_button)
    glutMotionFunc(self.mouse_motion)
    glutDisplayFunc(self.draw)
    glutIdleFunc(self.draw)
    glutReshapeFunc(self.reshape_func)
    glutKeyboardFunc(self.key_pressed)
    glutSpecialFunc(self.sp_key_pressed)
    glClearColor(0., 0., 0., 1.)
    glClearDepth(1.0)
    glEnable(GL_DEPTH_TEST)
    glDepthFunc(GL_LESS)
    glEnable(GL_COLOR_MATERIAL)
    glEnable(GL_TEXTURE_2D)
    glShadeModel(GL_SMOOTH)
    glutMainLoop()
Main function to create window and register callbacks for displaying
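A hedged usage sketch: construct the viewer with a 3D optical-flow volume and start the GLUT loop. The shape and source of op_flow are assumptions based on the Voxel_Flow_3D wrapper in the record's context; the file name is a placeholder:

import numpy as np
from opengl_viewer.opengl_viewer import OpenGlViewer

# Hypothetical optical-flow volume; the real project builds this from
# NTU RGB+D depth frames before handing it to the viewer.
op_flow = np.load("optical_flow_sample.npy")

viewer = OpenGlViewer(op_flow, record=False)
viewer.view()   # opens the "Optical Flow Viewer" window and blocks in glutMainLoop()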
https://github.com/mpeven/ntu_rgb/blob/4a8b43c521500907d2f241e4b440381cf8c62350/opengl_viewer/opengl_viewer.py#L139-L167
import sys import datetime as dt import time import numpy as np from tqdm import tqdm from PIL import Image from OpenGL.GLUT import * from OpenGL.GLU import * from OpenGL.GL import * from opengl_viewer.voxel_flow import Voxel_Flow_3D from opengl_viewer.optical_flow import Optical_flow_3D from opengl_viewer.camera import Camera from opengl_viewer.shapes import * width, height = 1000, 800 step_size0 = 0.05 rotation_angle0 = 2 * np.pi/180 class OpenGlViewer: def __init__(self, op_flow, record=False): self.record = record self.last_frame_change = time.time() + 5 self.last_draw = time.time() self.frame = 0 self.draw_fps = 20 self.fps = 0 self.last_key = None self.last_key_t = dt.datetime.now() self.camera = Camera() self.step_size = step_size0 self.rotation_angle = rotation_angle0 self.quadric = gluNewQuadric() self.op_flow = Voxel_Flow_3D(op_flow) self.num_frames = self.op_flow.num_frames self.buffers = None def draw(self): if self.frame == self.get_frame(): glMatrixMode(GL_MODELVIEW) glLoadIdentity() gluLookAt(*self.camera.get_viewing_matrix()) return glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) glMatrixMode(GL_MODELVIEW) glLoadIdentity() gluLookAt(*self.camera.get_viewing_matrix()) self.set_fps() self.draw_axes() if self.buffers is None: self.create_vbo() self.draw_vbo() glFlush() glutSwapBuffers() if self.record: screenshot = glReadPixels(0,0,width,height,GL_RGB,GL_UNSIGNED_BYTE) image = Image.frombytes("RGB", (width, height), screenshot) image = image.transpose(Image.FLIP_TOP_BOTTOM) import glob frame_num = len(glob.glob('/Users/mpeven/Projects/Activity_Recognition/screenshots/*')) image.save('screenshots/frame_{:05}.jpg'.format(frame_num)) print(self.frame) if self.frame == (self.num_frames - 1): glutLeaveMainLoop() def create_vbo(self): self.buffers = glGenBuffers(self.op_flow.num_frames * 3) for frame in tqdm(range(self.op_flow.num_frames), "Sending VBO to GPU"): glBindBuffer(GL_ARRAY_BUFFER, self.buffers[frame*3]) glBufferData(GL_ARRAY_BUFFER, len(self.op_flow.get_colors(frame))*4, (ctypes.c_float*len(self.op_flow.get_colors(frame)))(*self.op_flow.get_colors(frame)), GL_DYNAMIC_DRAW) glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.buffers[1 + frame*3]) glBufferData(GL_ELEMENT_ARRAY_BUFFER, len(self.op_flow.get_indices(frame))*4, (ctypes.c_uint*len(self.op_flow.get_indices(frame)))(*self.op_flow.get_indices(frame)), GL_DYNAMIC_DRAW) glBindBuffer(GL_ARRAY_BUFFER, self.buffers[2 + frame*3]) glBufferData(GL_ARRAY_BUFFER, len(self.op_flow.get_vertices(frame))*4, (ctypes.c_float*len(self.op_flow.get_vertices(frame)))(*self.op_flow.get_vertices(frame)), GL_DYNAMIC_DRAW) def draw_vbo(self): frame = self.get_frame() glEnableClientState(GL_VERTEX_ARRAY) glEnableClientState(GL_COLOR_ARRAY) glBindBuffer(GL_ARRAY_BUFFER, self.buffers[2 + frame*3]) glVertexPointer(3, GL_FLOAT, 0, None) glBindBuffer(GL_ARRAY_BUFFER, self.buffers[0 + frame*3]) glColorPointer(3, GL_FLOAT, 0, None) glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.buffers[1 + frame*3]) glDrawElements(GL_TRIANGLES, len(self.op_flow.get_indices(frame)), GL_UNSIGNED_INT, None) glDisableClientState(GL_COLOR_ARRAY) glDisableClientState(GL_VERTEX_ARRAY)
MIT License
ucaiado/rl_trading
market_sim/_agents/dissertation_tests.py
QLearningAgent1.__init__
python
def __init__(self, env, i_id, d_normalizers, d_ofi_scale, f_min_time=3600.,
             f_gamma=0.5, f_alpha=0.5, i_numOfTilings=16, s_decay_fun=None,
             f_ttoupdate=5., d_initial_pos={}, s_hedging_on='DI1F19',
             b_hedging=True, b_keep_pos=True):
    super(QLearningAgent1, self).__init__(env, i_id, d_normalizers,
                                          d_ofi_scale, f_min_time, f_gamma,
                                          f_alpha, i_numOfTilings,
                                          s_decay_fun, f_ttoupdate,
                                          d_initial_pos, s_hedging_on,
                                          b_hedging, b_keep_pos)
    self.s_agent_name = 'QLearningAgent1'
    self.features_names = ['position']
Initialize a QLearningAgent. Save all parameters as attributes.

:param env: Environment Object. The Environment where the agent acts
:param i_id: integer. Agent id
:param d_normalizers: dictionary. The maximum range of each feature
:param f_min_time*: float. Minimum time in seconds for the agent to react
:param f_gamma*: float. weight of delayed versus immediate rewards
:param f_alpha*: the initial learning rate used
:param i_numOfTilings*: number of tilings desired
:param s_decay_fun*: string. The exploration factor decay function
:param f_ttoupdate*: float. time in seconds to choose a different action
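A minimal construction sketch, assuming an already-built market environment and normalizer dictionaries; the import path is inferred from the record's file path and the variable names are assumptions, not code from the project:

# Import path inferred from market_sim/_agents/dissertation_tests.py (assumption).
from _agents.dissertation_tests import QLearningAgent1

# `env`, `d_normalizers` and `d_ofi_scale` are assumed to come from the
# market_sim environment setup; only a few keyword defaults are overridden here.
agent = QLearningAgent1(env,
                        i_id=10,
                        d_normalizers=d_normalizers,
                        d_ofi_scale=d_ofi_scale,
                        f_gamma=0.9,
                        s_hedging_on='DI1F19')
print(agent.s_agent_name, agent.features_names)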
https://github.com/ucaiado/rl_trading/blob/f4168c69f44fe5a11a06461387d4591426a43735/market_sim/_agents/dissertation_tests.py#L32-L58
from agent_rl import QLearningAgent


class QLearningAgent1(QLearningAgent):
    actions_to_open = [None]
Apache License 2.0
datitran/object_detector_app
object_detection/core/data_decoder.py
DataDecoder.Decode
python
def Decode(self, data):
    pass
Return a single image and associated labels.

Args:
  data: a string tensor holding a serialized protocol buffer corresponding
    to data for a single image.

Returns:
  tensor_dict: a dictionary containing tensors. Possible keys are defined in
    reader.Fields.
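A hedged sketch of a concrete subclass. The feature keys and parsing logic are illustrative stand-ins (assuming the TF 1.x API this repository uses), not the project's real TfExampleDecoder:

import tensorflow as tf
from object_detection.core import data_decoder


class MinimalDecoder(data_decoder.DataDecoder):
    """Toy decoder that parses only an encoded image from a tf.Example."""

    def Decode(self, data):
        features = tf.parse_single_example(
            data, {'image/encoded': tf.FixedLenFeature((), tf.string)})
        image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
        # Real decoders populate many more keys (boxes, classes, masks, ...).
        return {'image': image}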
https://github.com/datitran/object_detector_app/blob/44e8eddeb931cced5d8cf1e283383c720a5706bf/object_detection/core/data_decoder.py#L31-L42
from abc import ABCMeta
from abc import abstractmethod


class DataDecoder(object):
    __metaclass__ = ABCMeta

    @abstractmethod
MIT License
airbus-seclab/rebus
rebus/storage.py
Storage.processed_stats
python
def processed_stats(self, domain):
    raise NotImplementedError
Returns a list of pairs (agent name, number of processed selectors), together with the total number of selectors in this domain.
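A hedged sketch of what an implementation returning that shape might look like, using a toy in-memory backend; the attribute names and bookkeeping are assumptions, not the real REbus storage backends:

from collections import defaultdict

from rebus.storage import Storage


class InMemoryStorage(Storage):
    """Toy backend keeping processed-selector bookkeeping in dictionaries."""

    def __init__(self, options=None):
        self.processed = defaultdict(set)   # (domain, agent_name) -> {selectors}
        self.selectors = defaultdict(set)   # domain -> {selectors}

    def processed_stats(self, domain):
        stats = [(agent, len(sels))
                 for (dom, agent), sels in self.processed.items()
                 if dom == domain]
        return stats, len(self.selectors[domain])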
https://github.com/airbus-seclab/rebus/blob/1d83e50cf4fde563e09c856a658051a9dbab0622/rebus/storage.py#L197-L202
from rebus.tools.registry import Registry import threading import re import sqlite3 class StorageRegistry(Registry): pass class Storage(object): _name_ = "Storage" _desc_ = "N/A" STORES_INTSTATE = False @staticmethod def register(f): return StorageRegistry.register_ref(f, key="_name_") def __init__(self, options=None): pass def find(self, domain, selector_regex, limit=0, offset=0): raise NotImplementedError def find_by_selector(self, domain, selector_prefix, limit=0, offset=0): raise NotImplementedError def find_by_uuid(self, domain, uuid): raise NotImplementedError def find_by_value(self, domain, selector_prefix, value_regex): raise NotImplementedError def list_uuids(self, domain): raise NotImplementedError def get_descriptor(self, domain, selector): raise NotImplementedError def get_value(self, domain, selector): raise NotImplementedError def get_children(self, domain, selector, recurse=True): raise NotImplementedError def add(self, descriptor): raise NotImplementedError def mark_processed(self, domain, selector, agent_name, config_txt): raise NotImplementedError def mark_processable(self, domain, selector, agent_name, config_txt): raise NotImplementedError def get_processed(self, domain, selector): raise NotImplementedError def get_processable(self, domain, selector): raise NotImplementedError
BSD 2-Clause Simplified License
qahive/robotframework-puppeteer
PuppeteerLibrary/keywords/waiting.py
WaitingKeywords.wait_until_element_finished_animating
python
def wait_until_element_finished_animating(self, selenium_locator, timeout=None):
    return self.loop.run_until_complete(
        self.get_async_keyword_group().wait_until_element_finished_animating(
            selenium_locator, timeout))
Waits until the specified element has finished animating. This is checked by comparing the element's position over time.
https://github.com/qahive/robotframework-puppeteer/blob/fba8f5c71dcec0a778a9ed22129bf1dc5e8ef1c3/PuppeteerLibrary/keywords/waiting.py#L211-L217
from PuppeteerLibrary.base.robotlibcore import keyword from PuppeteerLibrary.ikeywords.iwaiting_async import iWaitingAsync from PuppeteerLibrary.base.librarycomponent import LibraryComponent class WaitingKeywords(LibraryComponent): def __init__(self, ctx): super().__init__(ctx) def get_async_keyword_group(self) -> iWaitingAsync: return self.ctx.get_current_library_context().get_async_keyword_group(type(self).__name__) @keyword def wait_for_request_url(self, url, method='GET', body=None, timeout=None): return self.loop.run_until_complete(self.get_async_keyword_group().wait_for_request_url(url, method, body, timeout)) @keyword def wait_for_response_url(self, url, status=200, body=None, timeout=None): return self.loop.run_until_complete(self.get_async_keyword_group().wait_for_response_url(url, status, body, timeout)) @keyword def wait_for_navigation(self, timeout=None): return self.loop.run_until_complete(self.get_async_keyword_group().wait_for_navigation(timeout)) @keyword def wait_until_page_contains_element(self, locator, timeout=None): self.loop.run_until_complete(self.get_async_keyword_group().wait_until_page_contains_element(locator, timeout)) @keyword def wait_until_element_is_hidden(self, locator, timeout=None): return self.loop.run_until_complete(self.get_async_keyword_group().wait_until_element_is_hidden(locator, timeout)) @keyword def wait_until_element_is_visible(self, locator, timeout=None): return self.loop.run_until_complete(self.get_async_keyword_group().wait_until_element_is_visible(locator, timeout)) @keyword def wait_until_page_contains(self, text, timeout=None): self.loop.run_until_complete(self.get_async_keyword_group().wait_until_page_contains(text, timeout)) @keyword def wait_until_page_does_not_contains(self, text, timeout=None): return self.loop.run_until_complete(self.get_async_keyword_group().wait_until_page_does_not_contains(text, timeout)) @keyword def wait_until_element_contains(self, locator, text, timeout=None): return self.loop.run_until_complete(self.get_async_keyword_group().wait_until_element_contains(locator, text, timeout)) @keyword def wait_until_element_does_not_contains(self, locator, text, timeout=None): return self.loop.run_until_complete(self.get_async_keyword_group().wait_until_element_does_not_contains(locator, text, timeout)) @keyword def wait_until_location_contains(self, expected, timeout=None): return self.loop.run_until_complete(self.get_async_keyword_group().wait_until_location_contains(expected, timeout)) @keyword def wait_until_location_does_not_contains(self, expected, timeout=None): return self.loop.run_until_complete(self.get_async_keyword_group().wait_until_location_does_not_contains(expected, timeout)) @keyword def wait_until_element_is_enabled(self, selenium_locator, timeout=None): return self.loop.run_until_complete(self.get_async_keyword_group().wait_until_element_is_enabled(selenium_locator, timeout)) @keyword
Apache License 2.0
simpeg/simpeg
SimPEG/flow/richards/empirical.py
Haverkamp_theta.derivM
python
def derivM(self, u):
    return (
        self._derivTheta_r(u)
        + self._derivTheta_s(u)
        + self._derivAlpha(u)
        + self._derivBeta(u)
    )
derivative with respect to m

.. code::

    import sympy as sy

    alpha, u, beta, theta_r, theta_s = sy.symbols(
        'alpha u beta theta_r theta_s', real=True
    )

    f_n = (
        alpha * (theta_s - theta_r) /
        (alpha + abs(u)**beta) +
        theta_r
    )
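A hedged sketch expanding the sympy snippet above into the per-parameter derivatives that derivM stacks together; this is pure sympy and does not touch the SimPEG mesh or projection machinery:

import sympy as sy

alpha, u, beta, theta_r, theta_s = sy.symbols(
    'alpha u beta theta_r theta_s', real=True)

# Haverkamp water-retention curve (unsaturated regime, u < 0).
f_n = alpha * (theta_s - theta_r) / (alpha + abs(u)**beta) + theta_r

# Derivatives with respect to each model parameter; derivM sums the
# corresponding terms evaluated on the mesh.
for p in (theta_r, theta_s, alpha, beta):
    print(p, sy.simplify(sy.diff(f_n, p)))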
https://github.com/simpeg/simpeg/blob/a264ba6a32ba3c83d82601add37f51d8e1cc5e90/SimPEG/flow/richards/empirical.py#L117-L140
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np import scipy.sparse as sp from scipy import constants from ... import utils, props def _get_projections(u): nP = len(u) bools = u >= 0 ind_p = np.where(bools)[0] ind_n = np.where(~bools)[0] P_p = sp.csr_matrix((np.ones(len(ind_p)), (ind_p, ind_p)), shape=(nP, nP)) P_n = sp.csr_matrix((np.ones(len(ind_n)), (ind_n, ind_n)), shape=(nP, nP)) return P_p, P_n def _partition_args(mesh, Hcond, Theta, hcond_args, theta_args, **kwargs): hcond_params = {k: kwargs[k] for k in kwargs if k in hcond_args} theta_params = {k: kwargs[k] for k in kwargs if k in theta_args} other_params = {k: kwargs[k] for k in kwargs if k not in hcond_args + theta_args} if len(other_params) > 0: raise Exception("Unknown parameters: {}".format(other_params)) hcond = Hcond(mesh, **hcond_params) theta = Theta(mesh, **theta_params) return hcond, theta class NonLinearModel(props.HasModel): counter = None mesh = None def __init__(self, mesh, **kwargs): self.mesh = mesh super(NonLinearModel, self).__init__(**kwargs) @property def nP(self): return self.mesh.nC class BaseWaterRetention(NonLinearModel): def plot(self, ax=None): import matplotlib.pyplot as plt if ax is None: plt.figure() ax = plt.subplot(111) self.validate() h = -np.logspace(-2, 3, 1000) ax.semilogx(-h, self(h)) ax.set_title("Water retention curve") ax.set_xlabel("Soil water potential, $- \psi$") ax.set_ylabel("Water content, $\\theta$") class BaseHydraulicConductivity(NonLinearModel): def plot(self, ax=None): import matplotlib.pyplot as plt if ax is None: plt.figure() ax = plt.subplot(111) self.validate() h = -np.logspace(-2, 3, 1000) ax.loglog(-h, self(h)) ax.set_title("Hydraulic conductivity function") ax.set_xlabel("Soil water potential, $- \psi$") ax.set_ylabel("Hydraulic conductivity, $K$") class Haverkamp_theta(BaseWaterRetention): theta_r, theta_rMap, theta_rDeriv = props.Invertible( "residual water content [L3L-3]", default=0.075 ) theta_s, theta_sMap, theta_sDeriv = props.Invertible( "saturated water content [L3L-3]", default=0.287 ) alpha, alphaMap, alphaDeriv = props.Invertible("", default=1.611e06) beta, betaMap, betaDeriv = props.Invertible("", default=3.96) def _get_params(self): return self.theta_r, self.theta_s, self.alpha, self.beta def __call__(self, u): theta_r, theta_s, alpha, beta = self._get_params() f = alpha * (theta_s - theta_r) / (alpha + abs(u) ** beta) + theta_r if np.isscalar(theta_s): f[u >= 0] = theta_s else: f[u >= 0] = theta_s[u >= 0] return f
MIT License
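A small, self-contained NumPy sketch of the Haverkamp water-retention curve that `derivM` above differentiates; the parameter defaults are the class defaults visible in the context, while the function name and sample inputs are made up for illustration.

import numpy as np

def haverkamp_theta(u, theta_r=0.075, theta_s=0.287, alpha=1.611e06, beta=3.96):
    # theta(u) = alpha * (theta_s - theta_r) / (alpha + |u|**beta) + theta_r,
    # held at theta_s for non-negative pressure heads (saturated soil).
    u = np.asarray(u, dtype=float)
    theta = alpha * (theta_s - theta_r) / (alpha + np.abs(u) ** beta) + theta_r
    return np.where(u >= 0, theta_s, theta)

print(haverkamp_theta([-1000.0, -10.0, 0.0]))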
gnosis/safe-transaction-service
safe_transaction_service/contracts/tasks.py
reindex_contracts_without_metadata_task
python
def reindex_contracts_without_metadata_task() -> int:
    try:
        i = 0
        for address in Contract.objects.without_metadata().values_list(
            "address", flat=True
        ):
            logger.info("Reindexing contract %s", address)
            create_or_update_contract_with_metadata_task.apply_async(
                (address,), priority=0
            )
            i += 1
        return i
    finally:
        close_gevent_db_connection()
Try to reindex existing contracts without metadata

:return: Number of contracts missing
https://github.com/gnosis/safe-transaction-service/blob/b26efe58f1c4bf89c5461c38065bc3c51ba4af04/safe_transaction_service/contracts/tasks.py#L41-L59
from django.db import IntegrityError, transaction from celery import app from celery.utils.log import get_task_logger from eth_typing import ChecksumAddress from gnosis.eth.clients import EtherscanRateLimitError from safe_transaction_service.history.models import MultisigTransaction from safe_transaction_service.utils.ethereum import get_ethereum_network from safe_transaction_service.utils.utils import close_gevent_db_connection from .models import Contract logger = get_task_logger(__name__) @app.shared_task() def create_missing_contracts_with_metadata_task() -> int: try: i = 0 for ( address ) in MultisigTransaction.objects.not_indexed_metadata_contract_addresses(): logger.info("Detected missing contract %s", address) create_or_update_contract_with_metadata_task.apply_async( (address,), priority=0 ) i += 1 return i finally: close_gevent_db_connection() @app.shared_task()
MIT License
openstack/keystone
keystone/server/flask/request_processing/middleware/auth_context.py
AuthContextMiddleware.factory
python
def factory(cls, global_config, **local_config):
    def _factory(app):
        conf = global_config.copy()
        conf.update(local_config)
        return cls(app, **local_config)
    return _factory
Used for loading in middleware (holdover from paste.deploy).
https://github.com/openstack/keystone/blob/1e7ecca881a51144d61ae8026e1a77d6669997e2/keystone/server/flask/request_processing/middleware/auth_context.py#L482-L488
import functools import itertools import re import wsgiref.util import http.client from keystonemiddleware import auth_token import oslo_i18n from oslo_log import log from oslo_serialization import jsonutils import webob.dec import webob.exc from keystone.common import authorization from keystone.common import context from keystone.common import provider_api from keystone.common import render_token from keystone.common import tokenless_auth from keystone.common import utils import keystone.conf from keystone import exception from keystone.federation import constants as federation_constants from keystone.federation import utils as federation_utils from keystone.i18n import _ from keystone.models import token_model CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs CONTEXT_ENV = 'openstack.context' __all__ = ('AuthContextMiddleware',) CONF = keystone.conf.CONF LOG = log.getLogger(__name__) JSON_ENCODE_CONTENT_TYPES = set(['application/json', 'application/json-home']) ACCESS_RULES_MIN_VERSION = token_model.ACCESS_RULES_MIN_VERSION def best_match_language(req): if not req.accept_language: return None return req.accept_language.best_match( oslo_i18n.get_available_languages('keystone')) def base_url(context): url = CONF['public_endpoint'] if url: substitutions = dict( itertools.chain(CONF.items(), CONF.eventlet_server.items())) url = url % substitutions elif 'environment' in context: url = wsgiref.util.application_uri(context['environment']) url = re.sub(r'/v(3|(2\.0))/*$', '', url) url = utils.remove_standard_port(url) else: url = 'http://localhost:%d' % CONF.eventlet_server.public_port return url.rstrip('/') def middleware_exceptions(method): @functools.wraps(method) def _inner(self, request): try: return method(self, request) except exception.Error as e: LOG.warning(e) return render_exception(e, request=request, user_locale=best_match_language(request)) except TypeError as e: LOG.exception(e) return render_exception(exception.ValidationError(e), request=request, user_locale=best_match_language(request)) except Exception as e: LOG.exception(e) return render_exception(exception.UnexpectedError(exception=e), request=request, user_locale=best_match_language(request)) return _inner def render_response(body=None, status=None, headers=None, method=None): if headers is None: headers = [] else: headers = list(headers) headers.append(('Vary', 'X-Auth-Token')) if body is None: body = b'' status = status or (http.client.NO_CONTENT, http.client.responses[http.client.NO_CONTENT]) else: content_types = [v for h, v in headers if h == 'Content-Type'] if content_types: content_type = content_types[0] else: content_type = None if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES: body = jsonutils.dump_as_bytes(body, cls=utils.SmarterEncoder) if content_type is None: headers.append(('Content-Type', 'application/json')) status = status or (http.client.OK, http.client.responses[http.client.OK]) def _convert_to_str(headers): str_headers = [] for header in headers: str_header = [] for value in header: if not isinstance(value, str): str_header.append(str(value)) else: str_header.append(value) str_headers.append(tuple(str_header)) return str_headers headers = _convert_to_str(headers) resp = webob.Response(body=body, status='%d %s' % status, headerlist=headers, charset='utf-8') if method and method.upper() == 'HEAD': stored_headers = resp.headers.copy() resp.body = b'' for header, value in stored_headers.items(): resp.headers[header] = value return resp def 
render_exception(error, context=None, request=None, user_locale=None): error_message = error.args[0] message = oslo_i18n.translate(error_message, desired_locale=user_locale) if message is error_message: message = str(message) body = {'error': { 'code': error.code, 'title': error.title, 'message': message, }} headers = [] if isinstance(error, exception.AuthPluginException): body['error']['identity'] = error.authentication elif isinstance(error, exception.Unauthorized): local_context = {} if request: local_context = {'environment': request.environ} elif context and 'environment' in context: local_context = {'environment': context['environment']} url = base_url(local_context) headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url)) return render_response(status=(error.code, error.title), body=body, headers=headers) class AuthContextMiddleware(provider_api.ProviderAPIMixin, auth_token.BaseAuthProtocol): kwargs_to_fetch_token = True def __init__(self, app): super(AuthContextMiddleware, self).__init__(app, log=LOG, service_type='identity') self.token = None def fetch_token(self, token, **kwargs): try: self.token = self.token_provider_api.validate_token( token, access_rules_support=ACCESS_RULES_MIN_VERSION) return render_token.render_token_response_from_model(self.token) except exception.TokenNotFound: raise auth_token.InvalidToken(_('Could not find token')) def _build_tokenless_auth_context(self, request): tokenless_helper = tokenless_auth.TokenlessAuthHelper(request.environ) (domain_id, project_id, trust_ref, unscoped, system) = ( tokenless_helper.get_scope()) user_ref = tokenless_helper.get_mapped_user( project_id, domain_id) if user_ref['type'] == federation_utils.UserType.EPHEMERAL: auth_context = {} auth_context['group_ids'] = user_ref['group_ids'] auth_context[federation_constants.IDENTITY_PROVIDER] = ( user_ref[federation_constants.IDENTITY_PROVIDER]) auth_context[federation_constants.PROTOCOL] = ( user_ref[federation_constants.PROTOCOL]) if domain_id and project_id: msg = _('Scoping to both domain and project is not allowed') raise ValueError(msg) if domain_id: auth_context['domain_id'] = domain_id if project_id: auth_context['project_id'] = project_id auth_context['roles'] = user_ref['roles'] else: token = token_model.TokenModel() token.user_id = user_ref['id'] token.methods = [CONF.tokenless_auth.protocol] token.domain_id = domain_id token.project_id = project_id auth_context = {'user_id': user_ref['id']} auth_context['is_delegated_auth'] = False if domain_id: auth_context['domain_id'] = domain_id if project_id: auth_context['project_id'] = project_id auth_context['roles'] = [role['name'] for role in token.roles] return auth_context def _validate_trusted_issuer(self, request): if not CONF.tokenless_auth.trusted_issuer: return False issuer = request.environ.get(CONF.tokenless_auth.issuer_attribute) if not issuer: msg = ('Cannot find client issuer in env by the ' 'issuer attribute - %s.') LOG.info(msg, CONF.tokenless_auth.issuer_attribute) return False if issuer in CONF.tokenless_auth.trusted_issuer: return True msg = ('The client issuer %(client_issuer)s does not match with ' 'the trusted issuer %(trusted_issuer)s') LOG.info( msg, {'client_issuer': issuer, 'trusted_issuer': CONF.tokenless_auth.trusted_issuer}) return False @middleware_exceptions def process_request(self, request): context_env = request.environ.get(CONTEXT_ENV, {}) token = request.headers.get(authorization.AUTH_TOKEN_HEADER) if CONF.admin_token and (token == CONF.admin_token): context_env['is_admin'] = True 
LOG.warning( "The use of the '[DEFAULT] admin_token' configuration" "option presents a significant security risk and should " "not be set. This option is deprecated in favor of using " "'keystone-manage bootstrap' and will be removed in a " "future release.") request.environ[CONTEXT_ENV] = context_env if not context_env.get('is_admin', False): resp = super(AuthContextMiddleware, self).process_request(request) if resp: return resp if request.token_auth.user is not None: request.set_user_headers(request.token_auth.user) self.fill_context(request) def _keystone_specific_values(self, token, request_context): request_context.token_reference = ( render_token.render_token_response_from_model(token) ) if token.domain_scoped: request_context.is_admin_project = False request_context.domain_id = token.domain_id request_context.domain_name = token.domain['name'] if token.oauth_scoped: request_context.is_delegated_auth = True request_context.oauth_consumer_id = ( token.access_token['consumer_id'] ) request_context.oauth_access_token_id = token.access_token_id if token.trust_scoped: request_context.is_delegated_auth = True request_context.trust_id = token.trust_id if token.is_federated: request_context.group_ids = [] for group in token.federated_groups: request_context.group_ids.append(group['id']) else: request_context.group_ids = [] def fill_context(self, request): if authorization.AUTH_CONTEXT_ENV in request.environ: msg = ('Auth context already exists in the request ' 'environment; it will be used for authorization ' 'instead of creating a new one.') LOG.warning(msg) return kwargs = { 'authenticated': False, 'overwrite': True} request_context = context.RequestContext.from_environ( request.environ, **kwargs) request.environ[context.REQUEST_CONTEXT_ENV] = request_context if request.environ.get(CONTEXT_ENV, {}).get('is_admin', False): request_context.is_admin = True auth_context = {} elif request.token_auth.has_user_token: if not self.token: self.token = PROVIDERS.token_provider_api.validate_token( request.user_token, access_rules_support=request.headers.get( authorization.ACCESS_RULES_HEADER) ) self._keystone_specific_values(self.token, request_context) request_context.auth_token = request.user_token auth_context = request_context.to_policy_values() additional = { 'trust_id': request_context.trust_id, 'trustor_id': request_context.trustor_id, 'trustee_id': request_context.trustee_id, 'domain_id': request_context._domain_id, 'domain_name': request_context.domain_name, 'group_ids': request_context.group_ids, 'token': self.token } auth_context.update(additional) elif self._validate_trusted_issuer(request): auth_context = self._build_tokenless_auth_context(request) token_attributes = frozenset(( 'user_id', 'project_id', 'domain_id', 'user_domain_id', 'project_domain_id', 'user_domain_name', 'project_domain_name', 'roles', 'is_admin', 'project_name', 'domain_name', 'system_scope', 'is_admin_project', 'service_user_id', 'service_user_name', 'service_project_id', 'service_project_name', 'service_user_domain_id' 'service_user_domain_name', 'service_project_domain_id', 'service_project_domain_name', 'service_roles')) for attr in token_attributes: if attr in auth_context: setattr(request_context, attr, auth_context[attr]) request_context.token_reference = {'token': None} else: return request_context.authenticated = True LOG.debug('RBAC: auth_context: %s', auth_context) request.environ[authorization.AUTH_CONTEXT_ENV] = auth_context @classmethod
Apache License 2.0
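A generic sketch of the paste.deploy factory pattern that `AuthContextMiddleware.factory` above implements, using a stand-in middleware class and config dicts so the closure over the configuration is visible; none of these names come from the dataset or from Keystone.

class LoggingMiddleware:
    def __init__(self, app, **local_config):
        self.app = app
        self.local_config = local_config

    @classmethod
    def factory(cls, global_config, **local_config):
        # paste.deploy calls factory() once with the config, then calls the
        # returned _factory with the next WSGI app in the pipeline.
        def _factory(app):
            return cls(app, **local_config)
        return _factory

def inner_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

make_middleware = LoggingMiddleware.factory({'debug': 'false'}, log_level='info')
app = make_middleware(inner_app)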
pythongssapi/python-gssapi
gssapi/sec_contexts.py
SecurityContext.lifetime
python
def lifetime(self):
    return rsec_contexts.context_time(self)
The amount of time for which this context remains valid
https://github.com/pythongssapi/python-gssapi/blob/69f8b3a1108e24cdeba400361bc31d3c4bc45f0a/gssapi/sec_contexts.py#L418-L420
from gssapi.raw import sec_contexts as rsec_contexts from gssapi.raw import message as rmessage from gssapi.raw import named_tuples as tuples from gssapi.raw.types import RequirementFlag, IntEnumFlagSet import gssapi.exceptions as excs from gssapi import _utils from gssapi.names import Name from gssapi.creds import Credentials class SecurityContext(rsec_contexts.SecurityContext, metaclass=_utils.CheckLastError): def __new__(cls, base=None, token=None, name=None, creds=None, lifetime=None, flags=None, mech=None, channel_bindings=None, usage=None): if token is not None: base = rsec_contexts.import_sec_context(token) return super(SecurityContext, cls).__new__(cls, base) def __init__(self, base=None, token=None, name=None, creds=None, lifetime=None, flags=None, mech=None, channel_bindings=None, usage=None): self._last_err = None if base is None and token is None: if usage is not None: if usage not in ('initiate', 'accept'): msg = "Usage must be either 'initiate' or 'accept'" raise excs.UnknownUsageError(msg, obj="security context") self.usage = usage elif creds is not None and creds.usage != 'both': self.usage = creds.usage elif name is not None: self.usage = 'initiate' else: self.usage = 'accept' if self.usage == 'initiate': if name is None: raise TypeError("You must pass the 'name' argument when " "creating an initiating security context") self._target_name = name self._mech = mech self._desired_flags = IntEnumFlagSet(RequirementFlag, flags) self._desired_lifetime = lifetime else: if (name is not None or flags is not None or mech is not None or lifetime is not None): raise TypeError("You must pass at most the 'creds' " "argument when creating an accepting " "security context") self._channel_bindings = channel_bindings self._creds = creds self._delegated_creds = None else: try: if self.locally_initiated: self.usage = 'initiate' else: self.usage = 'accept' except excs.MissingContextError: msg = ("Cannot extract usage from a partially completed " "context") raise excs.UnknownUsageError(msg, obj="security context") self._complete = None def get_signature(self, message): return rmessage.get_mic(self, message) def verify_signature(self, message, mic): return rmessage.verify_mic(self, message, mic) def wrap(self, message, encrypt): return rmessage.wrap(self, message, encrypt) def unwrap(self, message): return rmessage.unwrap(self, message) def encrypt(self, message): res = self.wrap(message, encrypt=True) if not res.encrypted: raise excs.EncryptionNotUsed("Wrapped message was not encrypted") return res.message def decrypt(self, message): res = self.unwrap(message) if (not res.encrypted and self.actual_flags & RequirementFlag.confidentiality): raise excs.EncryptionNotUsed("The context was established with " "encryption, but unwrapped message " "was not encrypted", unwrapped_message=res.message) return res.message def get_wrap_size_limit(self, desired_output_size, encrypted=True): return rmessage.wrap_size_limit(self, desired_output_size, encrypted) def process_token(self, token): rsec_contexts.process_context_token(self, token) def export(self): return rsec_contexts.export_sec_context(self) _INQUIRE_ARGS = ('initiator_name', 'target_name', 'lifetime', 'mech', 'flags', 'locally_init', 'complete') @_utils.check_last_err def _inquire(self, **kwargs): if not kwargs: default_val = True else: default_val = False for arg in self._INQUIRE_ARGS: kwargs[arg] = kwargs.get(arg, default_val) res = rsec_contexts.inquire_context(self, **kwargs) if (kwargs.get('initiator_name', False) and res.initiator_name is not 
None): init_name = Name(res.initiator_name) else: init_name = None if (kwargs.get('target_name', False) and res.target_name is not None): target_name = Name(res.target_name) else: target_name = None return tuples.InquireContextResult(init_name, target_name, res.lifetime, res.mech, res.flags, res.locally_init, res.complete) @property
ISC License
edaub/fdfault
python/fdfault/front.py
front.__str__
python
def __str__(self):
    outstr = "Front: output = "+str(self.outputstatus)
    if self.outputstatus:
        outstr += ", field = "+self.field+", value = "+str(self.value)
    return outstr
Returns string representation of front
https://github.com/edaub/fdfault/blob/ec066f032ba109843164429aa7d9e7352485d735/python/fdfault/front.py#L130-L135
from __future__ import division, print_function class front(object): def __init__(self): self.outputstatus = False self.field = 'V' self.value = 0.001 def get_output(self): return self.outputstatus def set_output(self, newoutput): assert type(newoutput) is bool, "front output must be a boolean" self.outputstatus = newoutput def get_field(self): return self.field def set_field(self, field): assert (field == "U" or field == "V"), "Incorrect field name" self.field = field def get_value(self): return self.value def set_value(self, value): assert (value > 0.), "Front threshhold must be positive" self.value = value def write_input(self,f): f.write("[fdfault.frontlist]\n") f.write(str(int(self.outputstatus))+"\n") if self.outputstatus: f.write(self.field+"\n") f.write(repr(self.value)+"\n")
MIT License
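A short usage sketch for the `front` class shown in the context above, based on the setters it exposes; the import path is assumed from the file location and may differ in an installed package.

from fdfault.front import front   # assumed import path

f = front()
f.set_output(True)   # enable rupture front output
f.set_field("U")     # track slip rather than slip rate
f.set_value(0.01)    # threshold for declaring rupture arrival
print(f)             # "Front: output = True, field = U, value = 0.01"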
awslabs/amazon-s3-find-and-forget
backend/lambda_layers/decorators/python/decorators.py
json_body_loader
python
def json_body_loader(handler):
    @functools.wraps(handler)
    def wrapper(event, context):
        if isinstance(event.get("body"), str):
            event["body"] = json.loads(event["body"])

        return handler(event, context)

    return wrapper
Decorator which loads the JSON body of a request
https://github.com/awslabs/amazon-s3-find-and-forget/blob/1b6929571296ed4e5a71d1e6df07a1c1c855c19d/backend/lambda_layers/decorators/python/decorators.py#L39-L51
import functools import inspect import json import logging import os from copy import deepcopy from uuid import uuid4 import boto3 import jsonschema from botocore.exceptions import ClientError from boto_utils import DecimalEncoder, parse_s3_url logger = logging.getLogger() logger.setLevel(os.getenv("LogLevel", logging.INFO)) s3 = boto3.resource("s3") def with_logging(handler): logging.setLogRecordFactory(LogRecord) @functools.wraps(handler) def wrapper(event, *args, **kwargs): logger.debug("## HANDLER: %s", handler.__name__) logger.debug("## ENVIRONMENT VARIABLES") logger.debug(json.dumps(os.environ.copy())) logger.debug("## EVENT") logger.debug("Event: %s", event) return handler(event, *args, **kwargs) return wrapper
Apache License 2.0
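A usage sketch for the `json_body_loader` decorator above, assuming it is in scope; the handler name and event payload are invented for illustration (API Gateway-style events carry the request body as a JSON string).

@json_body_loader
def create_item_handler(event, context):
    # By the time the handler runs, the body has been parsed into a dict.
    return {"statusCode": 201, "body": event["body"]["name"]}

event = {"body": '{"name": "example", "size": 3}'}
print(create_item_handler(event, context=None))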
bioconda/bioconda-utils
test/test_utils.py
single_build
python
def single_build(request, recipes_fixture):
    if request.param:
        logger.error("Making recipe builder")
        docker_builder = docker_utils.RecipeBuilder(
            use_host_conda_bld=True,
            docker_base_image=DOCKER_BASE_IMAGE)
        mulled_test = True
        logger.error("DONE")
    else:
        docker_builder = None
        mulled_test = False
    logger.error("Fixture: Building 'one' %s",
                 "within docker" if docker_builder else "locally")
    build.build(
        recipe=recipes_fixture.recipe_dirs['one'],
        pkg_paths=recipes_fixture.pkgs['one'],
        docker_builder=docker_builder,
        mulled_test=mulled_test,
    )
    logger.error("Fixture: Building 'one' %s -- DONE",
                 "within docker" if docker_builder else "locally")
    yield recipes_fixture.pkgs['one']
    for pkg in recipes_fixture.pkgs['one']:
        ensure_missing(pkg)
Builds the "one" recipe.
https://github.com/bioconda/bioconda-utils/blob/df49b2169672255d5937b181cb86fbe08f7ebaaa/test/test_utils.py#L102-L128
import os import sys import subprocess as sp import pytest import yaml import tempfile import requests import uuid import contextlib import tarfile import logging import shutil from textwrap import dedent from conda_build import metadata from bioconda_utils import __version__ from bioconda_utils import utils from bioconda_utils import pkg_test from bioconda_utils import docker_utils from bioconda_utils import build from bioconda_utils import upload from helpers import ensure_missing, Recipes logger = logging.getLogger(__name__) TEST_LABEL = 'bioconda-utils-test' DOCKER_BASE_IMAGE = "quay.io/bioconda/bioconda-utils-test-env-cos7:latest" SKIP_DOCKER_TESTS = sys.platform.startswith('darwin') SKIP_NOT_OSX = not sys.platform.startswith('darwin') if SKIP_DOCKER_TESTS: PARAMS = [False] IDS = ['system conda'] else: PARAMS = [True, False] IDS = ['with docker', 'system conda'] @contextlib.contextmanager def ensure_env_missing(env_name): def _clean(): proc = sp.run(['conda', 'env', 'list'], stdout=sp.PIPE, stderr=sp.STDOUT, check=True, universal_newlines=True) if env_name in proc.stdout: sp.run(['conda', 'env', 'remove', '-y', '-n', env_name], stdout=sp.PIPE, stderr=sp.STDOUT, check=True, universal_newlines=True) _clean() yield _clean() @pytest.fixture(scope='module') def recipes_fixture(): rcp = Recipes('test_case.yaml') rcp.write_recipes() rcp.pkgs = {} for key, val in rcp.recipe_dirs.items(): rcp.pkgs[key] = utils.built_package_paths(val) yield rcp for pkgs in rcp.pkgs.values(): for pkg in pkgs: ensure_missing(pkg) @pytest.fixture(scope='module') def config_fixture(): config = utils.load_config( os.path.join(os.path.dirname(__file__), "test-config.yaml")) yield config @pytest.fixture(scope='module', params=PARAMS, ids=IDS)
MIT License
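The `single_build` fixture above is driven by indirect parametrization (`params=PARAMS, ids=IDS` in the context); a generic, self-contained sketch of that pytest pattern with made-up names:

import pytest

@pytest.fixture(scope='module', params=[True, False],
                ids=['with docker', 'system conda'])
def build_mode(request):
    # request.param takes each value from `params`; the fixture body can
    # branch on it, exactly as single_build branches on request.param.
    yield 'docker' if request.param else 'local'

def test_build_mode(build_mode):
    assert build_mode in ('docker', 'local')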
akarat/exchange
exchange.py
fixer
python
def fixer(base, target):
    api_url = 'http://api.fixer.io/latest'
    resp = requests.get(
        api_url,
        params={
            'base': base,
            'symbols': target,
        },
        timeout=1,
    )
    data = resp.json()
    return decimal.Decimal(data['rates'][target])
Parse data from fixer.io.
https://github.com/akarat/exchange/blob/36d5246b4b5bd57b72f6f1d54aedcb983368c328/exchange.py#L80-L92
import re import decimal import logging import requests __author__ = 'Hsiaoming Yang <me@lepture.com>' __version__ = '0.3' __all__ = ['rate'] logger = logging.getLogger('exchange') def rate(base, target, error_log=None): if base == target: return decimal.Decimal(1.00) services = [yahoo, fixer, ecb] if error_log is None: error_log = _error_log for fn in services: try: return fn(base, target) except Exception as e: error_log(e) return None def _error_log(e): logger.exception('Exchange Exception: %r' % e) def yahoo(base, target): api_url = 'http://download.finance.yahoo.com/d/quotes.csv' resp = requests.get( api_url, params={ 'e': '.csv', 'f': 'sl1d1t1', 's': '{0}{1}=X'.format(base, target) }, timeout=1, ) value = resp.text.split(',', 2)[1] return decimal.Decimal(value)
BSD 3-Clause New or Revised License
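A usage sketch for the `rate` helper visible in the context above (it tries yahoo, fixer, and ecb in turn); these third-party endpoints may no longer respond, so the call can legitimately return None.

from exchange import rate   # module name taken from exchange.py above

value = rate('USD', 'EUR')  # Decimal exchange rate, or None if every service failed
if value is None:
    print('No service could provide a USD->EUR rate')
else:
    print('1 USD = {} EUR'.format(value))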
wilsonwangthu/neural_graph_evolution
agent/evolution_agent.py
evolutionary_agent.build_ppo_update_op
python
def build_ppo_update_op(self):
    self.build_update_op_preprocess()

    self.ratio_clip = tf.clip_by_value(self.ratio,
                                       1.0 - self.args.ppo_clip,
                                       1.0 + self.args.ppo_clip)
    self.surr = tf.minimum(self.ratio_clip * self.advantage_placeholder,
                           self.ratio * self.advantage_placeholder)
    self.surr = -tf.reduce_mean(self.surr)

    self.vf_loss = self.baseline_network.get_vf_loss()
    self.loss = self.surr

    if self.args.use_kl_penalty:
        self.loss += self.kl_lambda_placeholder * self.kl
        self.loss += self.args.kl_eta * tf.square(
            tf.maximum(0.0, self.kl - 2.0 * self.args.target_kl))

    self.weight_decay_loss = 0.0
    for var in tf.trainable_variables():
        self.weight_decay_loss += tf.nn.l2_loss(var)
    if self.args.use_weight_decay:
        self.loss += self.weight_decay_loss * self.args.weight_decay_coeff

    self.lr_placeholder = tf.placeholder(tf.float32, [], name='learning_rate')
    self.current_lr = self.args.lr

    if self.args.use_gnn_as_policy:
        self.optimizer = tf.train.AdamOptimizer(self.lr_placeholder)
        self.tvars = tf.trainable_variables()
        self.grads = tf.gradients(self.loss, self.tvars)
        self.clipped_grads, _ = tf.clip_by_global_norm(
            self.grads, self.args.grad_clip_value, name='clipping_gradient'
        )
        self.update_op = self.optimizer.apply_gradients(
            zip(self.clipped_grads, self.tvars)
        )
    else:
        self.update_op = tf.train.AdamOptimizer(
            learning_rate=self.lr_placeholder, epsilon=1e-5
        ).minimize(self.loss)
        self.tvars = tf.trainable_variables()
        self.grads = tf.gradients(self.loss, self.tvars)

    if self.args.shared_network:
        assert False
    self.update_vf_op = tf.train.AdamOptimizer(
        learning_rate=self.args.value_lr, epsilon=1e-5
    ).minimize(self.vf_loss)
@brief: The only difference from the vpg update is that here we clip the ratio
https://github.com/wilsonwangthu/neural_graph_evolution/blob/9142b7f1c32c476aa44c83da38d3f0bacc276d73/agent/evolution_agent.py#L456-L522
import numpy as np import tensorflow as tf import init_path import random from util import utils from util import logger from util import parallel_util from util import ob_normalizer import os from .agent import base_agent from graph_util import graph_data_util from .rollout_master_agent import parallel_rollout_master_agent import time from util import agent_util from graph_util import gnn_io_util from env import model_gen from env import hierarchy_model from lxml import etree from evolution import species2species class evolutionary_agent(base_agent, parallel_rollout_master_agent): def __init__(self, args, task_q, result_q, agent_id, name_scope='evolutionary_agent'): self.agent_id = agent_id base_agent.__init__( self, args=args, observation_size=-1, action_size=-1, task_q=task_q, result_q=result_q, name_scope=name_scope ) self.reset_running_mean_info() self.ob_normalizer = ob_normalizer.normalizer() self.baseline_network = None self.env_info = None self.error_count = 0 self.best_reward = -np.inf self.timesteps_so_far = 0 self._npr = np.random.RandomState(args.seed + self.agent_id) self.debug = 0 self.last_average_reward = np.nan self.end_average_reward = np.nan self.start_time = None self.is_dead_species = False self.brute_search_reward = [] if self.args.nervenetplus: if self.args.fc_pruning: assert self.args.use_nervenet else: assert self.args.use_nervenet and self.args.use_gnn_as_policy def rollout_and_train(self): start_count_reward_iteration = self.args.evolutionary_sub_iteration * (1 - self.args.reward_count_percentage) self.end_average_reward = [] self.last_average_reward = 0 for i_iteration in range(self.args.evolutionary_sub_iteration): if_record_reward = i_iteration > np.floor(start_count_reward_iteration) rollout_data = self.ask_for_rollouts() self.update_running_means(rollout_data) stats = self.update_parameters(rollout_data) if self.step_policy_network is not None: self.set_step_policy(self.get_policy()) if if_record_reward: self.end_average_reward.append(stats['avg_reward']) self.last_average_reward = stats['avg_reward'] if self.args.brute_force_search: self.brute_search_reward.append(stats['avg_reward']) stats['brute_reward'] = self.brute_search_reward return stats def ask_for_rollouts(self): self.ob_normalizer.set_parameters( self.running_mean_info['mean'], self.running_mean_info['variance'], self.running_mean_info['step'] ) num_timesteps_received = 0 rollout_data = [] num_rollouts = 0 while True: num_rollouts += 1 if self.args.test and self.args.test < num_rollouts: break if num_timesteps_received >= self.args.timesteps_per_batch: break traj_episode = self.rollout_for_one_episode() rollout_data.append(traj_episode) num_timesteps_received += len(traj_episode['rewards']) return rollout_data def rollout_for_one_episode(self): if self.args.nervenetplus: self.current_state = { node_type: np.zeros( [len(self.node_info['node_type_dict'][node_type]), self.args.gnn_node_hidden_dim] ) for node_type in self.node_info['node_type_dict'] } hidden_state = { node_type: [] for node_type in self.node_info['node_type_dict'] } obs, actions, rewards, action_dists_mu, action_dists_logstd, raw_obs = [], [], [], [], [], [] path = dict() raw_ob = self.env.reset() ob = self.ob_normalizer.filter(raw_ob) while True: if self.args.nervenetplus: for node_type in self.node_info['node_type_dict']: hidden_state[node_type].append( self.current_state[node_type] ) action, action_dist_mu, action_dist_logstd = self.act(ob) obs.append(ob) raw_obs.append(raw_ob) actions.append(action) 
action_dists_mu.append(action_dist_mu) action_dists_logstd.append(action_dist_logstd) try: result = self.env.step(action) except Exception as ex: mj_err = str(ex) logger.error(mj_err) if 'mjWARN_INERTIA' in mj_err: self.error_count += 1 if self.error_count >= 10: logger.info('Killing the species') result = [] result.append(np.random.rand(*raw_ob.shape)) result.append(-2048.0) result.append(1) self.is_dead_species = True else: logger.info('Resetting a specific species') obs, actions, rewards, action_dists_mu, action_dists_logstd, raw_obs = [], [], [], [], [], [] path = dict() raw_ob = self.env.reset() ob = self.ob_normalizer.filter(raw_ob) continue elif 'mjWARN_BADQACC' in mj_err or 'mjWARN_BADCTRL' in mj_err: logger.info('Killing the species') result = [] result.append(np.random.rand(*raw_ob.shape)) result.append(-2048.0) result.append(1) self.is_dead_species = True else: result = self.env.step(action) raw_ob = result[0] ob = self.ob_normalizer.filter(result[0]) rewards.append((result[1])) if result[2]: path = { "obs": np.array(obs), "raw_obs": np.array(raw_obs), "action_dists_mu": np.concatenate(action_dists_mu), "action_dists_logstd": np.concatenate(action_dists_logstd), "rewards": np.array(rewards), "actions": np.array(actions) } if self.args.nervenetplus: path['hidden_state'] = hidden_state for node_type in self.node_info['node_type_dict']: path['hidden_state'][node_type] = np.concatenate(hidden_state[node_type]) break return path def act(self, obs): obs = np.expand_dims(obs, 0) feed_dict, _ = self.prepared_policy_network_feeddict( obs, step_model=self.args.nervenetplus ) if self.args.nervenetplus: step_input_hidden_state = self.step_policy_network.get_input_hidden_state_placeholder() for node_type in self.node_info['node_type_dict']: feed_dict[step_input_hidden_state[node_type]] = self.current_state[node_type] results = self.session.run( [self.step_action_dist_mu, self.step_action_dist_logstd] + self.step_policy_network.get_output_hidden_state_list(), feed_dict=feed_dict ) action_dist_mu = results[0] action_dist_logstd = results[1] self.current_state = { node_info: results[2 + iid] for iid, node_info in enumerate(self.node_info['node_type_dict']) } else: action_dist_mu, action_dist_logstd = self.session.run( [self.action_dist_mu, self.action_dist_logstd], feed_dict=feed_dict ) act = action_dist_mu + np.exp(action_dist_logstd) * self._npr.randn(*action_dist_logstd.shape) act = act.ravel() return act, action_dist_mu, action_dist_logstd def build_env(self, received_data=None): assert 'evo' in self.args.task allow_monitor = 0 if received_data is None: if self.args.new_species_struct: if self.args.more_body_nodes_at_start: body_num = random.randint(3, 6) else: body_num = 3 species = hierarchy_model.Species(self.args, body_num=body_num) adj_matrix, node_attr = species.get_gene() xml_struct, xml_str = species.get_xml() else: adj_matrix, node_attr, xml_str = model_gen.get_initial_settings() xml_str = etree.tostring(xml_str, pretty_print=True) else: if 'rank_info' in received_data: generation, rank = [int(info) for info in received_data['rank_info'].split('_')] if generation % self.args.species_visualize_freq == 0 and rank <= self.args.visualize_top_species: allow_monitor = True if self.args.new_species_struct: species = received_data['species'] adj_matrix = received_data['adj_matrix'] node_attr = received_data['node_attr'] xml_str = received_data['xml_str'] if 'fish' in self.args.task: from env import fish_env_wrapper self.env = fish_env_wrapper.dm_evofish3d_wrapper( self.args, 
self._npr.randint(0, 99999), allow_monitor, adj_matrix=adj_matrix, xml_str=xml_str ) elif 'walker' in self.args.task or 'cheetah' in self.args.task or 'hopper' in self.args.task: from env import walker_env_wrapper self.env = walker_env_wrapper.dm_evowalker_wrapper( self.args, self._npr.randint(0, 99999), allow_monitor, adj_matrix=adj_matrix, xml_str=xml_str ) else: raise NotImplementedError if allow_monitor and received_data is not None: self.env.set_output_dir(received_data['video_save_path']) self.is_dm_env = True self.action_size = self.env.get_action_size() self.observation_size = self.env.get_observation_size() if self.args.new_species_struct: self.species = species self.adj_matrix = adj_matrix self.node_attr = node_attr self.xml_str = xml_str def run(self): received_signal, received_data = self.task_q.get() if received_signal == parallel_util.END_SIGNAL: self.task_q.task_done() elif received_signal == parallel_util.AGENT_EVOLUTION_START: self.build_env(received_data) self.build_models() training_stats = self.rollout_and_train() self.task_q.task_done() species_data = self.get_species_info(training_stats, received_data) self.result_q.put(species_data) elif received_signal == parallel_util.AGENT_EVOLUTION_TRAIN: self.build_env(received_data) self.build_models(received_data) training_stats = self.rollout_and_train() self.task_q.task_done() species_data = self.get_species_info(training_stats, received_data) self.result_q.put(species_data) def build_models(self, received_data=None): self.start_time = time.time() self.build_session() self.build_policy_network(adj_matrix=self.adj_matrix, node_attr=self.node_attr) self.baseline_network, self.target_return_placeholder, self.raw_obs_placeholder = agent_util.build_baseline_network( self.args, self.session, self.name_scope, self.observation_size, self.gnn_placeholder_list, self.obs_placeholder ) self.build_ppo_update_op() self.update_parameters = self.update_ppo_parameters self.session.run(tf.global_variables_initializer()) self.get_policy = utils.GetPolicyWeights(self.session, self.policy_var_list) self.set_policy = utils.SetPolicyWeights(self.session, self.policy_var_list) if received_data is not None: if received_data['reset']: logger.info('Not Inheriting from parents!') else: if self.args.use_gnn_as_policy: current_species_format = self.get_species_info() processed_species_info = species2species.process_inherited_info( raw_species_info=received_data, current_species_format=current_species_format, is_nervenet=self.args.use_gnn_as_policy ) self.set_policy(processed_species_info['policy_weights']) self.set_running_means( processed_species_info['running_mean_info'] ) self.current_lr = received_data['lr'] else: if received_data['SpcID'] > 0 and self.args.fc_amortized_fitness: self.set_policy(received_data['policy_weights']) self.set_running_means( received_data['running_mean_info'] ) self.prepare_feed_dict_map() self.current_kl_lambda = 1
MIT License
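The clipped surrogate built in `build_ppo_update_op` follows the standard PPO objective; a tiny NumPy illustration of the clipping step (the values are arbitrary, and this is a generic sketch rather than the repository's code):

import numpy as np

ppo_clip = 0.2
ratio = np.array([0.5, 0.9, 1.1, 1.6])        # pi_new / pi_old per sample
advantage = np.array([1.0, -1.0, 2.0, 1.0])

ratio_clipped = np.clip(ratio, 1.0 - ppo_clip, 1.0 + ppo_clip)
surrogate = np.minimum(ratio_clipped * advantage, ratio * advantage)
loss = -np.mean(surrogate)                    # the quantity the optimizer minimizes
print(loss)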
tyiannak/deep_audio_features
deep_audio_features/lib/training.py
progress
python
def progress(loss, epoch, batch, batch_size, dataset_size):
    batches = math.ceil(float(dataset_size) / batch_size)
    count = batch * batch_size

    bar_len = 40
    filled_len = int(round(bar_len * count / float(dataset_size)))
    bar = '=' * filled_len + '-' * (bar_len - filled_len)

    status = 'Epoch {}, Loss: {:.4f}'.format(epoch, loss)
    _progress_str = "\r \r [{}] ...{}".format(bar, status)
    sys.stdout.write(_progress_str)
    sys.stdout.flush()

    if batch == batches:
        print()
Print the progress of the training for each epoch
https://github.com/tyiannak/deep_audio_features/blob/9cfaf4f12883752ffe7eaaa373c2667893a00e3b/deep_audio_features/lib/training.py#L343-L360
import torch import sys import math from copy import deepcopy import numpy as np from sklearn.metrics import f1_score def train_and_validate(model, train_loader, valid_loader, loss_function, optimizer, epochs, cnn=False, validation_epochs=5, early_stopping=False, patience=10): scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', verbose=True) device = next(model.parameters()).device print(next(iter(train_loader))) EPOCHS = epochs VALIDATION_EPOCHS = validation_epochs all_accuracy_training = [] all_accuracy_validation = [] all_train_loss = [] all_valid_loss = [] all_valid_f1 = [] best_model = None best_model_epoch = 0 min_loss = 1000 f1_max = 0 early_stop_counter = 0 for epoch in range(1, EPOCHS + 1): scheduler.step(epoch) train_loss, train_acc = train(epoch, train_loader, model, loss_function, optimizer, cnn=cnn) all_train_loss.append(train_loss) all_accuracy_training.append(train_acc) valid_loss, valid_acc, valid_f1 = validate(epoch, valid_loader, model, loss_function, validation_epochs, cnn=cnn) if best_model is None: best_model = deepcopy(model).to('cpu') best_model_epoch = epoch f1_max = valid_f1 elif valid_f1 > f1_max + 1e-5: f1_max = valid_f1 best_model = deepcopy(model).to('cpu') best_model_epoch = epoch early_stop_counter = 0 else: early_stop_counter += 1 all_valid_loss.append(valid_loss) all_accuracy_validation.append(valid_acc) all_valid_f1.append(valid_f1) if epoch < 4 * VALIDATION_EPOCHS: continue if early_stopping is False: continue STOP = True if early_stop_counter > patience: print(f'\nIncreasing loss..') print(f'\nResetting model to epoch {best_model_epoch}.') model.to('cpu') best_model = best_model.to(device) return best_model, all_train_loss, all_valid_loss, all_accuracy_training, all_accuracy_validation, all_valid_f1, epoch """ if (abs(all_valid_loss[-1] - all_valid_loss[-2]) < 1e-3 and abs(all_valid_loss[-2] - all_valid_loss[-3]) < 1e-3): print(f'\nVery small change in loss..') print(f'\nResetting model to epoch {best_model_epoch}.') # Remove unnessesary model model.to('cpu') best_model = best_model.to(device) # Exit 2 loops at the same time, go to testing return best_model, all_train_loss, all_valid_loss, \ all_accuracy_training, all_accuracy_validation, epoch """ print(f'\nTraining exited normally at epoch {epoch}.') model.to('cpu') best_model = best_model.to(device) return best_model, all_train_loss, all_valid_loss, all_accuracy_training, all_accuracy_validation, all_valid_f1, epoch def train(_epoch, dataloader, model, loss_function, optimizer, cnn=False): model.train() training_loss = 0.0 correct = 0 device = next(model.parameters()).device for index, batch in enumerate(dataloader, 1): inputs, labels, lengths = batch inputs = inputs.to(device) labels = labels.type('torch.LongTensor').to(device) optimizer.zero_grad() if cnn is False: y_pred = model.forward(inputs, lengths) else: inputs = inputs[:, np.newaxis, :, :] y_pred = model.forward(inputs) loss = loss_function(y_pred, labels) labels_cpu = labels.detach().clone().to('cpu').numpy() correct += sum([int(a == b) for a, b in zip(labels_cpu, np.argmax(y_pred.detach().clone().to('cpu').numpy(), axis=1))]) loss.backward() optimizer.step() training_loss += loss.data.item() progress(loss=loss.data.item(), epoch=_epoch, batch=index, batch_size=dataloader.batch_size, dataset_size=len(dataloader.dataset)) progress(loss=training_loss / len(dataloader.dataset), epoch=_epoch, batch=index, batch_size=dataloader.batch_size, dataset_size=len(dataloader.dataset)) accuracy = correct/len(dataloader.dataset) * 100 
return training_loss / len(dataloader.dataset), accuracy def validate(_epoch, dataloader, model, loss_function, validation_epochs, cnn=False): model.eval() correct = 0 device = next(model.parameters()).device with torch.no_grad(): valid_loss = 0 pred_all = [] actual_labels = [] for index, batch in enumerate(dataloader, 1): inputs, labels, lengths = batch inputs = inputs.to(device) labels = labels.type('torch.LongTensor').to(device) if cnn is False: y_pred = model.forward(inputs, lengths) else: inputs = inputs[:, np.newaxis, :, :] y_pred = model.forward(inputs) loss = loss_function(y_pred, labels) valid_loss += loss.data.item() y_pred = np.argmax(y_pred.detach().clone().to('cpu').numpy(), axis=1) pred_all.append(y_pred) labels_cpu = labels.detach().clone().to('cpu').numpy() actual_labels.append(labels_cpu) correct += sum([int(a == b) for a, b in zip(labels_cpu, y_pred)]) accuracy = correct / len(dataloader.dataset) labels = [item for sublist in actual_labels for item in sublist] preds = [item for sublist in pred_all for item in sublist] f1 = f1_score(labels, preds, average='macro') if _epoch % validation_epochs == 0: print('\nValidation results for epoch {}:'.format(_epoch)) print(' --> loss: {}'.format( round(valid_loss/len(dataloader.dataset), 4))) print(' --> accuracy: {}'.format(round(accuracy, 4))) print(' --> f1 score: {}'.format(round(f1, 4))) return valid_loss / len(dataloader.dataset), accuracy, f1 def test(model, dataloader, cnn=False, classifier=True): device = next(model.parameters()).device model.eval() correct = 0 posteriors = [] y_pred = [] y_true = [] for index, batch in enumerate(dataloader, 1): inputs, labels, lengths = batch inputs = inputs.to(device) labels = labels.type('torch.LongTensor').to(device) if cnn is False: out = model.forward(inputs, lengths) else: inputs = inputs[:, np.newaxis, :, :] out = model.forward(inputs) if classifier is False: posteriors.append(out.cpu().detach().numpy()) y_pred.append(None) y_true.append(None) else: predictions = torch.argmax(out, -1) y_pred.append(predictions.cpu().data.numpy()) y_true.append(labels.cpu().data.numpy()) posteriors.append(out[0].cpu().detach().numpy()) if classifier is True: y_pred = np.array(y_pred).flatten() y_true = np.array(y_true).flatten() return posteriors, y_pred, y_true
MIT License
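A small driver loop for the `progress` helper above, mirroring how `train()` in the context calls it once per batch; the numbers are fabricated, and the function is assumed to be importable from deep_audio_features.lib.training.

import time

dataset_size, batch_size, epoch = 1000, 100, 1
batches = dataset_size // batch_size

for batch in range(1, batches + 1):
    fake_loss = 1.0 / batch
    progress(loss=fake_loss, epoch=epoch, batch=batch,
             batch_size=batch_size, dataset_size=dataset_size)
    time.sleep(0.05)   # only so the bar is visible while it fills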
vegesm/siamese-pose-estimation
src/util/pose_utils.py
keys_to_stack
python
def keys_to_stack(x):
    keys = sorted(x.keys())
    key2id = {k: i for i, k in enumerate(keys)}

    x_len = sum([v.shape[0] for v in x.values()])
    framelist = np.empty((x_len, 2), dtype='int32')

    start = 0
    for k in keys:
        cnt = x[k].shape[0]
        framelist[start:start + cnt, 1] = key2id[k]
        framelist[start:start + cnt, 0] = np.arange(cnt)
        start += cnt

    assert start == len(framelist)
    for i in range(len(keys) - 1):
        assert keys[i] < keys[i + 1]
    for i in range(len(framelist) - 1):
        assert framelist[i, 1] <= framelist[i + 1, 1]

    return framelist, keys
Creates an index that can be used to merge the data that is in baseline format.
The output is deterministic.

:returns: ``(framelist, keys)`` tuple, ``keys`` is an array of video sequence keys,
          ``framelist`` is a numpy array, each row contains a frame number and a key id
          which can be looked up in keys.
https://github.com/vegesm/siamese-pose-estimation/blob/1b585eac4d04d4b3aefebeac367be4f85f3d59e1/src/util/pose_utils.py#L59-L87
import numpy as np def get_pose_count(data_dict): pose_lens = {} for key in data_dict.keys(): subject, action, seq, camera = key if (subject, action, seq) in pose_lens: assert pose_lens[(subject, action, seq)] == len(data_dict[key]), 'Inconsistent number of poses at ' + str(key) else: pose_lens[(subject, action, seq)] = len(data_dict[key]) return sum([x for x in pose_lens.values()]) def pose_index(data_dict): pose_lens = {} for key in data_dict.keys(): subject, action, seq, camera = key if (subject, action, seq) in pose_lens: assert pose_lens[(subject, action, seq)] == len(data_dict[key]) else: pose_lens[(subject, action, seq)] = len(data_dict[key]) N = sum([x for x in pose_lens.values()]) poselist = np.zeros((N, 2), 'int64') keys = sorted(pose_lens.keys()) start = 0 for i, k in enumerate(keys): n = pose_lens[k] poselist[start:start + n, 0] = np.arange(n) poselist[start:start + n, 1] = i start += n assert start == len(poselist) for i in range(len(keys) - 1): assert keys[i] < keys[i + 1] for i in range(len(poselist) - 1): assert poselist[i, 1] <= poselist[i + 1, 1] return poselist, keys
MIT License
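A toy invocation of `keys_to_stack` above (assuming the function is in scope), using two short fake sequences; it only needs NumPy arrays with a leading frame dimension.

import numpy as np

data = {
    ('S1', 'Walking', 1): np.zeros((3, 17, 2)),   # 3 frames
    ('S1', 'Eating', 1): np.zeros((2, 17, 2)),    # 2 frames
}
framelist, keys = keys_to_stack(data)
print(keys)       # [('S1', 'Eating', 1), ('S1', 'Walking', 1)]
print(framelist)  # rows of (frame index, key id): [[0 0] [1 0] [0 1] [1 1] [2 1]]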
jarbashivemind/hivemind-voice-sat
mycroft_voice_satellite/speech/mic.py
ResponsiveRecognizer._wait_until_wake_word
python
def _wait_until_wake_word(self, source, sec_per_buffer, bus): num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE * source.SAMPLE_WIDTH) silence = get_silence(num_silent_bytes) byte_data = silence buffers_per_check = self.sec_between_ww_checks / sec_per_buffer buffers_since_check = 0.0 max_size = self.sec_to_bytes(self.SAVED_WW_SEC, source) test_size = self.sec_to_bytes(self.TEST_WW_SEC, source) said_wake_word = False energies = [] idx_energy = 0 avg_energy = 0.0 energy_avg_samples = int(5 / sec_per_buffer) counter = 0 ww_frames = deque(maxlen=7) while not said_wake_word and not self._stop_signaled: if self._skip_wake_word(source): break chunk = self.record_sound_chunk(source) ww_frames.append(chunk) energy = self.calc_energy(chunk, source.SAMPLE_WIDTH) if energy < self.energy_threshold * self.multiplier: self._adjust_threshold(energy, sec_per_buffer) if len(energies) < energy_avg_samples: energies.append(energy) avg_energy += float(energy) / energy_avg_samples else: avg_energy -= float(energies[idx_energy]) / energy_avg_samples avg_energy += float(energy) / energy_avg_samples energies[idx_energy] = energy idx_energy = (idx_energy + 1) % energy_avg_samples if energy < avg_energy * 1.5: if energy > self.energy_threshold: self.energy_threshold = energy * 1.2 counter += 1 needs_to_grow = len(byte_data) < max_size if needs_to_grow: byte_data += chunk else: byte_data = byte_data[len(chunk):] + chunk buffers_since_check += 1.0 self.feed_hotwords(chunk) if buffers_since_check > buffers_per_check: buffers_since_check -= buffers_per_check chopped = byte_data[-test_size:] if test_size < len(byte_data) else byte_data audio_data = chopped + silence said_hot_word = False for hotword in self.check_for_hotwords(audio_data, bus): said_hot_word = True engine = self.hotword_engines[hotword]["engine"] sound = self.hotword_engines[hotword]["sound"] utterance = self.hotword_engines[hotword]["utterance"] listen = self.hotword_engines[hotword]["listen"] LOG.debug("Hot Word: " + hotword) if sound: try: audio_file = resolve_resource_file(sound) source.mute() if audio_file.endswith(".wav"): play_wav(audio_file).wait() elif audio_file.endswith(".mp3"): play_mp3(audio_file).wait() elif audio_file.endswith(".ogg"): play_ogg(audio_file).wait() else: play_audio(audio_file).wait() source.unmute() except Exception as e: LOG.warning(e) payload = { 'hotword': hotword, 'start_listening': listen, 'sound': sound, "engine": engine.__class__.__name__ } bus.emit("recognizer_loop:hotword", payload) if utterance: payload = { 'utterances': [utterance] } bus.emit("recognizer_loop:utterance", payload) audio = None mtd = self._compile_metadata(hotword) if self.save_wake_words: audio = self._create_audio_data(byte_data, source) if not isdir(self.saved_wake_words_dir): os.mkdir(self.saved_wake_words_dir) fn = join( self.saved_wake_words_dir, '_'.join(str(mtd[k]) for k in sorted(mtd)) + '.wav' ) with open(fn, 'wb') as f: f.write(audio.get_wav_data()) fn = join( self.saved_wake_words_dir, '_'.join(str(mtd[k]) for k in sorted(mtd)) + '.json' ) with open(fn, 'w') as f: json.dump(mtd, f, indent=4) if listen: said_wake_word = True if said_hot_word: byte_data = silence
Listen continuously on source until a wake word is spoken

Args:
    source (AudioSource): Source producing the audio chunks
    sec_per_buffer (float): Fractional number of seconds in each chunk
https://github.com/jarbashivemind/hivemind-voice-sat/blob/cce575392710c5de636150e20bd60f2fe9a6f865/mycroft_voice_satellite/speech/mic.py#L440-L594
import audioop from time import sleep, time as get_time from collections import deque import os from os.path import isdir, join import pyaudio import speech_recognition from hashlib import md5 from speech_recognition import ( Microphone, AudioSource, AudioData ) import json from threading import Lock from mycroft_voice_satellite.configuration import CONFIGURATION from mycroft_voice_satellite.speech.signal import check_for_signal from mycroft_voice_satellite.playback import play_audio, play_mp3, play_ogg, play_wav, resolve_resource_file from ovos_utils.log import LOG from ovos_utils.lang.phonemes import get_phonemes class MutableStream: def __init__(self, wrapped_stream, format, muted=False): assert wrapped_stream is not None self.wrapped_stream = wrapped_stream self.muted = muted if muted: self.mute() self.SAMPLE_WIDTH = pyaudio.get_sample_size(format) self.muted_buffer = b''.join([b'\x00' * self.SAMPLE_WIDTH]) self.read_lock = Lock() def mute(self): with self.read_lock: self.muted = True self.wrapped_stream.stop_stream() def unmute(self): with self.read_lock: self.muted = False self.wrapped_stream.start_stream() def read(self, size, of_exc=False): frames = deque() remaining = size with self.read_lock: while remaining > 0: if self.muted: return self.muted_buffer to_read = min(self.wrapped_stream.get_read_available(), remaining) if to_read <= 0: sleep(.01) continue result = self.wrapped_stream.read(to_read, exception_on_overflow=of_exc) frames.append(result) remaining -= to_read input_latency = self.wrapped_stream.get_input_latency() if input_latency > 0.2: LOG.warning("High input latency: %f" % input_latency) audio = b"".join(list(frames)) return audio def close(self): self.wrapped_stream.close() self.wrapped_stream = None def is_stopped(self): try: return self.wrapped_stream.is_stopped() except Exception as e: LOG.error(repr(e)) return True def stop_stream(self): return self.wrapped_stream.stop_stream() class MutableMicrophone(Microphone): def __init__(self, device_index=None, sample_rate=16000, chunk_size=1024, mute=False): Microphone.__init__(self, device_index=device_index, sample_rate=sample_rate, chunk_size=chunk_size) self.muted = False if mute: self.mute() def __enter__(self): return self._start() def _start(self): assert self.stream is None, "This audio source is already inside a context manager" self.audio = pyaudio.PyAudio() self.stream = MutableStream(self.audio.open( input_device_index=self.device_index, channels=1, format=self.format, rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK, input=True, ), self.format, self.muted) return self def __exit__(self, exc_type, exc_value, traceback): return self._stop() def _stop(self): try: if not self.stream.is_stopped(): self.stream.stop_stream() self.stream.close() except Exception: LOG.exception('Failed to stop mic input stream') self.stream = None self.audio.terminate() def restart(self): self._stop() self._start() def mute(self): self.muted = True if self.stream: self.stream.mute() def unmute(self): self.muted = False if self.stream: self.stream.unmute() def is_muted(self): return self.muted def get_silence(num_bytes): return b'\0' * num_bytes class ResponsiveRecognizer(speech_recognition.Recognizer): SILENCE_SEC = 0.01 def __init__(self, hot_word_engines): self.config = CONFIGURATION listener_config = self.config.get('listener') self.overflow_exc = listener_config.get('overflow_exception', False) speech_recognition.Recognizer.__init__(self) self.audio = pyaudio.PyAudio() self.multiplier = listener_config.get('multiplier') 
self.energy_ratio = listener_config.get('energy_ratio') self.sec_between_ww_checks = listener_config.get("sec_between_ww_checks", 0.2) self.recording_timeout_with_silence = listener_config.get("recording_timeout_with_silence", 3.0) self.recording_timeout = listener_config.get("recording_timeout", 10.0) self.min_loud_sec = listener_config.get("min_loud_sec", 0.5) self.min_silence_at_end = listener_config.get("min_silence_at_end", 0.25) self.ambient_noise_adjustment_time = listener_config.get( "ambient_noise_adjustment_time", 0.5) self.auto_ambient_noise_adjustment = listener_config.get( "auto_ambient_noise_adjustment", False) data_path = os.path.expanduser(self.config["data_dir"]) self.save_wake_words = listener_config.get('record_wake_words', False) self.saved_wake_words_dir = join(data_path, 'hotwords') if not os.path.isdir(data_path): os.makedirs(data_path) if not os.path.isdir(self.saved_wake_words_dir): os.makedirs(self.saved_wake_words_dir) self._stop_signaled = False self._listen_triggered = False self._should_adjust_noise = False self.hotword_engines = hot_word_engines or {} num_phonemes = 10 for w in self.hotword_engines: phon = get_phonemes(w).split(" ") if len(phon) > num_phonemes: num_phonemes = len(phon) len_phoneme = listener_config.get('phoneme_duration', 120) / 1000.0 self.TEST_WW_SEC = num_phonemes * len_phoneme self.SAVED_WW_SEC = max(3, self.TEST_WW_SEC) def feed_hotwords(self, chunk): for hw in self.hotword_engines: self.hotword_engines[hw]["engine"].update(chunk) def record_sound_chunk(self, source): return source.stream.read(source.CHUNK, self.overflow_exc) @staticmethod def calc_energy(sound_chunk, sample_width): return audioop.rms(sound_chunk, sample_width) def _record_phrase( self, source, sec_per_buffer, stream=None, ww_frames=None ): num_loud_chunks = 0 noise = 0 max_noise = 25 min_noise = 0 silence_duration = 0 def increase_noise(level): if level < max_noise: return level + 200 * sec_per_buffer return level def decrease_noise(level): if level > min_noise: return level - 100 * sec_per_buffer return level min_loud_chunks = int(self.min_loud_sec / sec_per_buffer) max_chunks = int(self.recording_timeout / sec_per_buffer) num_chunks = 0 max_chunks_of_silence = int(self.recording_timeout_with_silence / sec_per_buffer) byte_data = get_silence(source.SAMPLE_WIDTH) if stream: stream.stream_start() phrase_complete = False while num_chunks < max_chunks and not phrase_complete: if ww_frames: chunk = ww_frames.popleft() else: chunk = self.record_sound_chunk(source) byte_data += chunk num_chunks += 1 if stream: stream.stream_chunk(chunk) energy = self.calc_energy(chunk, source.SAMPLE_WIDTH) test_threshold = self.energy_threshold * self.multiplier is_loud = energy > test_threshold if is_loud: noise = increase_noise(noise) num_loud_chunks += 1 else: noise = decrease_noise(noise) self._adjust_threshold(energy, sec_per_buffer) was_loud_enough = num_loud_chunks > min_loud_chunks quiet_enough = noise <= min_noise if quiet_enough: silence_duration += sec_per_buffer if silence_duration < self.min_silence_at_end: quiet_enough = False else: silence_duration = 0 recorded_too_much_silence = num_chunks > max_chunks_of_silence if quiet_enough and (was_loud_enough or recorded_too_much_silence): phrase_complete = True if check_for_signal('buttonPress'): phrase_complete = True return byte_data @staticmethod def sec_to_bytes(sec, source): return int(sec * source.SAMPLE_RATE) * source.SAMPLE_WIDTH def _skip_wake_word(self, source): signaled = False if check_for_signal('startListening') or 
self._listen_triggered: signaled = True elif check_for_signal('buttonPress', 1): sleep(0.25) if check_for_signal('buttonPress'): LOG.debug("Button Pressed, wakeword not needed") signaled = True if signaled: LOG.info("Listen signal detected") sound = self.config["listener"].get('listen_sound') audio_file = resolve_resource_file(sound) if audio_file: try: source.mute() if audio_file.endswith(".wav"): play_wav(audio_file).wait() elif audio_file.endswith(".mp3"): play_mp3(audio_file).wait() elif audio_file.endswith(".ogg"): play_ogg(audio_file).wait() else: play_audio(audio_file).wait() source.unmute() except Exception as e: LOG.warning(e) return signaled def stop(self): self._stop_signaled = True def _compile_metadata(self, hw): ww_module = self.hotword_engines[hw]["engine"].__class__.__name__ if ww_module == 'PreciseHotword': model_path = self.hotword_engines[hw]["engine"].precise_model with open(model_path, 'rb') as f: model_hash = md5(f.read()).hexdigest() else: model_hash = '0' return { 'name': self.hotword_engines[hw]["engine"].key_phrase.replace(' ', '-'), 'engine': md5(ww_module.encode('utf-8')).hexdigest(), 'time': str(int(1000 * get_time())), 'model': str(model_hash) } def trigger_listen(self): LOG.debug('Listen triggered from external source.') self._listen_triggered = True def trigger_ambient_noise_adjustment(self): LOG.debug("Ambient noise adjustment requested from external source") self._should_adjust_noise = True def _adjust_ambient_noise(self, source, time=None): time = time or self.ambient_noise_adjustment_time LOG.info("Adjusting for ambient noise, be silent!!!") self.adjust_for_ambient_noise(source, time) LOG.info("Ambient noise profile has been created") self._should_adjust_noise = False
Apache License 2.0
lemon24/reader
src/reader/core.py
Reader.update_feeds
python
def update_feeds(
    self,
    *,
    new: Optional[bool] = None,
    workers: int = 1,
) -> None:
    for url, value in self.update_feeds_iter(new=new, workers=workers):
        if isinstance(value, ParseError):
            log.exception(
                "update feed %r: error while getting/parsing feed, "
                "skipping; exception: %r",
                url,
                value.__cause__,
                exc_info=value,
            )
            continue

        assert not isinstance(value, Exception), value
Update all the feeds that have updates enabled. Silently skip feeds that raise :exc:`ParseError`. Roughly equivalent to ``for _ in reader.update_feeds_iter(...): pass``. Args: new (bool or None): Only update feeds that have never been updated (if true) / have been updated before (if false). Defaults to None. workers (int): Number of threads to use when getting the feeds. Raises: StorageError .. versionchanged:: 1.11 Only update the feeds that have updates enabled. .. versionchanged:: 1.15 Update entries whenever their content changes, regardless of their :attr:`~Entry.updated` date. Content-only updates (not due to an :attr:`~Entry.updated` change) are limited to 24 consecutive updates, to prevent spurious updates for entries whose content changes excessively (for example, because it includes the current time). Previously, entries would be updated only if the entry :attr:`~Entry.updated` was *newer* than the stored one. .. versionchanged:: 2.0 Removed the ``new_only`` parameter. .. versionchanged:: 2.0 All parameters are keyword-only.
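A minimal usage sketch of this method; the storage path and feed URL below are placeholders, not values from the source.

from reader import make_reader

reader = make_reader("db.sqlite")                  # storage path is a placeholder
reader.add_feed("https://example.com/feed.xml")    # example URL, not a real feed
reader.update_feeds(workers=4)                     # feeds raising ParseError are logged and skipped
reader.close()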
https://github.com/lemon24/reader/blob/b32fa7dbacadf5b4476c945241a717376b05b5da/src/reader/core.py#L730-L784
import builtins import itertools import logging import numbers import warnings from contextlib import nullcontext from datetime import datetime from datetime import timezone from types import MappingProxyType from typing import Any from typing import Callable from typing import Iterable from typing import Iterator from typing import List from typing import Mapping from typing import MutableSequence from typing import Optional from typing import overload from typing import Tuple from typing import TypeVar from typing import Union from typing_extensions import Literal import reader._updater from ._parser import default_parser from ._parser import Parser from ._parser import SESSION_TIMEOUT from ._requests_utils import TimeoutType from ._search import Search from ._storage import Storage from ._types import DEFAULT_RESERVED_NAME_SCHEME from ._types import EntryData from ._types import EntryFilterOptions from ._types import EntryUpdateIntent from ._types import FeedFilterOptions from ._types import FeedForUpdate from ._types import FeedUpdateIntent from ._types import fix_datetime_tzinfo from ._types import NameScheme from ._types import ParsedFeed from ._utils import make_pool_map from ._utils import zero_or_one from .exceptions import EntryNotFoundError from .exceptions import FeedMetadataNotFoundError from .exceptions import FeedNotFoundError from .exceptions import InvalidPluginError from .exceptions import ParseError from .exceptions import SearchNotEnabledError from .plugins import _DEFAULT_PLUGINS from .plugins import _PLUGINS from .types import _entry_argument from .types import _feed_argument from .types import Entry from .types import EntryCounts from .types import EntryInput from .types import EntrySearchCounts from .types import EntrySearchResult from .types import EntrySortOrder from .types import EntryUpdateStatus from .types import Feed from .types import FeedCounts from .types import FeedInput from .types import FeedSortOrder from .types import JSONType from .types import MISSING from .types import MissingType from .types import SearchSortOrder from .types import TagFilterInput from .types import UpdatedFeed from .types import UpdateResult log = logging.getLogger('reader') _T = TypeVar('_T') _U = TypeVar('_U') ReaderPluginType = Callable[['Reader'], None] AfterEntryUpdateHook = Callable[['Reader', EntryData, EntryUpdateStatus], None] AfterFeedUpdateHook = Callable[['Reader', str], None] def make_reader( url: str, *, feed_root: Optional[str] = None, plugins: Iterable[Union[str, ReaderPluginType]] = _DEFAULT_PLUGINS, session_timeout: TimeoutType = SESSION_TIMEOUT, reserved_name_scheme: Mapping[str, str] = DEFAULT_RESERVED_NAME_SCHEME, search_enabled: Union[bool, None, Literal['auto']] = 'auto', _storage: Optional[Storage] = None, _storage_factory: Any = None, ) -> 'Reader': if search_enabled not in ('auto', True, False, None): raise ValueError("search_enabled should be one of ('auto', True, False, None)") parser = default_parser(feed_root, session_timeout=session_timeout) try: name_scheme = NameScheme.from_value(reserved_name_scheme) except Exception as e: raise ValueError(f"invalid reserved name scheme: {reserved_name_scheme}") from e plugin_funcs: List[ReaderPluginType] = [] for plugin in plugins: if isinstance(plugin, str): if plugin not in _PLUGINS: raise InvalidPluginError(f"no such built-in plugin: {plugin!r}") plugin_func = _PLUGINS[plugin] else: plugin_func = plugin plugin_funcs.append(plugin_func) storage = _storage or Storage(url, factory=_storage_factory) try: search = 
Search(storage) if search_enabled is True: search.check_dependencies() search.enable() elif search_enabled is False: search.disable() reader = Reader( storage, search, parser, name_scheme, _enable_search=(search_enabled == 'auto'), _called_directly=False, ) for plugin_func in plugin_funcs: plugin_func(reader) except BaseException: storage.close() raise return reader class Reader: def __init__( self, _storage: Storage, _search: Search, _parser: Parser, _reserved_name_scheme: NameScheme, _enable_search: bool = False, _called_directly: bool = True, ): self._storage = _storage self._search = _search self._parser = _parser self._reserved_name_scheme = _reserved_name_scheme self._enable_search = _enable_search self._updater = reader._updater self.after_entry_update_hooks: MutableSequence[AfterEntryUpdateHook] = [] self.after_feed_update_hooks: MutableSequence[AfterFeedUpdateHook] = [] if _called_directly: warnings.warn( "Reader objects should be created using make_reader(); the Reader " "constructor is not stable yet and may change without any notice.", ) def close(self) -> None: self._storage.close() def add_feed(self, feed: FeedInput, *, allow_invalid_url: bool = False) -> None: url = _feed_argument(feed) if not allow_invalid_url: self._parser.validate_url(url) now = self._now() self._storage.add_feed(url, now) def delete_feed(self, feed: FeedInput) -> None: url = _feed_argument(feed) self._storage.delete_feed(url) def change_feed_url( self, old: FeedInput, new: FeedInput, *, allow_invalid_url: bool = False ) -> None: old_str = _feed_argument(old) new_str = _feed_argument(new) if not allow_invalid_url: self._parser.validate_url(new_str) self._storage.change_feed_url(old_str, new_str) def get_feeds( self, *, feed: Optional[FeedInput] = None, tags: TagFilterInput = None, broken: Optional[bool] = None, updates_enabled: Optional[bool] = None, sort: FeedSortOrder = 'title', limit: Optional[int] = None, starting_after: Optional[FeedInput] = None, ) -> Iterable[Feed]: filter_options = FeedFilterOptions.from_args( feed, tags, broken, updates_enabled ) if sort not in ('title', 'added'): raise ValueError("sort should be one of ('title', 'added')") if limit is not None: if not isinstance(limit, numbers.Integral) or limit < 1: raise ValueError("limit should be a positive integer") rv = self._storage.get_feeds( filter_options, sort, limit, _feed_argument(starting_after) if starting_after else None, ) for rv_feed in rv: yield fix_datetime_tzinfo(rv_feed, 'updated', 'added', 'last_updated') @overload def get_feed(self, feed: FeedInput) -> Feed: ... @overload def get_feed( self, feed: FeedInput, default: _T ) -> Union[Feed, _T]: ... 
def get_feed( self, feed: FeedInput, default: Union[MissingType, _T] = MISSING ) -> Union[Feed, _T]: return zero_or_one( self.get_feeds(feed=feed), lambda: FeedNotFoundError(_feed_argument(feed)), default, ) def get_feed_counts( self, *, feed: Optional[FeedInput] = None, tags: TagFilterInput = None, broken: Optional[bool] = None, updates_enabled: Optional[bool] = None, ) -> FeedCounts: filter_options = FeedFilterOptions.from_args( feed, tags, broken, updates_enabled ) return self._storage.get_feed_counts(filter_options) def set_feed_user_title(self, feed: FeedInput, title: Optional[str]) -> None: url = _feed_argument(feed) return self._storage.set_feed_user_title(url, title) def enable_feed_updates(self, feed: FeedInput) -> None: url = _feed_argument(feed) self._storage.set_feed_updates_enabled(url, True) def disable_feed_updates(self, feed: FeedInput) -> None: url = _feed_argument(feed) self._storage.set_feed_updates_enabled(url, False)
BSD 3-Clause New or Revised License
fnndsc/chris_ultron_backend
chris_backend/pacsfiles/views.py
PACSFileResource.get
python
def get(self, request, *args, **kwargs): pacs_file = self.get_object() return Response(pacs_file.fname)
Overridden to be able to make a GET request to an actual file resource.
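A hypothetical client-side sketch of fetching the bytes this view serves; the host, endpoint path, and credentials are all assumptions, not part of the source.

import requests

url = "http://localhost:8000/api/v1/pacsfiles/3/file_resource/"   # made-up endpoint
resp = requests.get(url, auth=("chris", "chris1234"))              # made-up credentials
with open("series.dcm", "wb") as f:
    f.write(resp.content)   # raw bytes rendered by BinaryFileRenderer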
https://github.com/fnndsc/chris_ultron_backend/blob/4d45e859aa9ee3789632e5ffa5e89502fafc37bb/chris_backend/pacsfiles/views.py#L79-L84
from rest_framework import generics, permissions from rest_framework.reverse import reverse from rest_framework.response import Response from collectionjson import services from core.renderers import BinaryFileRenderer from .models import PACSFile, PACSFileFilter from .serializers import PACSFileSerializer from .permissions import IsChrisOrReadOnly class PACSFileList(generics.ListCreateAPIView): http_method_names = ['get', 'post'] queryset = PACSFile.objects.all() serializer_class = PACSFileSerializer permission_classes = (permissions.IsAuthenticated, IsChrisOrReadOnly,) def list(self, request, *args, **kwargs): response = super(PACSFileList, self).list(request, *args, **kwargs) query_list = [reverse('pacsfile-list-query-search', request=request)] response = services.append_collection_querylist(response, query_list) template_data = {'path': '', 'PatientID': '', 'PatientName': '', 'PatientBirthDate': '', 'PatientAge': '', 'PatientSex': '', 'StudyDate': '', 'AccessionNumber': '', 'Modality': '', 'ProtocolName': '', 'StudyInstanceUID': '', 'StudyDescription': '', 'SeriesInstanceUID': '', 'SeriesDescription': '', 'pacs_name': ''} return services.append_collection_template(response, template_data) def create(self, request, *args, **kwargs): self.request.data.pop('fname', None) return super(PACSFileList, self).create(request, *args, **kwargs) class PACSFileListQuerySearch(generics.ListAPIView): http_method_names = ['get'] serializer_class = PACSFileSerializer queryset = PACSFile.objects.all() permission_classes = (permissions.IsAuthenticated,) filterset_class = PACSFileFilter class PACSFileDetail(generics.RetrieveAPIView): http_method_names = ['get'] queryset = PACSFile.objects.all() serializer_class = PACSFileSerializer permission_classes = (permissions.IsAuthenticated,) class PACSFileResource(generics.GenericAPIView): http_method_names = ['get'] queryset = PACSFile.objects.all() renderer_classes = (BinaryFileRenderer,) permission_classes = (permissions.IsAuthenticated,)
MIT License
py-lidbox/lidbox
lidbox/data/steps.py
augment_signals
python
def augment_signals(ds, augment_configs): augmented_datasets = [] for conf in augment_configs: aug_kwargs = {k: v for k, v in conf.items() if k not in {"type", "split"}} if conf["type"] == "random_resampling": augmented_datasets.append(augment_by_random_resampling(ds, **aug_kwargs)) elif conf["type"] == "additive_noise": augmented_datasets.append(augment_by_additive_noise(ds, **aug_kwargs)) else: logger.warning("Unknown signal augmentation type '%s', skipping", conf["type"]) return tf.data.experimental.sample_from_datasets([ds] + augmented_datasets)
Apply all augmentation methods specified in 'augment_configs' and return a dataset where all elements are drawn randomly from the augmented and unaugmented datasets.
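A sketch of what an augmentation config list might look like when passed to this step; only the "type" and "split" keys appear in the source, so the per-method keyword arguments here are illustrative assumptions.

augment_configs = [
    {"type": "random_resampling", "split": "train", "range": [0.9, 1.1]},        # kwargs assumed
    {"type": "additive_noise", "split": "train", "snr_min": 5, "snr_max": 20},   # kwargs assumed
]
ds = augment_signals(ds, augment_configs)   # elements sampled randomly from original + augmented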
https://github.com/py-lidbox/lidbox/blob/e60d5ad2ff4d6076f9afaa780972c0301ee71ac8/lidbox/data/steps.py#L215-L229
import collections import io import json import logging import os import shutil import time logger = logging.getLogger(__name__) import tensorflow as tf TF_VERSION_MAJOR, TF_VERSION_MINOR = tuple(int(x) for x in tf.version.VERSION.split(".")[:2]) import lidbox import lidbox.data.tf_utils as tf_utils import lidbox.features as features import lidbox.features.audio as audio_features if lidbox.DEBUG: TF_AUTOTUNE = None tf.debugging.set_log_device_placement(True) else: TF_AUTOTUNE = tf.data.experimental.AUTOTUNE Step = collections.namedtuple("Step", ("key", "kwargs")) def from_steps(steps): logger.info("Initializing and preparing tf.data.Dataset instance from %d steps:\n %s", len(steps), "\n ".join(s.key for s in steps)) if steps[0].key != "initialize": logger.critical("When constructing a dataset, the first step must be 'initialize' but it was '%s'. The 'initialize' step is needed for first loading all metadata such as the utterance_id to wavpath mappings.", steps[0].key) return ds = initialize(**steps[0].kwargs) for step_num, step in enumerate(steps[1:], start=2): if step is None: logger.warning("Skipping no-op step with value None") continue step_fn = VALID_STEP_FUNCTIONS.get(step.key) if step_fn is None: logger.error("Skipping unknown step '%s'.", step.key) continue logger.info("Applying step number %d: '%s'.", step_num, step.key) ds = step_fn(ds, **step.kwargs) if not isinstance(ds, tf.data.Dataset): logger.critical("Failed to apply step '%s', it did not return a tf.data.Dataset instance but instead returned '%s'.", step.key, repr(ds)) return logger.info("All %d steps completed, returning prepared tf.data.Dataset instance.", len(steps)) return ds def pre_initialize(meta, config, labels): index2id = list(enumerate(meta["id"])) modified = False if not config.get("allow_unknown_labels", False): logger.info("'allow_unknown_labels' is False, dropping all utterances which are not in the set of all labels.") all_labels = set(labels) label2invalid = collections.defaultdict(list) for i, u in index2id: l = meta["label"][i] if l not in all_labels: label2invalid[l].append(u) if label2invalid: logger.warning( "%d invalid labels were found, with amount of utterances per label:\n %s", len(label2invalid), "\n ".join("{:12s}: {:12d}".format(l, len(u)) for l, u in label2invalid.items())) invalid_utts = {u for utts in label2invalid.values() for u in utts} logger.info("Dropping %d invalid utterances.", len(invalid_utts)) index2id = [(i, u) for i, u in index2id if u not in invalid_utts] modified = True else: logger.info("All utterances have valid labels.") if config.get("shuffle_utterances", False): logger.info("'shuffle_utterances' is True, shuffling utterance id list.") from random import shuffle shuffle(index2id) modified = True if modified: logger.info("Utterance id list was modified, updating all metadata to ensure correct order.") meta = {k: [v[i] for i, _ in index2id] for k, v in meta.items()} return meta def _feature_extraction_kwargs_to_args(config): valid_args = [ "type", "spectrogram", "melspectrogram", "mfcc", "db_spectrogram", "sample_minmax_scaling", "window_normalization", ] return [config.get(arg, {}) for arg in valid_args] def _element_shapes_dict(x): return {k: list(tf.shape(v).numpy()) for k, v in x.items()} def _dict_to_logstring(d): return "\n ".join("{}: {}".format(k, p) for k, p in d.items()) def _left_pad_lines(s, pad): return '\n'.join(pad * ' ' + line for line in s.splitlines()) def _get_device_or_default(config): tf_device = "/CPU" gpu_devices = 
tf.config.experimental.list_physical_devices("GPU") if "device" in config: tf_device = config["device"] elif gpu_devices: tf_device = "/GPU" return tf_device def append_predictions(ds, predictions): def _append_predictions(x, p): return dict(x, prediction=p) predictions_ds = tf.data.Dataset.from_tensor_slices(predictions) return (tf.data.Dataset .zip((ds, predictions_ds)) .map(_append_predictions, num_parallel_calls=TF_AUTOTUNE)) def apply_filters(ds, config): logger.info("Applying filters on every element in the dataset, keeping only elements which match the given config:\n %s", _dict_to_logstring(config)) filters = [] if "equal" in config: key = config["equal"]["key"] value = config["equal"]["value"] fn = (lambda x, k=key, v=value: k not in x or tf.math.reduce_all(x[k] == v)) filters.append((fn, key)) if "min_signal_length_ms" in config: key = "signal" min_signal_length_sec = tf.constant(1e-3 * config["min_signal_length_ms"], tf.float32) tf.debugging.assert_scalar(min_signal_length_sec, message="min_signal_length_ms must be a scalar") fn = (lambda x, k=key, v=min_signal_length_sec: k not in x or tf.size(x[k]) >= tf.cast(tf.cast(x["sample_rate"], tf.float32) * v, tf.int32)) filters.append((fn, "min_signal_length_sec")) if "min_shape" in config: key = config["min_shape"]["key"] min_shape = tf.constant(config["min_shape"]["shape"]) fn = (lambda x, k=key, v=min_shape: k not in x or tf.math.reduce_all(tf.shape(x[k]) >= v)) filters.append((fn, key)) if filters: logger.info("Using %d different filters:\n %s", len(filters), "\n ".join(name for fn, name in filters)) else: logger.warning("No filters defined, skipping filtering") return ds def all_ok(x): ok = True for fn, _ in filters: ok = ok and fn(x) return ok return ds.filter(all_ok) def apply_vad(ds): logger.info("Using previously computed voice activity decisions to drop signal frames marked as non-speech.") drop_keys_after_done = {"vad_frame_length_ms", "vad_is_speech"} def filter_signals_by_vad_decisions(x): vad_frame_length_sec = 1e-3 * tf.cast(x["vad_frame_length_ms"], tf.float32) vad_frame_length = tf.cast(tf.cast(x["sample_rate"], tf.float32) * vad_frame_length_sec, tf.int32) frames = tf.signal.frame(x["signal"], vad_frame_length, vad_frame_length, axis=0) voiced_signal = tf.reshape(frames[x["vad_is_speech"]], [-1]) return {k: v for k, v in dict(x, signal=voiced_signal).items() if k not in drop_keys_after_done} return ds.map(filter_signals_by_vad_decisions, num_parallel_calls=TF_AUTOTUNE) def as_supervised(ds): logger.info("Converting all elements to tuple pairs (inputs, targets) and dropping all other values.") def _as_supervised(x): return x["input"], x["target"] return ds.map(_as_supervised, num_parallel_calls=TF_AUTOTUNE)
MIT License
robustness-gym/meerkat
meerkat/nn/segmentation_column.py
_convert_rle2mask
python
def _convert_rle2mask( batch: DataPanel, input_columns: List[str], orig_dim, resize_dim=None, to_nan: bool = False, ): rle_data = batch[input_columns[0]].data height, width = orig_dim masks = [] for rle in rle_data: mask = np.zeros(width * height) if rle != "-1": array = np.asarray([int(x) for x in rle.split()]) starts = array[0::2] lengths = array[1::2] current_position = 0 for index, start in enumerate(starts): current_position += start mask[current_position : current_position + lengths[index]] = 1 current_position += lengths[index] mask = mask.reshape(width, height) if resize_dim is not None: mask = cv2.resize(mask, resize_dim, interpolation=cv2.INTER_CUBIC) if to_nan: mask[mask == 0] = np.nan masks.append(mask) return masks
Convert run length encoding (RLE) to 2D binary mask. Args: batch (DataPanel): DataPanel. input_columns: List of columns containing Run Length Encodings. orig_dim (Tuple[int]): Shape of the image. resize_dim (Tuple[int]): Shape to resize to. Resizing is done with cubic interpolation. to_nan (bool, optional): Convert 0s to np.nan. Returns: List[np.ndarray]: List of np.ndarray containing binary masks.
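A standalone sketch of the same run-length decoding idea on a toy 4x4 mask, outside the DataPanel machinery; the RLE string and dimensions are made up.

import numpy as np

height = width = 4
rle = "3 2 5 3"                          # alternating (offset, run length) pairs
mask = np.zeros(width * height)
array = np.asarray([int(x) for x in rle.split()])
pos = 0
for start, length in zip(array[0::2], array[1::2]):
    pos += start                         # advance by the offset
    mask[pos:pos + length] = 1           # fill the run
    pos += length
mask = mask.reshape(width, height)       # -> 4x4 binary mask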
https://github.com/robustness-gym/meerkat/blob/e5808ffa82ce3bd03d0e58fc978d39c191f60571/meerkat/nn/segmentation_column.py#L98-L146
from __future__ import annotations import itertools from typing import List, Sequence, Union import numpy as np import pandas as pd import torch from tqdm import tqdm from meerkat.columns.tensor_column import TensorColumn from meerkat.datapanel import DataPanel from meerkat.nn.prediction_column import ( ClassificationOutputColumn, _ClassifierOutputType, ) from meerkat.tools.lazy_loader import LazyLoader cv2 = LazyLoader("cv2") Columnable = Union[Sequence, np.ndarray, pd.Series, torch.Tensor] class SegmentationOutputColumn(ClassificationOutputColumn): def __init__( self, logits: Columnable = None, probs: Columnable = None, preds: Columnable = None, num_classes: int = None, *args, **kwargs, ): super(SegmentationOutputColumn, self).__init__( logits=logits, probs=probs, preds=preds, num_classes=num_classes, *args, **kwargs, ) def binarymask( self, class_index: int ) -> TensorColumn: if self.num_classes > 2 and class_index is None: raise ValueError("Provide class_index in case of multi-class segmentation") if self.num_classes == 2: mask = TensorColumn( self.data if self._ctype == _ClassifierOutputType.PREDICTION else self.predictions().data ) else: preds = ( self.data if self._ctype == _ClassifierOutputType.PREDICTION else self.predictions().data ) mask = TensorColumn(torch.where(preds == class_index, 1, 0)) return mask @staticmethod def rle2mask( dataset: DataPanel, input_columns: List[str], orig_dim, resize_dim=None, to_nan: bool = False, batch_size: int = 32, ) -> TensorColumn: masks = [] for batch in tqdm( dataset[input_columns].batch(batch_size), total=(len(dataset) // batch_size + int(len(dataset) % batch_size != 0)), ): batch_masks = _convert_rle2mask( batch, input_columns, orig_dim, resize_dim, to_nan ) masks = list(itertools.chain(masks, batch_masks)) masks_col = TensorColumn(masks) dataset.add_column(f"Binary Mask (from {input_columns[0]})", masks_col) return masks_col
Apache License 2.0
mithi/hexy
hexy/robot/pro.py
HexapodPro.lean_back
python
def lean_back(self, offset = 45, back_knee = 0, middle_knee = 40, raised = -30, t = 0.2): self.left_back.replant(raised, back_knee, offset, t) self.right_back.replant(raised, back_knee, -offset, t) self.left_middle.replant(raised, middle_knee, -offset, t) self.right_middle.replant(raised, middle_knee, offset, t) self.left_front.pose(-offset, 0, 0) self.right_front.pose(offset, 0, 0) sleep(t)
Brings the back legs even further back and the middle legs to the front, then raises its front legs in the air.
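A usage sketch; how the HexapodPro instance is constructed (servo ports, wiring) is handled by the Hexapod base class and is assumed here.

from hexy.robot.pro import HexapodPro

hexy = HexapodPro()                 # constructor arguments assumed/omitted
hexy.lean_back(offset=45, t=0.3)    # back legs pushed back, front legs raised
hexy.wave(repetitions=3)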
https://github.com/mithi/hexy/blob/a4f4dfcdf15a36b99521165b8bf013e51cc7f682/hexy/robot/pro.py#L49-L61
from hexapod import Hexapod from time import sleep class HexapodPro(Hexapod): def shake_head(self, maxx = 60, repetitions = 5, t = 0.2): for r in xrange(repetitions): self.look(maxx, t) self.look(-maxx, t) self.look() def point(self, t = 0.75): self.left_front.hip.pose(-45) self.left_front.knee.pose(-50) self.left_front.ankle.pose(-55) sleep(t) def wave(self, repetitions = 5, t = 0.2): self.left_front.ankle.pose() self.left_front.knee.pose(-50) for r in xrange(repetitions): self.left_front.hip.pose(-45) sleep(t) self.left_front.hip.pose(45) sleep(t) def dance_twist(self, maxx = 45, step = 5, repetitions = 3, t = 0.01): self.squat(60, t) for r in xrange(repetitions): for angle in xrange(-maxx, maxx, step): self.twist_hip(angle, t) for angle in xrange(maxx, -maxx, -step): self.twist_hip(angle, t) self.twist_hip() self.squat(60, t)
MIT License
mechanicalsoup/mechanicalsoup
mechanicalsoup/browser.py
Browser.post
python
def post(self, *args, **kwargs): response = self.session.post(*args, **kwargs) Browser.add_soup(response, self.soup_config) return response
Straightforward wrapper around `requests.Session.post <http://docs.python-requests.org/en/master/api/#requests.Session.post>`__. :return: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`__ object with a *soup*-attribute added by :func:`add_soup`.
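A usage sketch; the target URL and form fields are placeholders.

import mechanicalsoup

browser = mechanicalsoup.Browser()
response = browser.post("https://httpbin.org/post", data={"custname": "example"})
print(response.status_code)
print(response.soup)   # parsed document for HTML responses, None otherwise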
https://github.com/mechanicalsoup/mechanicalsoup/blob/72783b827b176bec8a3f9672a5222ce835b72a82/mechanicalsoup/browser.py#L154-L164
import os import tempfile import urllib import weakref import webbrowser import bs4 import bs4.dammit import requests from .__version__ import __title__, __version__ from .form import Form from .utils import LinkNotFoundError class Browser: def __init__(self, session=None, soup_config={'features': 'lxml'}, requests_adapters=None, raise_on_404=False, user_agent=None): self.raise_on_404 = raise_on_404 self.session = session or requests.Session() if hasattr(weakref, 'finalize'): self._finalize = weakref.finalize(self.session, self.close) else: self._finalize = self.close self.set_user_agent(user_agent) if requests_adapters is not None: for adaptee, adapter in requests_adapters.items(): self.session.mount(adaptee, adapter) self.soup_config = soup_config or dict() @staticmethod def __looks_like_html(response): text = response.text.lstrip().lower() return text.startswith('<html') or text.startswith('<!doctype') @staticmethod def add_soup(response, soup_config): if ("text/html" in response.headers.get("Content-Type", "") or Browser.__looks_like_html(response)): http_encoding = ( response.encoding if 'charset' in response.headers.get("Content-Type", "") else None ) html_encoding = bs4.dammit.EncodingDetector.find_declared_encoding( response.content, is_html=True ) encoding = http_encoding if http_encoding else html_encoding response.soup = bs4.BeautifulSoup( response.content, from_encoding=encoding, **soup_config ) else: response.soup = None def set_cookiejar(self, cookiejar): self.session.cookies = cookiejar def get_cookiejar(self): return self.session.cookies def set_user_agent(self, user_agent): if user_agent is None: requests_ua = requests.utils.default_user_agent() user_agent = f'{requests_ua} ({__title__}/{__version__})' self.session.headers['User-agent'] = user_agent def request(self, *args, **kwargs): response = self.session.request(*args, **kwargs) Browser.add_soup(response, self.soup_config) return response def get(self, *args, **kwargs): response = self.session.get(*args, **kwargs) if self.raise_on_404 and response.status_code == 404: raise LinkNotFoundError() Browser.add_soup(response, self.soup_config) return response
MIT License
dwavesystems/dimod
dimod/sampleset.py
SampleSet.first
python
def first(self): try: return next(self.data(sorted_by='energy', name='Sample')) except StopIteration: raise ValueError('{} is empty'.format(self.__class__.__name__))
Sample with the lowest energy. Raises: ValueError: If empty. Example: >>> sampleset = dimod.ExactSolver().sample_ising({'a': 1}, {('a', 'b'): 1}) >>> sampleset.first Sample(sample={'a': -1, 'b': 1}, energy=-2.0, num_occurrences=1)
https://github.com/dwavesystems/dimod/blob/af5a722f96250034a9099043927bf8ebc5294e40/dimod/sampleset.py#L819-L835
import base64 import copy import itertools import json import numbers import collections.abc as abc from collections import namedtuple from typing import Any, Callable import numpy as np from numpy.lib import recfunctions from warnings import warn from dimod.exceptions import WriteableError from dimod.serialization.format import Formatter from dimod.serialization.utils import (pack_samples as _pack_samples, unpack_samples, serialize_ndarray, deserialize_ndarray, serialize_ndarrays, deserialize_ndarrays) from dimod.sym import Sense from dimod.variables import Variables, iter_deserialize_variables from dimod.vartypes import as_vartype, Vartype, DISCRETE from dimod.views.samples import SampleView, SamplesArray __all__ = ['append_data_vectors', 'append_variables', 'as_samples', 'concatenate', 'SampleSet'] def append_data_vectors(sampleset, **vectors): record = sampleset.record for name, vector in vectors.items(): if len(vector) != len(record.energy): raise ValueError("Length of vector {} must be equal to number of samples.".format(name)) try: vector = np.asarray(vector) if vector.ndim == 1: record = recfunctions.append_fields(record, name, vector, usemask=False, asrecarray=True) else: dtype = np.dtype([(name, vector[0].dtype, vector[0].shape)]) new_arr = recfunctions.unstructured_to_structured(vector, dtype=dtype) record = recfunctions.merge_arrays((record, new_arr), flatten=True, asrecarray=True) except (TypeError, AttributeError): raise ValueError("Field value type not supported.") return SampleSet(record, sampleset.variables, sampleset.info, sampleset.vartype) def append_variables(sampleset, samples_like, sort_labels=True): samples, labels = as_samples(samples_like) num_samples = len(sampleset) if samples.shape[0] == num_samples: pass elif samples.shape[0] == 1 and num_samples: samples = np.repeat(samples, num_samples, axis=0) else: msg = ("mismatched shape. The samples to append should either be " "a single sample or should match the length of the sample " "set. 
Empty sample sets cannot be appended to.") raise ValueError(msg) variables = sampleset.variables if any(v in variables for v in labels): msg = "Appended samples cannot contain variables in sample set" raise ValueError(msg) new_variables = list(variables) + labels new_samples = np.hstack((sampleset.record.sample, samples)) return type(sampleset).from_samples((new_samples, new_variables), sampleset.vartype, info=copy.deepcopy(sampleset.info), sort_labels=sort_labels, **sampleset.data_vectors) def as_samples(samples_like, dtype=None, copy=False, order='C'): if isinstance(samples_like, SampleSet): labels = list(samples_like.variables) if dtype is None: arr = np.copy(samples_like.record.sample) if copy else samples_like.record.sample return arr, labels else: return samples_like.record.sample.astype(dtype, copy=copy), labels if isinstance(samples_like, tuple) and len(samples_like) == 2: samples_like, labels = samples_like if not isinstance(labels, list) and labels is not None: labels = list(labels) else: labels = None if isinstance(samples_like, abc.Iterator): raise TypeError('samples_like cannot be an iterator') if isinstance(samples_like, abc.Mapping): return as_samples(([samples_like], labels), dtype=dtype, copy=copy, order=order) if (isinstance(samples_like, list) and samples_like and isinstance(samples_like[0], numbers.Number)): return as_samples(([samples_like], labels), dtype=dtype, copy=copy, order=order) if not isinstance(samples_like, np.ndarray): if any(isinstance(sample, abc.Mapping) for sample in samples_like): samples_like, old = list(samples_like), samples_like if labels is None: first = samples_like[0] if isinstance(first, abc.Mapping): labels = list(first) else: labels = list(range(len(first))) for idx, sample in enumerate(old): if isinstance(sample, abc.Mapping): try: samples_like[idx] = [sample[v] for v in labels] except KeyError: raise ValueError("samples_like and labels do not match") if dtype is None: if not hasattr(samples_like, 'dtype'): samples_like = np.asarray(samples_like) max_ = max(-samples_like.min(initial=0), +samples_like.max(initial=0)) if max_ <= np.iinfo(np.int8).max: dtype = np.int8 elif max_ <= np.iinfo(np.int16).max: dtype = np.int16 elif max_ < np.iinfo(np.int32).max: dtype = np.int32 elif max_ < np.iinfo(np.int64).max: dtype = np.int64 else: raise RuntimeError else: dtype = samples_like.dtype arr = np.array(samples_like, dtype=dtype, copy=copy, order=order) if arr.ndim > 2: raise ValueError("expected samples_like to be <= 2 dimensions") if arr.ndim < 2: if arr.size: arr = np.atleast_2d(arr) elif labels: arr = arr.reshape((0, len(labels))) else: arr = arr.reshape((0, 0)) if labels is None: return arr, list(range(arr.shape[1])) elif len(labels) != arr.shape[1]: raise ValueError("samples_like and labels dimensions do not match") else: return arr, labels def concatenate(samplesets, defaults=None): itertup = iter(samplesets) try: first = next(itertup) except StopIteration: raise ValueError("samplesets must contain at least one SampleSet") vartype = first.vartype variables = first.variables records = [first.record] records.extend(_iter_records(itertup, vartype, variables)) record = recfunctions.stack_arrays(records, defaults=defaults, asrecarray=True, usemask=False) return SampleSet(record, variables, {}, vartype) def _iter_records(samplesets, vartype, variables): for samples in samplesets: if samples.vartype is not vartype: samples = samples.change_vartype(vartype, inplace=False) if samples.variables != variables: new_record = samples.record.copy() order = 
[samples.variables.index(v) for v in variables] new_record.sample = samples.record.sample[:, order] yield new_record else: yield samples.record def infer_vartype(samples_like): if isinstance(samples_like, SampleSet): return samples_like.vartype samples, _ = as_samples(samples_like) ones_mask = (samples == 1) if ones_mask.all(): return None if (ones_mask ^ (samples == 0)).all(): return Vartype.BINARY if (ones_mask ^ (samples == -1)).all(): return Vartype.SPIN raise ValueError("given samples_like is of an unknown vartype") class SampleSet(abc.Iterable, abc.Sized): _REQUIRED_FIELDS = ['sample', 'energy', 'num_occurrences'] def __init__(self, record, variables, info, vartype): vartype = as_vartype(vartype, extended=True) if not isinstance(record, np.recarray): raise TypeError("input record must be a numpy recarray") elif not set(self._REQUIRED_FIELDS).issubset(record.dtype.fields): raise ValueError("input record must have {}, {} and {} as fields".format(*self._REQUIRED_FIELDS)) self._record = record num_samples, num_variables = record.sample.shape self._variables = variables = Variables(variables) if len(variables) != num_variables: msg = ("mismatch between number of variables in record.sample ({}) " "and labels ({})").format(num_variables, len(variables)) raise ValueError(msg) self._info = dict(info) self._vartype = vartype @classmethod def from_samples(cls, samples_like, vartype, energy, info=None, num_occurrences=None, aggregate_samples=False, sort_labels=True, **vectors): if aggregate_samples: return cls.from_samples(samples_like, vartype, energy, info=info, num_occurrences=num_occurrences, aggregate_samples=False, **vectors).aggregate() samples, variables = as_samples(samples_like) if sort_labels and variables: try: reindex, new_variables = zip(*sorted(enumerate(variables), key=lambda tup: tup[1])) except TypeError: pass else: if new_variables != variables: samples = samples[:, reindex] variables = new_variables num_samples, num_variables = samples.shape energy = np.asarray(energy) if num_occurrences is None: num_occurrences = np.ones(num_samples, dtype=int) else: num_occurrences = np.asarray(num_occurrences) datatypes = [('sample', samples.dtype, (num_variables,)), ('energy', energy.dtype), ('num_occurrences', num_occurrences.dtype)] for key, vector in vectors.items(): vectors[key] = vector = np.asarray(vector) datatypes.append((key, vector.dtype, vector.shape[1:])) record = np.rec.array(np.zeros(num_samples, dtype=datatypes)) record['sample'] = samples record['energy'] = energy record['num_occurrences'] = num_occurrences for key, vector in vectors.items(): record[key] = vector if info is None: info = {} return cls(record, variables, info, vartype) @classmethod def from_samples_bqm(cls, samples_like, bqm, **kwargs): samples_like = as_samples(samples_like) energies = bqm.energies(samples_like) return cls.from_samples(samples_like, energy=energies, vartype=bqm.vartype, **kwargs) @classmethod def from_future(cls, future, result_hook=None): obj = cls.__new__(cls) obj._future = future if result_hook is None: def result_hook(future): return future.result() elif not callable(result_hook): raise TypeError("expected result_hook to be callable") obj._result_hook = result_hook return obj def __len__(self): return self.record.__len__() def __iter__(self): return iter(self.samples(sorted_by='energy')) def __eq__(self, other): if not isinstance(other, SampleSet): return False if self.vartype != other.vartype or self.info != other.info: return False if self.record.dtype.fields.keys() != 
other.record.dtype.fields.keys(): return False for field in self.record.dtype.fields: if field == 'sample': continue if not (self.record[field] == other.record[field]).all(): return False if self.variables == other.variables: return (self.record.sample == other.record.sample).all() try: other_idx = [other.variables.index(v) for v in self.variables] except ValueError: return False return (self.record.sample == other.record.sample[:, other_idx]).all() def __getstate__(self): self.resolve() return self.__dict__ def __repr__(self): return "{}({!r}, {}, {}, {!r})".format(self.__class__.__name__, self.record, self.variables, self.info, self.vartype.name) def __str__(self): return Formatter().format(self) @property def data_vectors(self): return {field: self.record[field] for field in self.record.dtype.names if field != 'sample'} @property
Apache License 2.0
core-gatech-group/serpent-tools
serpentTools/utils/compare.py
splitDictByKeys
python
def splitDictByKeys(map0, map1, keySet=None): if keySet is None: keySet = set(map1.keys()) keySet.update(set(map0.keys())) missing = {0: set(), 1: set()} differentTypes = {} badShapes = {} goodKeys = set() for key in keySet: if key not in map0 or key not in map1: for mapD, misK in zip((map0, map1), (0, 1)): if key not in mapD: missing[misK].add(key) continue v0 = map0[key] v1 = map1[key] t0 = type(v0) t1 = type(v1) if t0 != t1: differentTypes[key] = (t0, t1) continue if t0 is ndarray: if v0.shape != v1.shape: badShapes[key] = (v0.shape, v1.shape) continue goodKeys.add(key) return missing[0], missing[1], differentTypes, badShapes, goodKeys
Return various sub-sets and dictionaries from two maps. Used to test the internal workings of :func:`getKeyMatchingShapes`. Parameters ---------- map0: dict map1: dict Dictionaries to compare keySet: set or None Iterable collection of keys found in ``map0`` and ``map1``. Missing keys will be returned from this function under the ``missing0`` and ``missing1`` sets. If ``None``, taken to be the union of the keys that exist in ``map0`` and ``map1`` Returns ------- missing0: set Keys that exist in ``keySet`` but not in ``map0`` missing1: set Keys that exist in ``keySet`` but not in ``map1`` differentTypes: dict Dictionary with tuples ``{key: (t0, t1)}`` indicating the values ``map0[key]`` and ``map1[key]`` are of different types badShapes: dict Dictionary with tuples ``{key: (t0, t1)}`` indicating the values ``map0[key]`` and ``map1[key]`` are arrays of different shapes goodKeys: set Keys found in both ``map0`` and ``map1`` that are of the same type or point to arrays of the same shape
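A toy illustration of the five return values; the dictionaries below are made up.

from numpy import zeros

map0 = {'flux': zeros((2, 3)), 'keff': 1.0, 'time': 'a'}
map1 = {'flux': zeros((2, 4)), 'keff': 1.2, 'time': 1.0, 'extra': 5}
m0, m1, diffTypes, badShapes, goodKeys = splitDictByKeys(map0, map1)
# m0 == {'extra'}                          (in the key set but not in map0)
# m1 == set()
# diffTypes == {'time': (str, float)}
# badShapes == {'flux': ((2, 3), (2, 4))}
# goodKeys == {'keff'}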
https://github.com/core-gatech-group/serpent-tools/blob/f61cd104bd997aa80429f1059bbc3669adc18654/serpentTools/utils/compare.py#L284-L343
from collections.abc import Iterable from numpy.core.defchararray import equal as charEqual from numpy import ( fabs, zeros_like, ndarray, array, greater, multiply, subtract, equal, asarray, ) from serpentTools.messages import ( error, logIdentical, logNotIdentical, logAcceptableLow, logAcceptableHigh, logOutsideTols, logDifferentTypes, logMissingKeys, logBadTypes, logBadShapes, logMapOfBadShapes, logIdenticalWithUncs, logInsideConfInt, logOutsideConfInt, ) from serpentTools.utils.docstrings import compareDocDecorator LOWER_LIM_DIVISION = 1E-8 DEF_COMP_LOWER = 0 DEF_COMP_UPPER = 10 DEF_COMP_SIGMA = 2 @compareDocDecorator def getCommonKeys(d0, d1, quantity, desc0='first', desc1='second', herald=error): k0 = d0.keys() if isinstance(d0, dict) else d0 k1 = d1.keys() if isinstance(d1, dict) else d1 s0 = set(k0) s1 = set(k1) common = s0.intersection(s1) missing = s0.symmetric_difference(s1) if missing: in0 = s0.difference(s1) in1 = s1.difference(s0) logMissingKeys(quantity, desc0, desc1, in0, in1, herald) return common TPL_FLOAT_INT = float, int DC_STAT_GOOD = 0 DC_STAT_LE_LOWER = 1 DC_STAT_MID = 10 DC_STAT_GE_UPPER = 100 DC_STAT_NOT_IDENTICAL = 200 DC_STAT_DIFF_TYPES = 255 DC_STAT_NOT_IMPLEMENTED = -1 DC_STAT_DIFF_SHAPES = 250 COMPARE_STATUS_CODES = { DC_STAT_GOOD: (logIdentical, True), DC_STAT_LE_LOWER: (logAcceptableLow, True), DC_STAT_MID: (logAcceptableHigh, True), DC_STAT_NOT_IDENTICAL: (logNotIdentical, False), DC_STAT_GE_UPPER: (logOutsideTols, False), DC_STAT_DIFF_TYPES: (logDifferentTypes, False), DC_STAT_DIFF_SHAPES: (logBadShapes, False), } @compareDocDecorator def directCompare(obj0, obj1, lower, upper): type0 = type(obj0) type1 = type(obj1) if type0 != type1: if type0 not in TPL_FLOAT_INT or type1 not in TPL_FLOAT_INT: return DC_STAT_DIFF_TYPES if type0 in (str, bool): if obj0 != obj1: return DC_STAT_NOT_IDENTICAL return DC_STAT_GOOD if not isinstance(obj0, Iterable): obj0 = array([obj0]) obj1 = array([obj1]) else: obj0 = array(obj0) obj1 = array(obj1) if obj0.dtype.name == 'object': return DC_STAT_NOT_IMPLEMENTED if obj0.shape != obj1.shape: return DC_STAT_DIFF_SHAPES if not upper: return _directCompareIdentical(obj0, obj1) return _directCompareWithTols(obj0, obj1, lower, upper) def _directCompareIdentical(obj0, obj1): if obj0.dtype.name[:3] == 'str': compArray = charEqual(obj0, obj1) else: compArray = equal(obj0, obj1) if compArray.all(): return DC_STAT_GOOD return DC_STAT_NOT_IDENTICAL def _directCompareWithTols(obj0, obj1, lower, upper): diff = multiply( fabs(subtract(obj0, obj1)), 100 ) nonZI = greater(fabs(obj0), LOWER_LIM_DIVISION) diff[nonZI] /= obj0[nonZI] maxDiff = diff.max() if maxDiff < LOWER_LIM_DIVISION: return DC_STAT_GOOD if maxDiff <= lower: return DC_STAT_LE_LOWER if maxDiff >= upper: return DC_STAT_GE_UPPER return DC_STAT_MID directCompare.__doc__ = directCompare.__doc__.format( good=DC_STAT_GOOD, leLower=DC_STAT_LE_LOWER, mid=DC_STAT_MID, geUpper=DC_STAT_GE_UPPER, notIdentical=DC_STAT_NOT_IDENTICAL, diffTypes=DC_STAT_DIFF_TYPES, notImplemented=DC_STAT_NOT_IMPLEMENTED, diffShapes=DC_STAT_DIFF_SHAPES, ) @compareDocDecorator def logDirectCompare(obj0, obj1, lower, upper, quantity): result = directCompare(obj0, obj1, lower, upper) if result < 0: if result == DC_STAT_NOT_IMPLEMENTED: raise TypeError( "directCompare is not configured to make tests on objects " "of type {tp}\n\tQuantity: {k}\n\tUsers: Create a issue on " "GitHub to alert developers.\n\tDevelopers: Update this " "function or create a compare function " "for {tp} objects.".format(k=quantity, 
tp=type(obj0))) noticeTuple = [obj0, obj1, quantity] if result in COMPARE_STATUS_CODES: func, returnV = COMPARE_STATUS_CODES[result] func(*noticeTuple) return returnV raise ValueError("Received value of {} from directCompare. Not sure " "what this means.")
MIT License
doudz/zigate
zigate/core.py
ZiGate.decode_data
python
def decode_data(self, packet): try: decoded = self.zigate_decode(packet[1:-1]) msg_type, length, checksum, value, lqi = struct.unpack('!HHB%dsB' % (len(decoded) - 6), decoded) except Exception: LOGGER.error('Failed to decode packet : %s', hexlify(packet)) return if length != len(value) + 1: LOGGER.error('Bad length %s != %s : %s', length, len(value) + 1, value) return computed_checksum = self.checksum(decoded[:4], lqi, value) if checksum != computed_checksum: LOGGER.error('Bad checksum %s != %s', checksum, computed_checksum) return LOGGER.debug('Received response 0x{:04x}: {}'.format(msg_type, hexlify(value))) try: response = RESPONSES.get(msg_type, Response)(value, lqi) except Exception: LOGGER.error('Error decoding response 0x{:04x}: {}'.format(msg_type, hexlify(value))) LOGGER.error(traceback.format_exc()) return if msg_type != response.msg: LOGGER.warning('Unknown response 0x{:04x}'.format(msg_type)) LOGGER.debug(response) self._last_response[msg_type] = response self.interpret_response(response) dispatch_signal(ZIGATE_RESPONSE_RECEIVED, self, response=response)
Decode a raw packet message.
https://github.com/doudz/zigate/blob/f42fbad6f677f67442e05c845ab4f3c2029b5431/zigate/core.py#L569-L599
from binascii import hexlify import traceback from time import (sleep, strftime, monotonic) import logging import json import os from pydispatch import dispatcher from .transport import (ThreadSerialConnection, ThreadSocketConnection, FakeTransport) from .responses import (RESPONSES, Response) from .const import (ACTIONS_COLOR, ACTIONS_LEVEL, ACTIONS_LOCK, ACTIONS_HUE, ACTIONS_ONOFF, ACTIONS_TEMPERATURE, ACTIONS_COVER, ACTIONS_THERMOSTAT, ACTIONS_IAS, OFF, ON, TYPE_COORDINATOR, STATUS_CODES, ZIGATE_ATTRIBUTE_ADDED, ZIGATE_ATTRIBUTE_UPDATED, ZIGATE_DEVICE_ADDED, ZIGATE_DEVICE_REMOVED, ZIGATE_DEVICE_UPDATED, ZIGATE_DEVICE_ADDRESS_CHANGED, ZIGATE_PACKET_RECEIVED, ZIGATE_DEVICE_NEED_DISCOVERY, ZIGATE_RESPONSE_RECEIVED, DATA_TYPE, BASE_PATH) from .clusters import (Cluster, get_cluster) import functools import struct import threading import random from enum import Enum import colorsys import datetime try: import RPi.GPIO as GPIO except Exception: class GPIO: def fake(self, *args, **kwargs): LOGGER.error('GPIO Not available') def __getattr__(self, *args, **kwargs): return self.fake GPIO = GPIO() import usb LOGGER = logging.getLogger('zigate') AUTO_SAVE = 5 * 60 BIND_REPORT = True SLEEP_INTERVAL = 0.1 ACTIONS = {} WAIT_TIMEOUT = 5 DETECT_FASTCHANGE = False DELAY_FASTCHANGE = 1.0 ACTUATORS = [0x0009, 0x0010, 0x0051, 0x000a, 0x010a, 0x010b, 0x010c, 0x010d, 0x0100, 0x0101, 0x0102, 0x0103, 0x0105, 0x0110, 0x0200, 0x0202, 0x0210, 0x0220, 0x0301, 0x0403] def register_actions(action): def decorator(func): if action not in ACTIONS: ACTIONS[action] = [] ACTIONS[action].append(func.__name__) return func return decorator class AddrMode(Enum): bound = 0 group = 1 short = 2 ieee = 3 def hex_to_rgb(h): h = h.strip('#') return tuple(int(h[i:i + 2], 16) / 255 for i in (0, 2, 4)) def rgb_to_xy(rgb): red, green, blue = rgb r = ((red + 0.055) / (1.0 + 0.055))**2.4 if (red > 0.04045) else (red / 12.92) g = ((green + 0.055) / (1.0 + 0.055))**2.4 if (green > 0.04045) else (green / 12.92) b = ((blue + 0.055) / (1.0 + 0.055))**2.4 if (blue > 0.04045) else (blue / 12.92) X = r * 0.664511 + g * 0.154324 + b * 0.162028 Y = r * 0.283881 + g * 0.668433 + b * 0.047685 Z = r * 0.000088 + g * 0.072310 + b * 0.986039 cx = 0 cy = 0 if (X + Y + Z) != 0: cx = X / (X + Y + Z) cy = Y / (X + Y + Z) return (cx, cy) def hex_to_xy(h): return rgb_to_xy(hex_to_rgb(h)) def dispatch_signal(signal=dispatcher.Any, sender=dispatcher.Anonymous, *arguments, **named): LOGGER.debug('Dispatch %s', signal) try: dispatcher.send(signal, sender, *arguments, **named) except Exception: LOGGER.error('Exception dispatching signal %s', signal) LOGGER.error(traceback.format_exc()) def ftdi_set_bitmode(dev, bitmask): BITMODE_CBUS = 0x20 SIO_SET_BITMODE_REQUEST = 0x0b bmRequestType = usb.util.build_request_type(usb.util.CTRL_OUT, usb.util.CTRL_TYPE_VENDOR, usb.util.CTRL_RECIPIENT_DEVICE) wValue = bitmask | (BITMODE_CBUS << BITMODE_CBUS) dev.ctrl_transfer(bmRequestType, SIO_SET_BITMODE_REQUEST, wValue) class ZiGate(object): def __init__(self, port='auto', path='~/.zigate.json', auto_start=True, auto_save=True, channel=None, adminpanel=False): self._model = 'TTL' self._devices = {} self._groups = {} self._scenes = {} self._led = True self._neighbours_table_cache = [] self._building_neighbours_table = False self._path = path self._version = None self._port = port self._last_response = {} self._last_status = {} self._save_lock = threading.Lock() self._autosavetimer = None self._closing = False self.connection = None self._addr = '0000' self._ieee = None self.panid = 
0 self.extended_panid = 0 self.channel = 0 self._started = False self._no_response_count = 0 self._ota_reset_local_variables() if self.model == 'DIN': self.set_running_mode() if adminpanel: self.start_adminpanel() if auto_start: self.startup(channel) if auto_save: self.start_auto_save() @property def model(self): if self.connection: if self.connection.vid_pid() == (0x0403, 0x6001): self._model = 'DIN' return self._model def set_bootloader_mode(self): if self.model != 'DIN': LOGGER.warning('Method only supported on ZiGate DIN') return dev = usb.core.find(idVendor=0x0403, idProduct=0x6001) if not dev: LOGGER.error('ZiGate DIN not found.') return ftdi_set_bitmode(dev, 0x00) sleep(0.5) ftdi_set_bitmode(dev, 0xCC) sleep(0.5) ftdi_set_bitmode(dev, 0xC0) sleep(0.5) ftdi_set_bitmode(dev, 0xC4) sleep(0.5) ftdi_set_bitmode(dev, 0xCC) sleep(0.5) def set_running_mode(self): if self.model != 'DIN': LOGGER.warning('Method only supported on ZiGate DIN') return dev = usb.core.find(idVendor=0x0403, idProduct=0x6001) if not dev: LOGGER.error('ZiGate DIN not found.') return ftdi_set_bitmode(dev, 0xC8) sleep(0.5) ftdi_set_bitmode(dev, 0xCC) sleep(0.5) def flash_firmware(self, path, erase_eeprom=False): if self.model != 'DIN': LOGGER.warning('Method only supported on ZiGate DIN') return from .flasher import flash self.set_bootloader_mode() flash(self._port, write=path, erase=erase_eeprom) self.set_running_mode() @property def ieee(self): return self._ieee @property def addr(self): return self._addr def start_adminpanel(self, host=None, port=None, mount=None, prefix=None, debug=False): from .adminpanel import start_adminpanel, ADMINPANEL_HOST, ADMINPANEL_PORT port = port or ADMINPANEL_PORT host = host or ADMINPANEL_HOST self.adminpanel = start_adminpanel(self, host=host, port=port, mount=mount, prefix=prefix, quiet=not debug, debug=debug) return self.adminpanel def _event_loop(self): while not self._closing: if self.connection and not self.connection.received.empty(): packet = self.connection.received.get() dispatch_signal(ZIGATE_PACKET_RECEIVED, self, packet=packet) t = threading.Thread(target=self.decode_data, args=(packet,), name='ZiGate-Decode data') t.setDaemon(True) t.start() else: sleep(SLEEP_INTERVAL) def setup_connection(self): self.connection = ThreadSerialConnection(self, self._port) def close(self): self._closing = True if self._autosavetimer: self._autosavetimer.cancel() try: if self.connection: self.connection.close() except Exception: LOGGER.error('Exception during closing') LOGGER.error(traceback.format_exc()) self.connection = None self._started = False def save_state(self, path=None): LOGGER.debug('Saving persistent file') path = path or self._path if path is None: LOGGER.warning('Persistent file is disabled') if self._autosavetimer: self._autosavetimer.cancel() return self._path = os.path.expanduser(path) LOGGER.debug('Acquire Lock to save persistent file') r = self._save_lock.acquire(True, 5) if not r: LOGGER.error('Failed to acquire Lock to save persistent file') return try: data = {'devices': list(self._devices.values()), 'groups': self._groups, 'scenes': self._scenes, 'neighbours_table': self._neighbours_table_cache, 'led': self._led } with open(self._path, 'w') as fp: json.dump(data, fp, cls=DeviceEncoder, sort_keys=True, indent=4, separators=(',', ': ')) except Exception: LOGGER.error('Failed to save persistent file %s', self._path) LOGGER.error(traceback.format_exc()) LOGGER.debug('Release Lock of persistent file') self._save_lock.release() def load_state(self, path=None): 
LOGGER.debug('Try loading persistent file') path = path or self._path if path is None: LOGGER.warning('Persistent file is disabled') return self._path = os.path.expanduser(path) LOGGER.debug('Trying to load %s', self._path) if not os.path.exists(self._path): LOGGER.warning('Persistent file %s doesn\'t exist', self._path) return False try: with open(self._path) as fp: data = json.load(fp) if not isinstance(data, dict): data = {'devices': data, 'groups': {}} groups = data.get('groups', {}) for k, v in groups.items(): groups[k] = set([tuple(r) for r in v]) self._groups = groups self._scenes = data.get('scenes', {}) self._led = data.get('led', True) self._neighbours_table_cache = data.get('neighbours_table', []) LOGGER.debug('Load neighbours cache: %s', self._neighbours_table_cache) devices = data.get('devices', []) for data in devices: try: device = Device.from_json(data, self) self._devices[device.addr] = device device._create_actions() except Exception: LOGGER.error('Error loading device %s', data) LOGGER.debug('Load success') return True except Exception: LOGGER.error('Failed to load persistent file %s', self._path) LOGGER.error(traceback.format_exc()) LOGGER.debug('No file to load') return False def start_auto_save(self): LOGGER.debug('Auto saving %s', self._path) self.save_state() self._autosavetimer = threading.Timer(AUTO_SAVE, self.start_auto_save) self._autosavetimer.setDaemon(True) self._autosavetimer.start() if self.send_data(0x0010) is None: self.connection.reconnect() def __del__(self): self.close() def _start_event_thread(self): self._event_thread = threading.Thread(target=self._event_loop, name='ZiGate-Event Loop') self._event_thread.setDaemon(True) self._event_thread.start() def autoStart(self, channel=None): self.startup(channel) def startup(self, channel=None): if self._started: return self._closing = False self._start_event_thread() self.load_state() self.setup_connection() self.set_led(self._led) version = self.get_version() self.set_channel(channel) self.set_type(TYPE_COORDINATOR) LOGGER.debug('Check network state') network_state = self.get_network_state() if not network_state: LOGGER.error('Failed to get network state') if not network_state or network_state.get('extended_panid') == 0 or network_state.get('addr') == 'ffff': LOGGER.debug('Network is down, start it') self.start_network(True) tries = 3 while tries > 0: sleep(1) tries -= 1 network_state = self.get_network_state() if network_state and network_state.get('extended_panid') != 0 and network_state.get('addr') != 'ffff': break if tries <= 0: LOGGER.error('Failed to start network') self.reset() return if version and version['version'] >= '3.1a': LOGGER.debug('Set Zigate normal mode (firmware >= 3.1a)') self.set_raw_mode(False) if version and version['version'] >= '3.0f': LOGGER.debug('Set Zigate Time (firmware >= 3.0f)') self.set_time() self.get_devices_list(True) t = threading.Thread(target=self.need_discovery) t.setDaemon(True) t.start() def need_discovery(self): for device in self.devices: if device.need_discovery(): if device.receiver_on_when_idle(): LOGGER.debug('Auto discover device %s', device) device.discover_device() else: dispatch_signal(ZIGATE_DEVICE_NEED_DISCOVERY, self, **{'zigate': self, 'device': device}) def zigate_encode(self, data): encoded = bytearray() for b in data: if b < 0x10: encoded.extend([0x02, 0x10 ^ b]) else: encoded.append(b) return encoded def zigate_decode(self, data): flip = False decoded = bytearray() for b in data: if flip: flip = False decoded.append(b ^ 0x10) elif b == 0x02: flip = 
True else: decoded.append(b) return decoded def checksum(self, *args): chcksum = 0 for arg in args: if isinstance(arg, int): chcksum ^= arg continue for x in arg: chcksum ^= x return chcksum def send_to_transport(self, data): if not self.connection or not self.connection.is_connected(): LOGGER.error('Not connected to zigate') return self.connection.send(data) def send_data(self, cmd, data="", wait_response=None, wait_status=True): LOGGER.debug('REQUEST : 0x{:04x} {}'.format(cmd, data)) self._last_status[cmd] = None if wait_response: self._clear_response(wait_response) if isinstance(cmd, int): byte_cmd = struct.pack('!H', cmd) elif isinstance(data, str): byte_cmd = bytes.fromhex(cmd) else: byte_cmd = cmd if isinstance(data, str): byte_data = bytes.fromhex(data) else: byte_data = data assert type(byte_cmd) == bytes assert type(byte_data) == bytes length = len(byte_data) byte_length = struct.pack('!H', length) checksum = self.checksum(byte_cmd, byte_length, byte_data) msg = struct.pack('!HHB%ds' % length, cmd, length, checksum, byte_data) LOGGER.debug('Msg to send %s', hexlify(msg)) enc_msg = self.zigate_encode(msg) enc_msg.insert(0, 0x01) enc_msg.append(0x03) encoded_output = bytes(enc_msg) LOGGER.debug('Encoded Msg to send %s', hexlify(encoded_output)) self.send_to_transport(encoded_output) if wait_status: status = self._wait_status(cmd) if wait_response and status is not None: r = self._wait_response(wait_response) return r return status return False
MIT License
m3dev/pptx-template
.eggs/python_pptx-0.6.6-py3.6.egg/pptx/text/text.py
TextFrame.add_paragraph
python
def add_paragraph(self): p = self._txBody.add_p() return _Paragraph(p, self)
Return new |_Paragraph| instance appended to the sequence of paragraphs contained in this text frame.
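A usage sketch with python-pptx; the slide layout index and text-box geometry are arbitrary choices.

from pptx import Presentation
from pptx.util import Inches

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[6])      # blank layout in the default template
box = slide.shapes.add_textbox(Inches(1), Inches(1), Inches(4), Inches(2))
tf = box.text_frame
tf.text = "First paragraph"
para = tf.add_paragraph()          # appended after the existing paragraph
para.text = "Second paragraph"
prs.save("demo.pptx")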
https://github.com/m3dev/pptx-template/blob/bccd95728fc27963dabdd53bd3a2ee92233d5176/.eggs/python_pptx-0.6.6-py3.6.egg/pptx/text/text.py#L32-L38
from __future__ import absolute_import, print_function from ..compat import to_unicode from ..dml.fill import FillFormat from ..enum.dml import MSO_FILL from ..enum.lang import MSO_LANGUAGE_ID from ..enum.text import MSO_AUTO_SIZE, MSO_UNDERLINE from .fonts import FontFiles from .layout import TextFitter from ..opc.constants import RELATIONSHIP_TYPE as RT from ..oxml.simpletypes import ST_TextWrappingType from ..shapes import Subshape from ..util import Centipoints, Emu, lazyproperty, Pt class TextFrame(Subshape): def __init__(self, txBody, parent): super(TextFrame, self).__init__(parent) self._element = self._txBody = txBody
Apache License 2.0
tl-system/plato
plato/utils/rl_env.py
RLEnv.get_state
python
def get_state(self, state, is_episode_done): self.state = state self.is_episode_done = is_episode_done self.state_got.set() print("RL env: Get state", state) self.rl_agent.is_rl_tuned_para_got = False
Get the new state from the RL agent after a transition. This function is called by the RL agent.
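A sketch of the agent-side call that unblocks a waiting step(); the rl_agent instance, state vector, and episode flag are assumptions.

env = RLEnv(rl_agent)                           # rl_agent assumed to exist elsewhere
env.get_state([0.42], is_episode_done=False)    # sets state_got so the waiting step() can proceed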
https://github.com/tl-system/plato/blob/cbc5ddc04b554b4b05679a85c6ed6e5fb7f70bef/plato/utils/rl_env.py#L125-L135
import asyncio import logging import gym import numpy as np from plato.config import Config from gym import spaces class RLEnv(gym.Env): metadata = {'render.modes': ['fl']} def __init__(self, rl_agent): super().__init__() self.rl_agent = rl_agent self.time_step = 0 self.state = None self.is_episode_done = False self.state_got = asyncio.Event() self.step_done = asyncio.Event() n_actions = 1 self.action_space = spaces.Box(low=-1, high=1, shape=(n_actions, ), dtype="float32") self.n_states = 1 self.observation_space = spaces.Box(low=-1, high=1, shape=(self.n_states, ), dtype="float32") self.state = [0 for i in range(self.n_states)] def reset(self): if self.rl_agent.rl_episode >= Config().algorithm.rl_episodes: while True: current_loop = asyncio.get_event_loop() task = current_loop.create_task(asyncio.sleep(1)) current_loop.run_until_complete(task) logging.info("Reseting RL environment.") self.time_step = 0 self.rl_agent.reset_rl_env() self.rl_agent.new_episode_begin.set() self.state = [0 for i in range(self.n_states)] return np.array(self.state) def step(self, action): assert self.action_space.contains( action), "%r (%s) invalid" % (action, type(action)) self.time_step += 1 reward = float(0) self.is_episode_done = False current_edge_agg_num = self.time_step logging.info("RL Agent: Start time step #%s...", self.time_step) logging.info( "Each edge server will run %s rounds of local aggregation.", current_edge_agg_num) self.rl_agent.get_tuned_para(current_edge_agg_num, self.time_step) current_loop = asyncio.get_event_loop() get_state_task = current_loop.create_task(self.wait_for_state()) current_loop.run_until_complete(get_state_task) self.normalize_state() reward = self.get_reward() info = {} self.rl_agent.cumulative_reward += reward self.step_done.set() return np.array([self.state]), reward, self.is_episode_done, info async def wait_for_state(self): await self.state_got.wait() assert self.time_step == self.rl_agent.current_round self.state_got.clear()
Apache License 2.0
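
get_state hands the latest state to the environment by setting an asyncio.Event that wait_for_state is blocked on; the event is cleared once the state is consumed. A self-contained sketch of that handoff using only the standard library (the class and values are illustrative, not Plato's API):

import asyncio

class StateMailbox:
    def __init__(self):
        self.state = None
        self.state_got = asyncio.Event()

    def put_state(self, state):
        # producer side (the agent): store the state, then wake the waiter
        self.state = state
        self.state_got.set()

    async def wait_for_state(self):
        # consumer side (the env): block until a state arrives, then reset the flag
        await self.state_got.wait()
        self.state_got.clear()
        return self.state

async def main():
    box = StateMailbox()

    async def producer():
        await asyncio.sleep(0.1)
        box.put_state([0.0])

    asyncio.ensure_future(producer())
    print(await box.wait_for_state())   # -> [0.0]

asyncio.run(main())
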
believefxy/lightsans
recbole/evaluator/topk_evaluator.py
TopKEvaluator.evaluate
python
def evaluate(self, batch_matrix_list, eval_data): pos_len_list = eval_data.get_pos_len_list() topk_index = torch.cat(batch_matrix_list, dim=0).cpu().numpy() assert len(pos_len_list) == len(topk_index) metric_dict = {} result_list = self._calculate_metrics(pos_len_list, topk_index) for metric, value in zip(self.metrics, result_list): for k in self.topk: key = '{}@{}'.format(metric, k) metric_dict[key] = round(value[k - 1], 4) return metric_dict
calculate the metrics of all batches. It is called at the end of each epoch Args: batch_matrix_list (list): the results of all batches eval_data (Dataset): the class of test data Returns: dict: such as ``{'Hit@20': 0.3824, 'Recall@20': 0.0527, 'Hit@10': 0.3153, 'Recall@10': 0.0329}``
https://github.com/believefxy/lightsans/blob/94ce7e59d144dbc787153b8c486cad334790ec6e/recbole/evaluator/topk_evaluator.py#L63-L85
import numpy as np import torch from recbole.evaluator.abstract_evaluator import AbstractEvaluator from recbole.evaluator.metrics import metrics_dict from torch.nn.utils.rnn import pad_sequence topk_metrics = {metric.lower(): metric for metric in ['Hit', 'Recall', 'MRR', 'Precision', 'NDCG', 'MAP']} class TopKEvaluator(AbstractEvaluator): def __init__(self, config): super().__init__(config) self.topk = config['topk'] self._check_args() def collect(self, interaction, scores_tensor, full=False): user_len_list = interaction.user_len_list if full is True: scores_matrix = scores_tensor.view(len(user_len_list), -1) else: scores_list = torch.split(scores_tensor, user_len_list, dim=0) scores_matrix = pad_sequence(scores_list, batch_first=True, padding_value=-np.inf) _, topk_index = torch.topk(scores_matrix, max(self.topk), dim=-1) return topk_index
MIT License
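
evaluate concatenates the per-batch top-k index matrices and emits one rounded value per metric@k key. A small sketch of the two pieces it relies on, torch.topk and the key construction; the scores and metric values are made up:

import torch

scores = torch.tensor([[0.1, 0.9, 0.4, 0.7],
                       [0.8, 0.2, 0.6, 0.3]])
_, topk_index = torch.topk(scores, k=2, dim=-1)
print(topk_index)                 # indices of the 2 highest-scoring items per row

metric_dict = {}
for metric, values in [('Hit', [1.0, 0.5]), ('Recall', [0.33333, 0.16667])]:
    for k in (1, 2):
        metric_dict['{}@{}'.format(metric, k)] = round(values[k - 1], 4)
print(metric_dict)                # e.g. {'Hit@1': 1.0, ..., 'Recall@2': 0.1667}
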
taksau/gps-net
lib/get_dataset_counts.py
get_counts
python
def get_counts(train_data=VG(mode='train', filter_duplicate_rels=False, num_val_im=5000), must_overlap=True): fg_matrix = np.zeros(( train_data.num_classes, train_data.num_classes, train_data.num_predicates, ), dtype=np.int64) bg_matrix = np.zeros(( train_data.num_classes, train_data.num_classes, ), dtype=np.int64) for ex_ind in range(len(train_data)): gt_classes = train_data.gt_classes[ex_ind].copy() gt_relations = train_data.relationships[ex_ind].copy() gt_boxes = train_data.gt_boxes[ex_ind].copy() o1o2 = gt_classes[gt_relations[:, :2]] for (o1, o2), gtr in zip(o1o2, gt_relations[:,2]): fg_matrix[o1, o2, gtr] += 1 o1o2_total = gt_classes[np.array( box_filter(gt_boxes, must_overlap=must_overlap), dtype=int)] for (o1, o2) in o1o2_total: bg_matrix[o1, o2] += 1 return fg_matrix, bg_matrix
Get counts of all of the relations. Used for modeling directly P(rel | o1, o2) :param train_data: :param must_overlap: :return:
https://github.com/taksau/gps-net/blob/dfbe63a793026b231b3cd60073aaa91a2ad4d06a/lib/get_dataset_counts.py#L73-L107
import numpy as np from dataloaders.visual_genome import VG from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps from lib.pytorch_misc import nonintersecting_2d_inds, intersect_2d def get_counts_new(train_data=VG(mode='train', filter_duplicate_rels=False, num_val_im=5000), must_overlap=True): fg_matrix = np.zeros(( train_data.num_classes, train_data.num_classes, train_data.num_predicates, ), dtype=np.int64) bg_matrix = np.zeros(( train_data.num_classes, train_data.num_classes, ), dtype=np.int64) for ex_ind in range(len(train_data)): gt_classes = train_data.gt_classes[ex_ind].copy() gt_relations = train_data.relationships[ex_ind].copy() gt_boxes = train_data.gt_boxes[ex_ind].copy() o1o2 = gt_classes[gt_relations[:, :2]] for (o1, o2), gtr in zip(o1o2, gt_relations[:,2]): fg_matrix[o1, o2, gtr] += 1 o1o2_total = gt_classes[np.array( box_filter(gt_boxes, must_overlap=must_overlap), dtype=int)] mask = intersect_2d(o1o2_total, o1o2).any(1) index = np.where(mask)[0] o1o2_bg = o1o2_total[index] for (o1, o2) in o1o2_bg: bg_matrix[o1, o2] += 1 return fg_matrix, bg_matrix def box_filter(boxes, must_overlap=False): n_cands = boxes.shape[0] overlaps = bbox_overlaps(boxes.astype(np.float), boxes.astype(np.float)) > 0 np.fill_diagonal(overlaps, 0) all_possib = np.ones_like(overlaps, dtype=np.bool) np.fill_diagonal(all_possib, 0) if must_overlap: possible_boxes = np.column_stack(np.where(overlaps)) if possible_boxes.size == 0: possible_boxes = np.column_stack(np.where(all_possib)) else: possible_boxes = np.column_stack(np.where(all_possib)) return possible_boxes
MIT License
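
get_counts accumulates a (subject class, object class, predicate) count tensor by adding one per ground-truth triple, which later backs the P(rel | o1, o2) prior. The same accumulation on toy data; class and predicate ids are made up:

import numpy as np

num_classes, num_predicates = 4, 3
fg_matrix = np.zeros((num_classes, num_classes, num_predicates), dtype=np.int64)

triples = [(1, 2, 0), (1, 2, 0), (3, 1, 2)]   # (subject class, object class, predicate)
for o1, o2, rel in triples:
    fg_matrix[o1, o2, rel] += 1

print(fg_matrix[1, 2, 0])   # 2
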
kriaga/health-checker
HealthChecker/venv/Lib/site-packages/selenium/webdriver/support/select.py
Select.deselect_by_visible_text
python
def deselect_by_visible_text(self, text): if not self.is_multiple: raise NotImplementedError("You may only deselect options of a multi-select") matched = False xpath = ".//option[normalize-space(.) = %s]" % self._escapeString(text) opts = self._el.find_elements(By.XPATH, xpath) for opt in opts: self._unsetSelected(opt) matched = True if not matched: raise NoSuchElementException("Could not locate element with visible text: %s" % text)
Deselect all options that display text matching the argument. That is, when given "Bar" this would deselect an option like: <option value="foo">Bar</option> :Args: - text - The visible text to match against
https://github.com/kriaga/health-checker/blob/3d9ce933f131bcbb897103b0f509cc45393cae4a/HealthChecker/venv/Lib/site-packages/selenium/webdriver/support/select.py#L190-L208
from selenium.webdriver.common.by import By from selenium.common.exceptions import NoSuchElementException, UnexpectedTagNameException class Select(object): def __init__(self, webelement): if webelement.tag_name.lower() != "select": raise UnexpectedTagNameException( "Select only works on <select> elements, not on <%s>" % webelement.tag_name) self._el = webelement multi = self._el.get_attribute("multiple") self.is_multiple = multi and multi != "false" @property def options(self): return self._el.find_elements(By.TAG_NAME, 'option') @property def all_selected_options(self): ret = [] for opt in self.options: if opt.is_selected(): ret.append(opt) return ret @property def first_selected_option(self): for opt in self.options: if opt.is_selected(): return opt raise NoSuchElementException("No options are selected") def select_by_value(self, value): css = "option[value =%s]" % self._escapeString(value) opts = self._el.find_elements(By.CSS_SELECTOR, css) matched = False for opt in opts: self._setSelected(opt) if not self.is_multiple: return matched = True if not matched: raise NoSuchElementException("Cannot locate option with value: %s" % value) def select_by_index(self, index): match = str(index) for opt in self.options: if opt.get_attribute("index") == match: self._setSelected(opt) return raise NoSuchElementException("Could not locate element with index %d" % index) def select_by_visible_text(self, text): xpath = ".//option[normalize-space(.) = %s]" % self._escapeString(text) opts = self._el.find_elements(By.XPATH, xpath) matched = False for opt in opts: self._setSelected(opt) if not self.is_multiple: return matched = True if len(opts) == 0 and " " in text: subStringWithoutSpace = self._get_longest_token(text) if subStringWithoutSpace == "": candidates = self.options else: xpath = ".//option[contains(.,%s)]" % self._escapeString(subStringWithoutSpace) candidates = self._el.find_elements(By.XPATH, xpath) for candidate in candidates: if text == candidate.text: self._setSelected(candidate) if not self.is_multiple: return matched = True if not matched: raise NoSuchElementException("Could not locate element with visible text: %s" % text) def deselect_all(self): if not self.is_multiple: raise NotImplementedError("You may only deselect all options of a multi-select") for opt in self.options: self._unsetSelected(opt) def deselect_by_value(self, value): if not self.is_multiple: raise NotImplementedError("You may only deselect options of a multi-select") matched = False css = "option[value = %s]" % self._escapeString(value) opts = self._el.find_elements(By.CSS_SELECTOR, css) for opt in opts: self._unsetSelected(opt) matched = True if not matched: raise NoSuchElementException("Could not locate element with value: %s" % value) def deselect_by_index(self, index): if not self.is_multiple: raise NotImplementedError("You may only deselect options of a multi-select") for opt in self.options: if opt.get_attribute("index") == str(index): self._unsetSelected(opt) return raise NoSuchElementException("Could not locate element with index %d" % index)
MIT License
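
A usage sketch for the Select wrapper above; the URL, element id, and option text are placeholders, a local Firefox/geckodriver setup is assumed, and the deselect_* calls only succeed on a <select multiple> element:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select

driver = webdriver.Firefox()
driver.get("https://example.org/form")                   # placeholder page
select = Select(driver.find_element(By.ID, "toppings"))  # placeholder element id
select.select_by_visible_text("Bar")
select.deselect_by_visible_text("Bar")   # raises NotImplementedError on single selects
driver.quit()
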
whynothugo/django-afip
django_afip/helpers.py
get_server_status
python
def get_server_status(production: bool) -> ServerStatus: client = clients.get_client("wsfe", not production) response = client.service.FEDummy() return ServerStatus( app=response["AppServer"] == "OK", db=response["DbServer"] == "OK", auth=response["AuthServer"] == "OK", )
Return the status of AFIP's WS servers :param production: Whether to check the production servers. If false, the testing servers will be checked instead.
https://github.com/whynothugo/django-afip/blob/bf0eb4695b702c9f4424de3bfc031bb8740328da/django_afip/helpers.py#L30-L43
from dataclasses import dataclass from django_afip import clients @dataclass(frozen=True) class ServerStatus: app: bool db: bool auth: bool def __bool__(self): return self.app and self.db and self.auth
ISC License
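
Because ServerStatus defines __bool__ as the conjunction of its three flags, callers can gate directly on the object returned by get_server_status. A tiny illustration with made-up values:

status = ServerStatus(app=True, db=True, auth=False)
if not status:
    print("AFIP is degraded:", status)   # auth=False makes the whole status falsy
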
philgyford/django-ditto
ditto/flickr/management/commands/__init__.py
FetchCommand.add_arguments
python
def add_arguments(self, parser): super().add_arguments(parser) parser.add_argument( "--account", action="store", default=False, help=( "The NSID of the Flickr User associated with the one " "Account to fetch for." ), )
All children will have the --account option.
https://github.com/philgyford/django-ditto/blob/bc8b68a839a8f82a0484f3006137a9ca718aa69e/ditto/flickr/management/commands/__init__.py#L12-L24
from django.core.management.base import CommandError from ....core.management.commands import DittoBaseCommand class FetchCommand(DittoBaseCommand):
MIT License
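
add_arguments registers a plain argparse option, so every subclass of FetchCommand accepts --account. The equivalent with a bare ArgumentParser; the NSID value is illustrative:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--account",
    action="store",
    default=False,
    help="The NSID of the Flickr User associated with the one Account to fetch for.",
)
args = parser.parse_args(["--account", "35034346050@N01"])
print(args.account)
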
pallets/flask
src/flask/helpers.py
send_file
python
def send_file( path_or_file: t.Union[os.PathLike, str, t.BinaryIO], mimetype: t.Optional[str] = None, as_attachment: bool = False, download_name: t.Optional[str] = None, attachment_filename: t.Optional[str] = None, conditional: bool = True, etag: t.Union[bool, str] = True, add_etags: t.Optional[bool] = None, last_modified: t.Optional[t.Union[datetime, int, float]] = None, max_age: t.Optional[ t.Union[int, t.Callable[[t.Optional[str]], t.Optional[int]]] ] = None, cache_timeout: t.Optional[int] = None, ): return werkzeug.utils.send_file( **_prepare_send_file_kwargs( path_or_file=path_or_file, environ=request.environ, mimetype=mimetype, as_attachment=as_attachment, download_name=download_name, attachment_filename=attachment_filename, conditional=conditional, etag=etag, add_etags=add_etags, last_modified=last_modified, max_age=max_age, cache_timeout=cache_timeout, ) )
Send the contents of a file to the client. The first argument can be a file path or a file-like object. Paths are preferred in most cases because Werkzeug can manage the file and get extra information from the path. Passing a file-like object requires that the file is opened in binary mode, and is mostly useful when building a file in memory with :class:`io.BytesIO`. Never pass file paths provided by a user. The path is assumed to be trusted, so a user could craft a path to access a file you didn't intend. Use :func:`send_from_directory` to safely serve user-requested paths from within a directory. If the WSGI server sets a ``file_wrapper`` in ``environ``, it is used, otherwise Werkzeug's built-in wrapper is used. Alternatively, if the HTTP server supports ``X-Sendfile``, configuring Flask with ``USE_X_SENDFILE = True`` will tell the server to send the given path, which is much more efficient than reading it in Python. :param path_or_file: The path to the file to send, relative to the current working directory if a relative path is given. Alternatively, a file-like object opened in binary mode. Make sure the file pointer is seeked to the start of the data. :param mimetype: The MIME type to send for the file. If not provided, it will try to detect it from the file name. :param as_attachment: Indicate to a browser that it should offer to save the file instead of displaying it. :param download_name: The default name browsers will use when saving the file. Defaults to the passed file name. :param conditional: Enable conditional and range responses based on request headers. Requires passing a file path and ``environ``. :param etag: Calculate an ETag for the file, which requires passing a file path. Can also be a string to use instead. :param last_modified: The last modified time to send for the file, in seconds. If not provided, it will try to detect it from the file path. :param max_age: How long the client should cache the file, in seconds. If set, ``Cache-Control`` will be ``public``, otherwise it will be ``no-cache`` to prefer conditional caching. .. versionchanged:: 2.0 ``download_name`` replaces the ``attachment_filename`` parameter. If ``as_attachment=False``, it is passed with ``Content-Disposition: inline`` instead. .. versionchanged:: 2.0 ``max_age`` replaces the ``cache_timeout`` parameter. ``conditional`` is enabled and ``max_age`` is not set by default. .. versionchanged:: 2.0 ``etag`` replaces the ``add_etags`` parameter. It can be a string to use instead of generating one. .. versionchanged:: 2.0 Passing a file-like object that inherits from :class:`~io.TextIOBase` will raise a :exc:`ValueError` rather than sending an empty file. .. versionadded:: 2.0 Moved the implementation to Werkzeug. This is now a wrapper to pass some Flask-specific arguments. .. versionchanged:: 1.1 ``filename`` may be a :class:`~os.PathLike` object. .. versionchanged:: 1.1 Passing a :class:`~io.BytesIO` object supports range requests. .. versionchanged:: 1.0.3 Filenames are encoded with ASCII instead of Latin-1 for broader compatibility with WSGI servers. .. versionchanged:: 1.0 UTF-8 filenames as specified in :rfc:`2231` are supported. .. versionchanged:: 0.12 The filename is no longer automatically inferred from file objects. If you want to use automatic MIME and etag support, pass a filename via ``filename_or_fp`` or ``attachment_filename``. .. versionchanged:: 0.12 ``attachment_filename`` is preferred over ``filename`` for MIME detection. .. 
versionchanged:: 0.9 ``cache_timeout`` defaults to :meth:`Flask.get_send_file_max_age`. .. versionchanged:: 0.7 MIME guessing and etag support for file-like objects was deprecated because it was unreliable. Pass a filename if you are able to, otherwise attach an etag yourself. .. versionchanged:: 0.5 The ``add_etags``, ``cache_timeout`` and ``conditional`` parameters were added. The default behavior is to add etags. .. versionadded:: 0.2
https://github.com/pallets/flask/blob/44bc286c03ff3f8e783b4f79f75eb3a464940ca0/src/flask/helpers.py#L496-L627
import os import pkgutil import socket import sys import typing as t import warnings from datetime import datetime from datetime import timedelta from functools import lru_cache from functools import update_wrapper from threading import RLock import werkzeug.utils from werkzeug.exceptions import NotFound from werkzeug.routing import BuildError from werkzeug.urls import url_quote from .globals import _app_ctx_stack from .globals import _request_ctx_stack from .globals import current_app from .globals import request from .globals import session from .signals import message_flashed if t.TYPE_CHECKING: from .wrappers import Response def get_env() -> str: return os.environ.get("FLASK_ENV") or "production" def get_debug_flag() -> bool: val = os.environ.get("FLASK_DEBUG") if not val: return get_env() == "development" return val.lower() not in ("0", "false", "no") def get_load_dotenv(default: bool = True) -> bool: val = os.environ.get("FLASK_SKIP_DOTENV") if not val: return default return val.lower() in ("0", "false", "no") def stream_with_context( generator_or_function: t.Union[ t.Iterator[t.AnyStr], t.Callable[..., t.Iterator[t.AnyStr]] ] ) -> t.Iterator[t.AnyStr]: try: gen = iter(generator_or_function) except TypeError: def decorator(*args: t.Any, **kwargs: t.Any) -> t.Any: gen = generator_or_function(*args, **kwargs) return stream_with_context(gen) return update_wrapper(decorator, generator_or_function) def generator() -> t.Generator: ctx = _request_ctx_stack.top if ctx is None: raise RuntimeError( "Attempted to stream with context but " "there was no context in the first place to keep around." ) with ctx: yield None try: yield from gen finally: if hasattr(gen, "close"): gen.close() wrapped_g = generator() next(wrapped_g) return wrapped_g def make_response(*args: t.Any) -> "Response": if not args: return current_app.response_class() if len(args) == 1: args = args[0] return current_app.make_response(args) def url_for(endpoint: str, **values: t.Any) -> str: appctx = _app_ctx_stack.top reqctx = _request_ctx_stack.top if appctx is None: raise RuntimeError( "Attempted to generate a URL without the application context being" " pushed. This has to be executed when application context is" " available." ) if reqctx is not None: url_adapter = reqctx.url_adapter blueprint_name = request.blueprint if endpoint[:1] == ".": if blueprint_name is not None: endpoint = f"{blueprint_name}{endpoint}" else: endpoint = endpoint[1:] external = values.pop("_external", False) else: url_adapter = appctx.url_adapter if url_adapter is None: raise RuntimeError( "Application was not able to create a URL adapter for request" " independent URL generation. You might be able to fix this by" " setting the SERVER_NAME config variable." 
) external = values.pop("_external", True) anchor = values.pop("_anchor", None) method = values.pop("_method", None) scheme = values.pop("_scheme", None) appctx.app.inject_url_defaults(endpoint, values) old_scheme = None if scheme is not None: if not external: raise ValueError("When specifying _scheme, _external must be True") old_scheme = url_adapter.url_scheme url_adapter.url_scheme = scheme try: try: rv = url_adapter.build( endpoint, values, method=method, force_external=external ) finally: if old_scheme is not None: url_adapter.url_scheme = old_scheme except BuildError as error: values["_external"] = external values["_anchor"] = anchor values["_method"] = method values["_scheme"] = scheme return appctx.app.handle_url_build_error(error, endpoint, values) if anchor is not None: rv += f"#{url_quote(anchor)}" return rv def get_template_attribute(template_name: str, attribute: str) -> t.Any: return getattr(current_app.jinja_env.get_template(template_name).module, attribute) def flash(message: str, category: str = "message") -> None: flashes = session.get("_flashes", []) flashes.append((category, message)) session["_flashes"] = flashes message_flashed.send( current_app._get_current_object(), message=message, category=category, ) def get_flashed_messages( with_categories: bool = False, category_filter: t.Iterable[str] = () ) -> t.Union[t.List[str], t.List[t.Tuple[str, str]]]: flashes = _request_ctx_stack.top.flashes if flashes is None: _request_ctx_stack.top.flashes = flashes = ( session.pop("_flashes") if "_flashes" in session else [] ) if category_filter: flashes = list(filter(lambda f: f[0] in category_filter, flashes)) if not with_categories: return [x[1] for x in flashes] return flashes def _prepare_send_file_kwargs( download_name: t.Optional[str] = None, attachment_filename: t.Optional[str] = None, etag: t.Optional[t.Union[bool, str]] = None, add_etags: t.Optional[t.Union[bool]] = None, max_age: t.Optional[ t.Union[int, t.Callable[[t.Optional[str]], t.Optional[int]]] ] = None, cache_timeout: t.Optional[int] = None, **kwargs: t.Any, ) -> t.Dict[str, t.Any]: if attachment_filename is not None: warnings.warn( "The 'attachment_filename' parameter has been renamed to" " 'download_name'. The old name will be removed in Flask" " 2.1.", DeprecationWarning, stacklevel=3, ) download_name = attachment_filename if cache_timeout is not None: warnings.warn( "The 'cache_timeout' parameter has been renamed to" " 'max_age'. The old name will be removed in Flask 2.1.", DeprecationWarning, stacklevel=3, ) max_age = cache_timeout if add_etags is not None: warnings.warn( "The 'add_etags' parameter has been renamed to 'etag'. The" " old name will be removed in Flask 2.1.", DeprecationWarning, stacklevel=3, ) etag = add_etags if max_age is None: max_age = current_app.get_send_file_max_age kwargs.update( environ=request.environ, download_name=download_name, etag=etag, max_age=max_age, use_x_sendfile=current_app.use_x_sendfile, response_class=current_app.response_class, _root_path=current_app.root_path, ) return kwargs
BSD 3-Clause New or Revised License
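
A minimal route using send_file as documented above; the file path and URL rule are placeholders:

from flask import Flask, send_file

app = Flask(__name__)

@app.route("/report")
def report():
    # the path is resolved relative to the current working directory
    return send_file("report.pdf", as_attachment=True, download_name="report.pdf")

if __name__ == "__main__":
    app.run()
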
khan/tinyquery
tinyquery/parser.py
p_parenthesized_star
python
def p_parenthesized_star(p):
parenthesized_star : STAR | LPAREN parenthesized_star RPAREN
https://github.com/khan/tinyquery/blob/9382b18b4095d71c545f4d9da2b518efa21ac0e5/tinyquery/parser.py#L349-L351
from __future__ import absolute_import import os from ply import yacc from tinyquery import tq_ast from tinyquery import lexer tokens = lexer.tokens precedence = ( ('left', 'AND', 'OR'), ('left', 'EQUALS', 'NOT_EQUAL', 'GREATER_THAN', 'LESS_THAN', 'GREATER_THAN_OR_EQUAL', 'LESS_THAN_OR_EQUAL', 'IS'), ('left', 'PLUS', 'MINUS'), ('left', 'STAR', 'DIVIDED_BY', 'MOD', 'CONTAINS', 'IN'), ) def p_select(p): if len(p) == 4: p[0] = tq_ast.Select(p[2], None, None, None, None, None, p[3], None) elif len(p) == 10: p[0] = tq_ast.Select(p[2], p[4], p[5], p[6], p[7], p[8], p[9], None) else: assert False, 'Unexpected number of captured tokens.' def p_optional_where(p): if len(p) == 1: p[0] = None else: p[0] = p[2] def p_optional_having(p): if len(p) == 1: p[0] = None else: p[0] = p[2] def p_optional_group_by(p): if len(p) == 1: p[0] = None else: p[0] = p[len(p) - 1] def p_optional_order_by(p): if len(p) == 1: p[0] = None else: p[0] = p[3] def p_order_by_list(p): p[0] = p[1] def p_strict_order_by_list(p): if len(p) == 2: p[0] = [p[1]] else: p[1].append(p[3]) p[0] = p[1] def p_ordering_asc(p): p[0] = tq_ast.Ordering(p[1], True) def p_ordering_desc(p): p[0] = tq_ast.Ordering(p[1], False) def p_column_id_list(p): p[0] = p[1] def p_strict_column_id_list(p): if len(p) == 2: p[0] = [p[1]] else: p[1].append(p[3]) p[0] = p[1] def p_optional_limit(p): if len(p) == 1: p[0] = None else: p[0] = p[2] def p_table_expr_table_or_union(p): if len(p[1]) == 1: p[0] = p[1][0] else: p[0] = tq_ast.TableUnion(p[1]) def p_non_cross_join(p): if p[1].upper() == 'LEFT': p[0] = tq_ast.JoinType.LEFT_OUTER else: p[0] = tq_ast.JoinType.INNER def p_cross_join(p): p[0] = tq_ast.JoinType.CROSS def p_partial_join(p): if p[1] is tq_ast.JoinType.CROSS: p[0] = tq_ast.PartialJoin(p[2], p[1], None) else: p[0] = tq_ast.PartialJoin(p[2], p[1], p[4]) def p_join_tail(p): if len(p) == 2: p[0] = [p[1]] else: p[0] = [p[1]] + p[2] def p_join(p): p[0] = tq_ast.Join(p[1], p[2]) def p_aliased_table_expr_list(p): p[0] = p[1] def p_strict_aliased_table_expr_list(p): if len(p) == 2: p[0] = [p[1]] else: p[1].append(p[3]) p[0] = p[1] def p_aliased_table_expr(p): if len(p) == 2: p[0] = p[1] else: if isinstance(p[1], tq_ast.TableId): p[0] = tq_ast.TableId(p[1].name, p[len(p) - 1]) elif isinstance(p[1], tq_ast.Select): p[0] = tq_ast.Select(p[1].select_fields, p[1].table_expr, p[1].where_expr, p[1].groups, p[1].having_expr, p[1].orderings, p[1].limit, p[len(p) - 1]) else: assert False, 'Unexpected table_expr type: %s' % type(p[1]) def p_table_id(p): p[0] = tq_ast.TableId(p[1], None) def p_select_table_expression(p): p[0] = p[1] def p_table_expression_parens(p): p[0] = p[2] def p_select_field_list(p): p[0] = p[1] def p_strict_select_field_list(p): if len(p) == 2: p[0] = [p[1]] else: p[1].append(p[3]) p[0] = p[1] def p_select_field(p): within_record_type = None if len(p) > 2: alias = p[len(p) - 1] if len(p) > 3: if 'within' in p: within_record_type = p[len(p) - 3] if within_record_type == 'record': within_record_type = within_record_type.upper() else: within_record_type = within_record_type.name else: alias = None p[0] = tq_ast.SelectField(p[1], alias, within_record_type) def p_select_star(p): p[0] = tq_ast.Star() def p_expression_parens(p): p[0] = p[2] def p_expression_is_null(p): p[0] = tq_ast.UnaryOperator('is_null', p[1]) def p_expression_is_not_null(p): p[0] = tq_ast.UnaryOperator('is_not_null', p[1]) def p_expression_unary(p): p[0] = tq_ast.UnaryOperator(p[1], p[2]) def p_expression_binary(p): p[0] = tq_ast.BinaryOperator(p[2], p[1], p[3]) def 
p_expression_func_call(p): p[0] = tq_ast.FunctionCall(p[1].lower(), p[3]) def p_expression_count(p): p[0] = tq_ast.FunctionCall('count', p[3]) def p_expression_count_distinct(p): p[0] = tq_ast.FunctionCall('count_distinct', p[4]) def p_expression_count_star(p): p[0] = tq_ast.FunctionCall('count', [tq_ast.Literal(1)])
MIT License
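
In ply.yacc the grammar production lives in the rule function's docstring, which is exactly what p_parenthesized_star does above. A self-contained toy grammar showing the same convention; the token set and the parenthesized-number language are made up for illustration:

import ply.lex as lex
import ply.yacc as yacc

tokens = ('NUMBER', 'LPAREN', 'RPAREN')
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

def p_expr_number(p):
    """expr : NUMBER"""
    p[0] = p[1]

def p_expr_parens(p):
    """expr : LPAREN expr RPAREN"""
    p[0] = p[2]

def p_error(p):
    raise SyntaxError(p)

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse("((42))", lexer=lexer))   # -> 42
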
lucasdavid/wikiart
wikiart/fetcher.py
WikiArtFetcher.copy_everything
python
def copy_everything(self): Logger.write('\nCopying paintings:') if not self.painting_groups: raise RuntimeError('Painting groups not found. Cannot continue.') show_progress_at = max(1, int(.1 * len(self.painting_groups))) for i, group in enumerate(self.painting_groups): for painting in group: self.download_hard_copy(painting) if i % show_progress_at == 0: Logger.info('%i%% done' % (100 * (i + 1) // len(self.painting_groups))) return self
Download A Copy of Every Single Painting.
https://github.com/lucasdavid/wikiart/blob/fe1d6e1d7323b1cc67316c782f24fc1e59aee717/wikiart/fetcher.py#L205-L221
import json import os import shutil import time import urllib.error import urllib.request import requests from . import settings, base from .base import Logger class WikiArtFetcher: def __init__(self, commit=True, override=False, padder=None): self.commit = commit self.override = override self.padder = padder or base.RequestPadder() self.artists = None self.painting_groups = None def prepare(self): os.makedirs(settings.BASE_FOLDER, exist_ok=True) os.makedirs(os.path.join(settings.BASE_FOLDER, 'meta'), exist_ok=True) os.makedirs(os.path.join(settings.BASE_FOLDER, 'images'), exist_ok=True) return self def check(self, only='all'): Logger.info('Checking downloaded data...') base_dir = settings.BASE_FOLDER meta_dir = os.path.join(base_dir, 'meta') imgs_dir = os.path.join(base_dir, 'images') if only in ('artists', 'all'): if not os.path.exists(os.path.join(meta_dir, 'artists.json')): Logger.warning('artists.json is missing.') if only in ('paintings', 'all'): for artist in self.artists: filename = os.path.join(meta_dir, artist['url'] + '.json') if not os.path.exists(filename): Logger.warning('%s\'s paintings file is missing.' % artist['url']) for group in self.painting_groups: for painting in group: filename = os.path.join(imgs_dir, str(painting['contentId']) + settings.SAVE_IMAGES_IN_FORMAT) if not os.path.exists(filename): Logger.warning('painting %i is missing.' % painting['contentId']) return self def getauthentication(self): params = {} params['accessCode'] = input('Please enter the Access code from https://www.wikiart.org/en/App/GetApi :') params['secretCode'] = input("Enter the Secret code :") url = 'https://www.wikiart.org/en/Api/2/login' try: response = requests.get(url, params=params, timeout=settings.METADATA_REQUEST_TIMEOUT) response.raise_for_status() data = response.json() return data['SessionKey'] except Exception as error: Logger.write('Error %s' % str(error)) def fetch_all(self): return (self.fetch_artists() .fetch_all_paintings() .copy_everything()) def fetch_artists(self): Logger.info('Fetching artists...', end=' ', flush=True) path = os.path.join(settings.BASE_FOLDER, 'meta', 'artists.json') if os.path.exists(path) and not self.override: with open(path, encoding='utf-8') as f: self.artists = json.load(f) Logger.info('skipped') return self elapsed = time.time() try: url = '/'.join((settings.BASE_URL, 'Artist/AlphabetJson')) params = {'v' : 'new', 'inPublicDomain' : 'true'} response = requests.get(url, timeout=settings.METADATA_REQUEST_TIMEOUT, params=params) response.raise_for_status() self.artists = response.json() if self.commit: with open(path, 'w', encoding='utf-8') as f: json.dump(self.artists, f, indent=4, ensure_ascii=False) Logger.write('Done (%.2f sec)' % (time.time() - elapsed)) except Exception as error: Logger.write('Error %s' % str(error)) return self def fetch_all_paintings(self): Logger.write('\nFetching paintings for every artist:') if not self.artists: raise RuntimeError('No artists defined. 
Cannot continue.') self.painting_groups = [] show_progress_at = max(1, int(.1 * len(self.artists))) for i, artist in enumerate(self.artists): self.painting_groups.append(self.fetch_paintings(artist)) if i % show_progress_at == 0: Logger.info('%i%% done' % (100 * (i + 1) // len(self.artists))) return self def fetch_paintings(self, artist): Logger.write('|- %s\'s paintings' % artist['artistName'], end='', flush=True) elapsed = time.time() meta_folder = os.path.join(settings.BASE_FOLDER, 'meta') url = '/'.join((settings.BASE_URL, 'Painting', 'PaintingsByArtist')) params = {'artistUrl': artist['url'], 'json': 2} filename = os.path.join(meta_folder, artist['url'] + '.json') if os.path.exists(filename) and not self.override: with open(filename, 'r', encoding='utf-8') as f: data = json.load(f) Logger.write(' (s)') return data try: response = requests.get( url, params=params, timeout=settings.METADATA_REQUEST_TIMEOUT) response.raise_for_status() data = response.json() for painting in data: url = '/'.join((settings.BASE_URL, 'Painting', 'ImageJson', str(painting['contentId']))) self.padder.request_start() response = requests.get( url, timeout=settings.METADATA_REQUEST_TIMEOUT) self.padder.request_finished() if response.ok: painting.update(response.json()) Logger.write('.', end='', flush=True) if self.commit: with open(filename, 'w', encoding='utf-8') as f: json.dump(data, f, indent=4, ensure_ascii=False) Logger.write(' Done (%.2f sec)' % (time.time() - elapsed)) return data except (IOError, urllib.error.HTTPError) as e: Logger.write(' Failed (%s)' % str(e)) return []
MIT License
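
copy_everything only logs progress every ~10% of the painting groups rather than on every iteration. The same throttled-progress idiom on its own; the work loop is a stand-in:

items = list(range(237))                      # stand-in for the painting groups
show_progress_at = max(1, int(.1 * len(items)))

for i, item in enumerate(items):
    # the download / copy work would happen here
    if i % show_progress_at == 0:
        print('%i%% done' % (100 * (i + 1) // len(items)))
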
gaasedelen/lighthouse
plugins/lighthouse/ui/coverage_table.py
CoverageTableModel.columnCount
python
def columnCount(self, index=QtCore.QModelIndex()): return len(self.COLUMN_HEADERS)
The number of table columns.
https://github.com/gaasedelen/lighthouse/blob/7245a2d2c4e84351cd259ed81dafa4263167909a/plugins/lighthouse/ui/coverage_table.py#L798-L802
import os import time import string import logging from operator import itemgetter, attrgetter from lighthouse.util import lmsg from lighthouse.util.qt import * from lighthouse.util.python import * from lighthouse.util.misc import mainthread from lighthouse.util.disassembler import disassembler from lighthouse.coverage import FunctionCoverage, BADADDR logger = logging.getLogger("Lighthouse.UI.Table") class CoverageTableView(QtWidgets.QTableView): def __init__(self, controller, model, parent=None): super(CoverageTableView, self).__init__(parent) self.setObjectName(self.__class__.__name__) self._controller = controller self._model = model self.setModel(self._model) self._ui_init() self.refresh_theme() @disassembler.execute_ui def refresh_theme(self): palette = self._model.lctx.palette self.setStyleSheet( "QTableView {" " gridline-color: %s;" % palette.table_grid.name() + " background-color: %s;" % palette.table_background.name() + " color: %s;" % palette.table_text.name() + " outline: none; " "} " + "QHeaderView::section { " " padding: 1ex;" " margin: 0;" "} " + "QTableView::item:selected {" " color: white; " " background-color: %s;" % palette.table_selection.name() + "}" ) def keyPressEvent(self, event): if event.key() == QtCore.Qt.Key_J: event = remap_key_event(event, QtCore.Qt.Key_Down) elif event.key() == QtCore.Qt.Key_K: event = remap_key_event(event, QtCore.Qt.Key_Up) elif event.key() == QtCore.Qt.Key_H: event = remap_key_event(event, QtCore.Qt.Key_Left) elif event.key() == QtCore.Qt.Key_L: event = remap_key_event(event, QtCore.Qt.Key_Right) super(CoverageTableView, self).keyPressEvent(event) self.repaint() flush_qt_events() def _ui_init(self): self._ui_init_table() self._ui_init_table_ctx_menu_actions() self._ui_init_header_ctx_menu_actions() self._ui_init_signals() def _ui_init_table(self): self.setFocusPolicy(QtCore.Qt.StrongFocus) self.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel) self.setMinimumHeight(0) self.setSizePolicy( QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored ) title_font = self._model.headerData(0, QtCore.Qt.Horizontal, QtCore.Qt.FontRole) title_fm = QtGui.QFontMetricsF(title_font) entry_font = self._model.data(0, QtCore.Qt.FontRole) entry_fm = QtGui.QFontMetricsF(entry_font) entry_font = self._model.data(0, QtCore.Qt.FontRole) entry_fm = QtGui.QFontMetricsF(entry_font) for i in xrange(self._model.columnCount()): title_rect = self._model.headerData(i, QtCore.Qt.Horizontal, QtCore.Qt.SizeHintRole) entry_text = self._model.SAMPLE_CONTENTS[i] entry_rect = entry_fm.boundingRect(entry_text) column_width = max(title_rect.width(), entry_rect.width()*1.2) self.setColumnWidth(i, column_width) self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows) vh = self.verticalHeader() hh = self.horizontalHeader() vh.hide() hh.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch) hh.setHighlightSections(False) self.setSortingEnabled(True) hh.setSortIndicator( CoverageTableModel.FUNC_ADDR, QtCore.Qt.AscendingOrder ) vh.setSectionResizeMode(QtWidgets.QHeaderView.Fixed) spacing = entry_fm.height() - entry_fm.xHeight() tweak = (17*get_dpi_scale() - spacing)/get_dpi_scale() vh.setDefaultSectionSize(entry_fm.height()+tweak) def _ui_init_table_ctx_menu_actions(self): self._action_rename = QtWidgets.QAction("Rename", None) self._action_copy_name = QtWidgets.QAction("Copy name", None) self._action_copy_address = QtWidgets.QAction("Copy address", None) self._action_copy_name_and_address = QtWidgets.QAction("Copy name and address", None) 
self._action_copy_names = QtWidgets.QAction("Copy names", None) self._action_copy_addresses = QtWidgets.QAction("Copy addresses", None) self._action_copy_names_and_addresses = QtWidgets.QAction("Copy names and addresses", None) self._action_prefix = QtWidgets.QAction("Prefix selected functions", None) self._action_clear_prefix = QtWidgets.QAction("Clear prefixes", None) def _ui_init_header_ctx_menu_actions(self): self._action_alignment = QtWidgets.QAction("Center Aligned", None) self._action_alignment.setCheckable(True) self._action_alignment.setChecked(True) def _ui_init_signals(self): self.doubleClicked.connect(self._ui_entry_double_click) self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.customContextMenuRequested.connect(self._ui_table_ctx_menu_handler) hh = self.horizontalHeader() hh.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) hh.customContextMenuRequested.connect(self._ui_header_ctx_menu_handler) def _ui_entry_double_click(self, index): self._controller.navigate_to_function(index.row()) def _ui_table_ctx_menu_handler(self, position): ctx_menu = self._populate_table_ctx_menu() if not ctx_menu: return if USING_PYSIDE6: exec_func = getattr(ctx_menu, "exec") else: exec_func = getattr(ctx_menu, "exec_") action = exec_func(self.viewport().mapToGlobal(position)) self._process_table_ctx_menu_action(action) def _ui_header_ctx_menu_handler(self, position): hh = self.horizontalHeader() column = hh.logicalIndexAt(position) ctx_menu = self._populate_header_ctx_menu() if not ctx_menu: return if USING_PYSIDE6: exec_func = getattr(ctx_menu, "exec") else: exec_func = getattr(ctx_menu, "exec_") action = exec_func(hh.viewport().mapToGlobal(position)) self._process_header_ctx_menu_action(action, column) def _populate_table_ctx_menu(self): selected_rows = self.selectionModel().selectedRows() if len(selected_rows) == 0: return None ctx_menu = QtWidgets.QMenu() if len(selected_rows) == 1: ctx_menu.addAction(self._action_rename) ctx_menu.addSeparator() ctx_menu.addAction(self._action_copy_name) ctx_menu.addAction(self._action_copy_address) ctx_menu.addAction(self._action_copy_name_and_address) ctx_menu.addSeparator() else: ctx_menu.addAction(self._action_copy_names) ctx_menu.addAction(self._action_copy_addresses) ctx_menu.addAction(self._action_copy_names_and_addresses) ctx_menu.addSeparator() ctx_menu.addAction(self._action_prefix) ctx_menu.addAction(self._action_clear_prefix) return ctx_menu def _process_table_ctx_menu_action(self, action): if not action: return row_indexes = self.selectionModel().selectedRows() rows = [index.row() for index in row_indexes] if len(rows) == 0: return if action == self._action_rename and len(rows) == 1: self._controller.rename_table_function(rows[0]) elif action in [self._action_copy_name, self._action_copy_names]: self._controller.copy_name(rows) elif action in [self._action_copy_address, self._action_copy_addresses]: self._controller.copy_address(rows) elif action in [self._action_copy_name_and_address, self._action_copy_names_and_addresses]: self._controller.copy_name_and_address(rows) elif action == self._action_prefix: self._controller.prefix_table_functions(rows) elif action == self._action_clear_prefix: self._controller.clear_function_prefixes(rows) def _populate_header_ctx_menu(self): ctx_menu = QtWidgets.QMenu() ctx_menu.addAction(self._action_alignment) return ctx_menu def _process_header_ctx_menu_action(self, action, column): if not action: return if action == self._action_alignment: self._controller.toggle_column_alignment(column) class 
CoverageTableController(object): def __init__(self, lctx, model): self.lctx = lctx self._model = model self._last_directory = None @mainthread def rename_table_function(self, row): function_address = self._model.row2func[row] original_name = disassembler[self.lctx].get_function_raw_name_at(function_address) ok, new_name = prompt_string( "Please enter function name", "Rename Function", original_name ) if not (ok or new_name != original_name): return disassembler[self.lctx].set_function_name_at(function_address, new_name) @mainthread def prefix_table_functions(self, rows): ok, prefix = prompt_string( "Please enter a function prefix", "Prefix Function(s)", "MyPrefix" ) if not (ok and prefix): return function_addresses = self._get_function_addresses(rows) disassembler[self.lctx].prefix_functions(function_addresses, prefix) @mainthread def clear_function_prefixes(self, rows): function_addresses = self._get_function_addresses(rows) disassembler[self.lctx].clear_prefixes(function_addresses) @mainthread def copy_name(self, rows): model = self._model function_names = "" for row_number in rows: name_index = model.index(row_number, model.FUNC_NAME) function_names += model.data(name_index) function_names += "\n" copy_to_clipboard(function_names.rstrip()) return function_names @mainthread def copy_address(self, rows): model = self._model address_string = "" for row_number in rows: addr_index = model.index(row_number, model.FUNC_ADDR) address_string += model.data(addr_index) address_string += "\n" copy_to_clipboard(address_string.rstrip()) return address_string @mainthread def copy_name_and_address(self, rows): model = self._model function_name_and_address = "" for row_number in rows: name_index = model.index(row_number, model.FUNC_NAME) addr_index = model.index(row_number, model.FUNC_ADDR) function_name_and_address += model.data(addr_index) function_name_and_address += " " function_name_and_address += model.data(name_index) function_name_and_address += "\n" copy_to_clipboard(function_name_and_address.rstrip()) return function_name_and_address def navigate_to_function(self, row): function_address = self._model.row2func[row] function_coverage = self.lctx.director.coverage.functions.get(function_address, None) if function_coverage: if function_address in function_coverage.nodes: target_address = function_address else: target_address = sorted(function_coverage.nodes)[0] else: target_address = function_address disassembler[self.lctx].navigate_to_function(function_address, target_address) def toggle_column_alignment(self, column): index = self._model.index(0, column) alignment = self._model.data(index, QtCore.Qt.TextAlignmentRole) if alignment == QtCore.Qt.AlignCenter: new_alignment = QtCore.Qt.AlignVCenter else: new_alignment = QtCore.Qt.AlignCenter self._model.set_column_alignment(column, new_alignment) def export_to_html(self): if not self._last_directory: self._last_directory = disassembler[self.lctx].get_database_directory() name, _ = os.path.splitext(self.lctx.director.coverage_name) filename = name + ".html" suggested_filepath = os.path.join(self._last_directory, filename) file_dialog = QtWidgets.QFileDialog() file_dialog.setFileMode(QtWidgets.QFileDialog.AnyFile) kwargs = { "filter": "HTML Files (*.html)", "caption": "Save HTML Report", "directory": suggested_filepath } filename, _ = file_dialog.getSaveFileName(**kwargs) if not filename: return self._last_directory = os.path.dirname(filename) + os.sep with open(filename, "w") as fd: fd.write(self._model.to_html()) lmsg("Saved HTML report to %s" % 
filename) def _get_function_addresses(self, rows): function_addresses = [] for row_number in rows: address = self._model.row2func[row_number] function_addresses.append(address) return function_addresses class CoverageTableModel(QtCore.QAbstractTableModel): COV_PERCENT = 0 FUNC_NAME = 1 FUNC_ADDR = 2 BLOCKS_HIT = 3 INST_HIT = 4 FUNC_SIZE = 5 COMPLEXITY = 6 METADATA_ATTRIBUTES = [FUNC_NAME, FUNC_ADDR, FUNC_SIZE, COMPLEXITY] COVERAGE_ATTRIBUTES = [COV_PERCENT, BLOCKS_HIT, INST_HIT] COLUMN_TO_FIELD = { COV_PERCENT: "instruction_percent", FUNC_NAME: "name", FUNC_ADDR: "address", BLOCKS_HIT: "nodes_executed", INST_HIT: "instructions_executed", FUNC_SIZE: "size", COMPLEXITY: "cyclomatic_complexity" } COLUMN_HEADERS = { COV_PERCENT: "Cov %", FUNC_NAME: "Func Name", FUNC_ADDR: "Address", BLOCKS_HIT: "Blocks Hit", INST_HIT: "Instr. Hit", FUNC_SIZE: "Func Size", COMPLEXITY: "CC", } COLUMN_TOOLTIPS = { COV_PERCENT: "Coverage Percent", FUNC_NAME: "Function Name", FUNC_ADDR: "Function Address", BLOCKS_HIT: "Number of Basic Blocks Executed", INST_HIT: "Number of Instructions Executed", FUNC_SIZE: "Function Size (bytes)", COMPLEXITY: "Cyclomatic Complexity", } SAMPLE_CONTENTS = [ " 100.00 ", " sub_140001B20 ", " 0x140001b20 ", " 100 / 100 ", " 1000 / 1000 ", " 100000 ", " 1000 ", ] def __init__(self, lctx, parent=None): super(CoverageTableModel, self).__init__(parent) self.lctx = lctx self._director = lctx.director self.row2func = {} self._row_count = 0 self._no_coverage = [] self._visible_metadata = {} self._visible_coverage = {} self._blank_coverage = FunctionCoverage(BADADDR) self._blank_coverage.coverage_color = lctx.palette.table_coverage_none self._default_alignment = QtCore.Qt.AlignCenter self._column_alignment = [ self._default_alignment for x in self.COLUMN_HEADERS ] self.set_column_alignment(self.FUNC_NAME, QtCore.Qt.AlignVCenter) self._entry_font = MonospaceFont() if not USING_PYSIDE6: self._entry_font.setStyleStrategy(QtGui.QFont.ForceIntegerMetrics) self._entry_font.setPointSizeF(normalize_to_dpi(10)) self._title_font = QtGui.QFont() self._title_font.setPointSizeF(normalize_to_dpi(10)) self._last_sort = self.FUNC_ADDR self._last_sort_order = QtCore.Qt.AscendingOrder self._hide_zero = False self._search_string = "" self._director.coverage_switched(self._internal_refresh) self._director.coverage_modified(self._internal_refresh) self._director.metadata.function_renamed(self._data_changed) def refresh_theme(self): self._blank_coverage.coverage_color = self.lctx.palette.table_coverage_none self._data_changed() def flags(self, index): return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable def rowCount(self, index=QtCore.QModelIndex()): return self._row_count
MIT License
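
rowCount and columnCount are the two sizing hooks Qt calls on any QAbstractTableModel, which is all CoverageTableModel.columnCount is serving. A minimal model showing where they fit, sketched against PyQt5 (Lighthouse itself resolves Qt through its own compatibility shim, so the exact binding may differ):

from PyQt5 import QtCore

class TinyTableModel(QtCore.QAbstractTableModel):
    HEADERS = ["Func Name", "Address"]

    def __init__(self, rows, parent=None):
        super().__init__(parent)
        self._rows = rows

    def rowCount(self, index=QtCore.QModelIndex()):
        return len(self._rows)

    def columnCount(self, index=QtCore.QModelIndex()):
        # same contract as CoverageTableModel.columnCount above
        return len(self.HEADERS)

    def data(self, index, role=QtCore.Qt.DisplayRole):
        if role == QtCore.Qt.DisplayRole:
            return self._rows[index.row()][index.column()]
        return None

model = TinyTableModel([("sub_140001B20", "0x140001b20")])
print(model.rowCount(), model.columnCount())   # 1 2
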
helios-protocol/py-helios-node
hvm/chains/header.py
HeaderChain.import_header
python
def import_header(self, header: BlockHeader) -> Tuple[BlockHeader, ...]: new_canonical_headers = self.headerdb.persist_header(header) self.header = self.get_canonical_head() return new_canonical_headers
Direct passthrough to `headerdb` Also updates the local `header` property to be the latest canonical head. Returns an iterable of headers representing the headers that are newly part of the canonical chain. - If the imported header is not part of the canonical chain then an empty tuple will be returned. - If the imported header simply extends the canonical chain then a length-1 tuple with the imported header will be returned. - If the header is part of a non-canonical chain which overtakes the current canonical chain then the returned tuple will contain the headers which are newly part of the canonical chain.
https://github.com/helios-protocol/py-helios-node/blob/691b378938f0a36bf8774dc1ee4e4370b6cf7c63/hvm/chains/header.py#L154-L173
from abc import ABCMeta, abstractmethod from typing import Dict, Any, Tuple, Type from eth_typing import ( BlockNumber, Hash32, ) from hvm.db.backends.base import BaseDB from hvm.db.header import ( BaseHeaderDB, HeaderDB, ) from hvm.rlp.headers import BlockHeader from hvm.utils.datatypes import ( Configurable, ) from hvm.vm.base import BaseVM class BaseHeaderChain(Configurable, metaclass=ABCMeta): _base_db = None _headerdb_class = None _headerdb = None header = None network_id = None vm_configuration = None @abstractmethod def __init__(self, base_db: BaseDB, header: BlockHeader=None) -> None: raise NotImplementedError("Chain classes must implement this method") @classmethod @abstractmethod def from_genesis_header(cls, base_db: BaseDB, genesis_header: BlockHeader) -> 'BaseHeaderChain': raise NotImplementedError("Chain classes must implement this method") @classmethod @abstractmethod def get_headerdb_class(cls): raise NotImplementedError("Chain classes must implement this method") @abstractmethod def get_canonical_block_header_by_number(self, block_number: BlockNumber) -> BlockHeader: raise NotImplementedError("Chain classes must implement this method") @abstractmethod def get_canonical_head(self) -> BlockHeader: raise NotImplementedError("Chain classes must implement this method") @abstractmethod def get_block_header_by_hash(self, block_hash: Hash32) -> BlockHeader: raise NotImplementedError("Chain classes must implement this method") @abstractmethod def header_exists(self, block_hash: Hash32) -> bool: raise NotImplementedError("Chain classes must implement this method") @abstractmethod def import_header(self, header: BlockHeader) -> Tuple[BlockHeader, ...]: raise NotImplementedError("Chain classes must implement this method") class HeaderChain(BaseHeaderChain): _headerdb_class = HeaderDB def __init__(self, base_db: BaseDB, header: BlockHeader=None) -> None: self.base_db = base_db self.headerdb = self.get_headerdb_class()(base_db) if header is None: self.header = self.get_canonical_head() else: self.header = header @classmethod def from_genesis_header(cls, base_db: BaseDB, genesis_header: BlockHeader) -> 'BaseHeaderChain': headerdb = cls.get_headerdb_class()(base_db) headerdb.persist_header(genesis_header) return cls(base_db, genesis_header) @classmethod def get_headerdb_class(cls): if cls._headerdb_class is None: raise AttributeError("HeaderChain classes must set a `headerdb_class`") return cls._headerdb_class def get_canonical_block_hash(self, block_number: BlockNumber) -> Hash32: return self.headerdb.get_canonical_block_hash(block_number) def get_canonical_block_header_by_number(self, block_number: BlockNumber) -> BlockHeader: return self.headerdb.get_canonical_block_header_by_number(block_number) def get_canonical_head(self) -> BlockHeader: return self.headerdb.get_canonical_head() def get_block_header_by_hash(self, block_hash: Hash32) -> BlockHeader: return self.headerdb.get_block_header_by_hash(block_hash) def header_exists(self, block_hash: Hash32) -> bool: return self.headerdb.header_exists(block_hash)
MIT License
delfick/harpoon
harpoon/dockerpty/io.py
Stream.read
python
def read(self, n=4096): while True: try: if hasattr(self.fd, "recv"): return self.fd.recv(n) return os.read(self.fd.fileno(), n) except EnvironmentError as e: if e.errno not in Stream.ERRNO_RECOVERABLE: raise e
Return `n` bytes of data from the Stream, or None at end of stream.
https://github.com/delfick/harpoon/blob/7120cb25a165cc7c50187794d5ed7a90075ad70a/harpoon/dockerpty/io.py#L102-L114
import os import fcntl import errno import struct import select as builtin_select def set_blocking(fd, blocking=True): old_flag = fcntl.fcntl(fd, fcntl.F_GETFL) if blocking: new_flag = old_flag & ~os.O_NONBLOCK else: new_flag = old_flag | os.O_NONBLOCK fcntl.fcntl(fd, fcntl.F_SETFL, new_flag) return not bool(old_flag & os.O_NONBLOCK) def select(read_streams, write_streams, timeout=0): exception_streams = [] try: return builtin_select.select(read_streams, write_streams, exception_streams, timeout)[0:2] except builtin_select.error as e: no = e.errno if no == errno.EINTR: return ([], []) else: raise e class Stream(object): """ Recoverable IO/OS Errors. """ ERRNO_RECOVERABLE = [errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK] def __init__(self, fd): self.fd = fd self.buffer = b"" self.close_requested = False self.closed = False def fileno(self): return self.fd.fileno() def set_blocking(self, value): if hasattr(self.fd, "setblocking"): self.fd.setblocking(value) return True else: return set_blocking(self.fd, value)
MIT License
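
Stream.read retries reads that fail with one of the errnos the class marks as recoverable (EINTR, EDEADLK, EWOULDBLOCK). The retry loop on its own, exercised against a plain pipe; the payload is illustrative:

import errno
import os

RECOVERABLE = {errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK}

def retrying_read(fd, n=4096):
    # keep retrying only the errors Stream.read treats as transient
    while True:
        try:
            return os.read(fd, n)
        except EnvironmentError as e:
            if e.errno not in RECOVERABLE:
                raise

r, w = os.pipe()
os.write(w, b"hello")
print(retrying_read(r))   # b'hello'
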
icfpc2016/icfpc2016-judge
hibiki/app/hibiki/game.py
normalize_solution
python
def normalize_solution(solution_spec): new_solution_spec = '' tokens = solution_spec.split() try: num_points = int(tokens.pop(0)) except Exception: raise VerificationError('Parse error in the number of source vertices.') if num_points <= 0: raise VerificationError('Number of source vertices must be positive.') new_solution_spec += '%d\n' % num_points for i in xrange(num_points): try: point = tokens.pop(0) except Exception: raise VerificationError('Parse error in coordinate of source vertex #%d.' % i) if not _POINT_RE.search(point): raise VerificationError('Parse error in coordinate of source vertex #%d.' % i) new_solution_spec += '%s\n' % point try: num_facets = int(tokens.pop(0)) except Exception: raise VerificationError('Parse error in the number of facets.') if num_facets <= 0: raise VerificationError('Number of facets must be positive.') new_solution_spec += '%d\n' % num_facets for i in xrange(num_facets): try: facet_size = int(tokens.pop(0)) except Exception: raise VerificationError('Parse error in the size of facet #%d.' % i) if facet_size < 3: raise VerificationError('The size of facet #%d must be no less than three.' % i) facet_def = [] for j in xrange(facet_size): try: point_index = int(tokens.pop(0)) except Exception: raise VerificationError('A vertex index in facet #%d is invalid.' % i) if not 0 <= point_index < num_points: raise VerificationError('A vertex index in facet #%d is out of range.' % i) facet_def.append(point_index) if len(set(facet_def)) != facet_size: raise VerificationError('Facet #%d has duplicated vertices.' % i) new_solution_spec += '%d %s\n' % ( facet_size, ' '.join('%d' % facet_index for facet_index in facet_def)) for i in xrange(num_points): try: point = tokens.pop(0) except Exception: raise VerificationError('Parse error in coordinate of destination vertex #%d.' % i) if not _POINT_RE.search(point): raise VerificationError('Parse error in coordinate of destination vertex #%d.' % i) new_solution_spec += '%s\n' % point if tokens: raise VerificationError('Redundant tokens found after the end of the specification.') solution_size = sum(len(s) for s in new_solution_spec.split()) if solution_size > _MAX_SOLUTION_SIZE: raise VerificationError('Solution size limit exceeded.') return (new_solution_spec, solution_size)
Normalizes a solution spec. Args: solution_spec: Specification string of a solution. Returns: (solution_spec, solution_size) solution_spec: Normalized specification string of a solution. solution_size: Solution size. Raises: VerificationError: When parsing failed.
https://github.com/icfpc2016/icfpc2016-judge/blob/ece7a536fa480f141d0d305b090b1bfe195542f8/hibiki/app/hibiki/game.py#L56-L139
import re import tempfile import gflags import subprocess32 as subprocess FLAGS = gflags.FLAGS _MAX_SOLUTION_SIZE = 5000 _JUDGE_TIMEOUT_SECONDS = 30 _NUMBER_RE = re.compile( r'^' r'(0|-?[1-9][0-9]*|((0|-?[1-9][0-9]*)/[1-9][0-9]*))' r'$' ) _POINT_RE = re.compile( r'^' r'(0|-?[1-9][0-9]*|((0|-?[1-9][0-9]*)/[1-9][0-9]*))' r',' r'(0|-?[1-9][0-9]*|((0|-?[1-9][0-9]*)/[1-9][0-9]*))' r'$' ) _VERIFICATION_ERROR_RE = re.compile(r'^ValidateSolutionError:\s*(.*)$', re.MULTILINE) class VerificationError(Exception): def __init__(self, message): super(VerificationError, self).__init__(message) self.message = message def make_temporary_file_with_content(content): f = tempfile.NamedTemporaryFile() f.write(content) f.flush() f.seek(0) return f
Apache License 2.0
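
A spec that normalize_solution accepts lists the source vertices, the facets, and then the destination vertices, with every coordinate written as an integer or fraction pair. The unit-square identity fold below satisfies every check in the function:

spec = "\n".join([
    "4",                          # number of source vertices
    "0,0", "1,0", "1,1", "0,1",
    "1",                          # number of facets
    "4 0 1 2 3",                  # one facet using all four vertices
    "0,0", "1,0", "1,1", "0,1",   # destination vertices
])
normalized, size = normalize_solution(spec)
print(size)                       # well under _MAX_SOLUTION_SIZE
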
numba/numba
numba/core/caching.py
_Cache.disable
python
def disable(self):
Disable the cache.
https://github.com/numba/numba/blob/8d4559a83b7b12da9121c030b8e3780874204a34/numba/core/caching.py#L78-L81
from abc import ABCMeta, abstractmethod, abstractproperty import contextlib import errno import hashlib import inspect import itertools import os import pickle import sys import tempfile import warnings from numba.misc.appdirs import AppDirs import numba from numba.core.errors import NumbaWarning from numba.core.base import BaseContext from numba.core.codegen import CodeLibrary from numba.core.compiler import CompileResult from numba.core import config, compiler from numba.core.serialize import dumps def _get_codegen(obj): if isinstance(obj, BaseContext): return obj.codegen() elif isinstance(obj, CodeLibrary): return obj.codegen elif isinstance(obj, CompileResult): return obj.target_context.codegen() else: raise TypeError(type(obj)) def _cache_log(msg, *args): if config.DEBUG_CACHE: msg = msg % args print(msg) class _Cache(metaclass=ABCMeta): @abstractproperty def cache_path(self): @abstractmethod def load_overload(self, sig, target_context): @abstractmethod def save_overload(self, sig, data): @abstractmethod def enable(self): @abstractmethod
BSD 2-Clause Simplified License
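
_Cache only pins down the caching interface; a concrete cache must override every abstract member before it can be instantiated. A toy subclass sketch covering the members visible in this excerpt (the dict-backed storage is purely illustrative, and the full class may declare further abstract methods):

class DictCache(_Cache):
    def __init__(self):
        self._store = {}

    @property
    def cache_path(self):
        return "<in-memory>"

    def load_overload(self, sig, target_context):
        return self._store.get(sig)

    def save_overload(self, sig, data):
        self._store[sig] = data

    def enable(self):
        pass

    def disable(self):
        pass
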
awslabs/dgl-lifesci
python/dgllife/model/model_zoo/dgmg.py
MoleculeEnv.reset
python
def reset(self, rdkit_mol=False): self.dgl_graph = dgl.graph(([], []), idtype=torch.int32) self.dgl_graph.set_n_initializer(dgl.frame.zero_initializer) self.dgl_graph.set_e_initializer(dgl.frame.zero_initializer) self.mol = None if rdkit_mol: self.mol = Chem.RWMol(Chem.MolFromSmiles(''))
Setup for generating a new molecule Parameters ---------- rdkit_mol : bool Whether to keep a Chem.rdchem.Mol object so that we know what molecule is being generated
https://github.com/awslabs/dgl-lifesci/blob/ef58e803d2e7d8e0772292abfd59d1a6fa03c007/python/dgllife/model/model_zoo/dgmg.py#L91-L109
import dgl import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from functools import partial from rdkit import Chem from torch.distributions import Categorical __all__ = ['DGMG'] class MoleculeEnv(object): def __init__(self, atom_types, bond_types): super(MoleculeEnv, self).__init__() self.atom_types = atom_types self.bond_types = bond_types self.atom_type_to_id = dict() self.bond_type_to_id = dict() for id, a_type in enumerate(atom_types): self.atom_type_to_id[a_type] = id for id, b_type in enumerate(bond_types): self.bond_type_to_id[b_type] = id def get_decision_sequence(self, mol, atom_order): decisions = [] old2new = dict() for new_id, old_id in enumerate(atom_order): atom = mol.GetAtomWithIdx(old_id) a_type = atom.GetSymbol() decisions.append((0, self.atom_type_to_id[a_type])) for bond in atom.GetBonds(): u = bond.GetBeginAtomIdx() v = bond.GetEndAtomIdx() if v == old_id: u, v = v, u if v in old2new: decisions.append((1, self.bond_type_to_id[bond.GetBondType()])) decisions.append((2, old2new[v])) decisions.append((1, len(self.bond_types))) old2new[old_id] = new_id decisions.append((0, len(self.atom_types))) return decisions
Apache License 2.0
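
MoleculeEnv.reset starts generation from an empty DGLGraph and, when rdkit_mol=True, an empty editable RDKit molecule. A usage sketch assuming dgl and rdkit versions compatible with this module; the atom and bond vocabularies are illustrative:

from rdkit import Chem

env = MoleculeEnv(atom_types=['C', 'N', 'O'],
                  bond_types=[Chem.rdchem.BondType.SINGLE,
                              Chem.rdchem.BondType.DOUBLE])
env.reset(rdkit_mol=True)
print(env.dgl_graph.num_nodes())   # 0 -- generation starts from an empty graph
print(env.mol.GetNumAtoms())       # 0 -- an empty editable RDKit molecule
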
soon/codeforcesapi
codeforces/api/json_objects/ranklist_row.py
RanklistRow.party
python
def party(self, value): assert isinstance(value, (Party, str, dict)) if not isinstance(value, Party): value = Party(value) self._party = value
Lazy property. :param value: Party that took a corresponding place in the contest. :type value: Party or str or dict
https://github.com/soon/codeforcesapi/blob/23275464a41c6886461af94929b35ef1808a33bd/codeforces/api/json_objects/ranklist_row.py#L63-L75
from . import BaseJsonObject, Party, ProblemResult from codeforces.utils import lazy_property __all__ = ['RanklistRow'] class RanklistRow(BaseJsonObject): def __init__(self, data=None): self._party = None self._rank = None self._points = None self._penalty = None self._successful_hack_count = None self._unsuccessful_hack_count = None self._problem_results = None self._last_submission_time = None super().__init__(data) def __repr__(self): return '<RanklistRow: {}>'.format(self.party) def load_required_fields_from_dict(self, values): super().load_required_fields_from_dict(values) self.party = values['party'] self.rank = values['rank'] self.points = values['points'] self.penalty = values['penalty'] self.successful_hack_count = values['successfulHackCount'] self.unsuccessful_hack_count = values['unsuccessfulHackCount'] self.problem_results = values['problemResults'] def load_optional_fields_from_dict(self, values): super().load_optional_fields_from_dict(values) self.last_submission_time = values.get('lastSubmissionTimeSeconds') @lazy_property def party(self): return self._party @party.setter
MIT License
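The setter above follows a coerce-on-assign pattern: anything that is not already a Party gets wrapped in one. A self-contained illustration with stand-in classes -- these are not the library's own Party/RanklistRow, just enough code to show the behaviour:

class Party:
    """Stand-in for the real Party; just remembers the raw data."""
    def __init__(self, data):
        self.data = data

class Row:
    @property
    def party(self):
        return self._party

    @party.setter
    def party(self, value):
        # Same idea as RanklistRow.party: accept Party, str or dict,
        # and normalise everything that is not already a Party.
        assert isinstance(value, (Party, str, dict))
        if not isinstance(value, Party):
            value = Party(value)
        self._party = value

r = Row()
r.party = {'participantType': 'CONTESTANT'}
print(type(r.party).__name__)   # Party -- the dict was wrapped on assignment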
cuthbertlab/music21-tools
trecento/medren.py
MensuralNote.fontString
python
def fontString(self):
    if self.mensuralType == 'maxima':
        self._fontString = '0x58'
    elif self.mensuralType == 'longa':
        self._fontString = '0x4c'
    elif self.mensuralType == 'brevis':
        self._fontString = '0x42'
    elif self.mensuralType == 'semibrevis':
        if 'down' in self.stems:
            self._fontString = '0x4e'
        elif 'side' in self.stems:
            self._fontString = '0x41'
        else:
            self._fontString = '0x53'
    elif self.mensuralType == 'minima':
        if 'down' in self.stems:
            if 'down' in self.flags and self.flags['down'] == 'left':
                self._fontString = '0x46'
            elif 'down' in self.flags and self.flags['down'] == 'right':
                self._fontString = '0x47'
            else:
                self._fontString = '0x44'
        elif 'side' in self.stems:
            self._fontString = '0x61'
        else:
            self._fontString = '0x4d'
    else:  # semiminima
        if self.flags['up'] == 'left':
            self._fontString = '0x49'
        else:
            if 'down' in self.stems:
                if 'down' in self.flags and self.flags['down'] == 'left':
                    self._fontString = '0x48'
                else:
                    self._fontString = '0x45'
            else:
                self._fontString = '0x59'

    if self.style.color == 'red':
        # Side-stem glyphs have no red variant in the Ciconia font.
        if self._fontString in ['0x41', '0x61']:
            self._fontString = ''
        else:
            self._fontString = hex(int(self._fontString, 16) + 32)

    return self._fontString
The utf-8 code corresponding to a mensural note in Ciconia font.

Note that semiminima with a left flag on the upper stem and any flag on the
lower stem, semiminima with a right flag on the upper stem and on the lower
stem, and any red or unfilled notes with side stems have no corresponding
characters in the Ciconia font.

TODO: Replace with SMuFL

>>> mn = MensuralNote('A', 'M')
>>> mn.setStem('down')
>>> mn.fontString
'0x44'

>>> mn.setFlag('down', 'right')
>>> mn.fontString
'0x47'

>>> mn.setFlag('down', None)
>>> mn.setStem(None)
>>> mn.fontString
'0x4d'

>>> mn.style.color = 'red'
>>> mn.fontString
'0x6d'
https://github.com/cuthbertlab/music21-tools/blob/78cf5404c1bf5e4ab8b4d5b7b6c76e253d48c8ee/trecento/medren.py#L733-L801
import copy import unittest from music21 import bar from music21 import base from music21 import clef from music21 import common from music21 import duration from music21 import exceptions21 from music21 import interval from music21 import meter from music21 import note from music21 import pitch from music21 import stream from music21 import tempo from music21 import environment from . import notation environLocal = environment.Environment('medren') allowableStrettoIntervals = { -8: [(3, True), (-3, True), (5, False), (-4, False), (1, True)], 8: [(3, True), (-3, True), (5, False), (4, False), (1, True)], -5: [(-3, True), (-5, False), (2, True), (4, False), (1, True)], 5: [(3, True), (5, False), (-2, True), (-4, False), (1, True)], -4: [(3, True), (5, False), (2, False), (-2, True), (-4, False)], 4: [(-3, True), (-5, False), (2, True), (-2, False), (4, False)], } _validMensuralTypes = [None,'maxima', 'longa', 'brevis', 'semibrevis', 'minima', 'semiminima'] _validMensuralAbbr = [None, 'Mx', 'L', 'B', 'SB', 'M', 'SM'] class MensuralClef(clef.Clef): def __init__(self, sign='C'): super().__init__() self._line = None self._fontString = None if sign == 'C': self.sign = sign self._line = 4 elif sign == 'F': self.sign = sign self._line = 3 else: raise MedRenException('A %s-clef is not a recognized mensural clef' % sign) def _getLine(self): return self._line def _setLine(self, line): self._line = line line = property(_getLine, _setLine, doc = '''The staff line the clef resides on''') @property def fontString(self): if self.sign == 'C': self._fontString = '0x4b' else: self._fontString = '0x5c' return self._fontString class Mensuration(meter.TimeSignature): def __init__(self, tempus='perfect', prolation='minor', mode='perfect', maximode=None, scalingFactor=4): self.tempus = tempus self.prolation = prolation self.mode = mode self.maximode = maximode self._fontString = '' self.timeString = None self._minimaPerBrevis = 0 if tempus == 'perfect' and prolation == 'major': self.timeString = '9/8' self.standardSymbol = 'O-dot' self._fontString = '0x50' self._minimaPerBrevis = 9 elif tempus == 'perfect' and prolation == 'minor': self.timeString = '6/8' self.standardSymbol = 'C-dot' self._fontString = '0x63' self._minimaPerBrevis = 6 elif tempus == 'imperfect' and prolation == 'major': self.timeString = '3/4' self.standardSymbol = 'O' self._fontString = '0x4f' self._minimaPerBrevis = 6 elif tempus == 'imperfect' and prolation == 'minor': self.timeString = '2/4' self.standardSymbol = 'C' self._fontString = '0x43' self._minimaPerBrevis = 4 else: raise MedRenException( 'cannot make out the mensuration from tempus %s and prolation %s' % (tempus, prolation)) meter.TimeSignature.__init__(self, self.timeString) def __str__(self): return '<medren.Mensuration %s>' % self.standardSymbol def __repr__(self): return str(self) def _getMinimaPerMeasure(self): return self._minimaPerBrevis def _setMinimaPerMeasure(self, mPM): self._minimaPerBrevis = mPM minimaPerBrevis = property(_getMinimaPerMeasure, _setMinimaPerMeasure, doc = '''Used to get or set the number of minima in a 'measure' under the given mensuration. 
>>> c = Mensuration('imperfect', 'minor') >>> c.minimaPerBrevis 4 >>> c.minimaPerBrevis = 8 >>> c.minimaPerBrevis 8 ''') @property def fontString(self): return self._fontString class GeneralMensuralNote(base.Music21Object): def __init__(self, mensuralTypeOrAbbr='brevis'): base.Music21Object.__init__(self) self._gettingDuration = False self._duration = None if mensuralTypeOrAbbr in _validMensuralTypes: self._mensuralType = mensuralTypeOrAbbr elif mensuralTypeOrAbbr in _validMensuralAbbr: self.mensuralType = _validMensuralTypes[_validMensuralAbbr.index(mensuralTypeOrAbbr)] else: raise MedRenException('%s is not a valid mensural type or abbreviation' % mensuralTypeOrAbbr) self.lenList = [] def __repr__(self): return '<medren.GeneralMensuralNote %s>' % self.mensuralType def __eq__(self, other): eq = hasattr(other, 'mensuralType') if eq: eq = eq and (self.mensuralType == other.mensuralType) if eq and hasattr(self, 'activeSite'): eq = eq and hasattr(other, 'activeSite') if eq: eq = eq and (self.activeSite == other.activeSite) if eq and hasattr(self, 'offset'): eq = eq and hasattr(other, 'offset') if eq: eq = eq and (self.offset == other.offset) return eq def _getMensuralType(self): return self._mensuralType def _setMensuralType(self, mensuralTypeOrAbbr): if mensuralTypeOrAbbr in _validMensuralTypes: self._mensuralType = mensuralTypeOrAbbr elif mensuralTypeOrAbbr in _validMensuralAbbr: self.mensuralType = _validMensuralTypes[_validMensuralAbbr.index(mensuralTypeOrAbbr)] else: raise MedRenException('%s is not a valid mensural type or abbreviation' % mensuralTypeOrAbbr) mensuralType = property(_getMensuralType, _setMensuralType, doc = '''Name of the mensural length of the general mensural note (brevis, longa, etc.): >>> gmn = GeneralMensuralNote('maxima') >>> gmn.mensuralType 'maxima' >>> gmn_1 = GeneralMensuralNote('SB') >>> gmn_1.mensuralType 'semibrevis' >>> gmn_2 = GeneralMensuralNote('blah') Traceback (most recent call last): MedRenException: blah is not a valid mensural type or abbreviation ''') def updateDurationFromMensuration(self, mensuration=None, surroundingStream=None): mLen, mDur = 0, 0 if self._gettingDuration is True: return duration.Duration(0.0) if mensuration is None: mOrD = self._determineMensurationOrDivisione() else: mOrD = mensuration index = self._getTranslator(mensurationOrDivisione=mOrD, surroundingStream=surroundingStream) if self.lenList: if mOrD.standardSymbol in ['.q.', '.p.', '.i.', '.n.']: mDur = 0.5 else: mDur = 0.25 mLen = self.lenList[index] self.duration = duration.Duration(mLen * mDur) else: self.duration = duration.Duration(0.0) def _getTranslator(self, mensurationOrDivisione=None, surroundingStream=None): mOrD = mensurationOrDivisione if mOrD is None: mOrD = self._determineMensurationOrDivisione() measure, index = self._getSurroundingMeasure(mensurationOrDivisione=mOrD, activeSite=surroundingStream) self._gettingDuration = True if measure and 'Divisione' in mOrD.classes: if index == 0: self.lenList = notation.BrevisLengthTranslator( mOrD, measure).getKnownLengths() elif index != -1: tempMN = measure[0] self.lenList = tempMN.lenList self._gettingDuration = False return index def _determineMensurationOrDivisione(self): searchClasses = (Mensuration, notation.Divisione) mOrD = self.getContextByClass(searchClasses) if mOrD is not None: return mOrD else: return None def _getSurroundingMeasure(self, mensurationOrDivisione=None, activeSite=None): mOrD = mensurationOrDivisione if mOrD is None: mOrD = self._determineMensurationOrDivisione() if activeSite is None: site = 
self.activeSite else: site = activeSite if site is None: return [], -1 if self.mensuralType in ['brevis', 'longa', 'maxima']: return [self], 0 tempList = list(site.recurse())[1:] if site.isMeasure: return tempList, -1 mList = [] currentIndex, index = -1, -1 indOffset = 0 for ind, item in enumerate(tempList): if self is item: currentIndex = ind for i in range(currentIndex - 1, -1, -1): if (('Punctus' in tempList[i].classes) or ('Ligature' in tempList[i].classes)): indOffset = i + 1 break elif 'GeneralMensuralNote' in tempList[i].classes: if (('Divisione' in mOrD.classes) and (tempList[i].mensuralType in ['brevis', 'longa', 'maxima'])): indOffset = i + 1 break else: mList.insert(i, tempList[i]) else: indOffset += 1 mList.reverse() mList.insert(currentIndex, self) for j in range(currentIndex + 1, len(tempList), 1): if (('Punctus' in tempList[j].classes) or ('Ligature' in tempList[j].classes)): break if 'GeneralMensuralNote' in tempList[j].classes: if (('Divisione' in mOrD.classes) and (tempList[j].mensuralType in ['brevis', 'longa', 'maxima'])): break else: mList.insert(j, tempList[j]) index = currentIndex - indOffset return mList, index class MensuralRest(GeneralMensuralNote, note.Rest): def __init__(self, *arguments, **keywords): note.Rest.__init__(self, *arguments, **keywords) GeneralMensuralNote.__init__(self) self._gettingDuration = False self._mensuralType = 'brevis' if arguments: tOrA = arguments[0] if tOrA in _validMensuralTypes: self._mensuralType = tOrA elif tOrA in _validMensuralAbbr: self._mensuralType = _validMensuralTypes[_validMensuralAbbr.index(tOrA)] else: raise MedRenException('%s is not a valid mensural type or abbreviation' % tOrA) self._duration = None self._fontString = '' if self.mensuralType == 'Longa': self._fontString = '0x30' elif self.mensuralType == 'brevis': self._fontString = '0x31' elif self.mensuralType == 'semibrevis': self._fontString = '0x32' elif self.mensuralType == 'minima': self._fontString = '0x33' self.lenList = [] def __repr__(self): return '<medren.MensuralRest %s>' % self.mensuralType @property def fullName(self): msg = [] msg.append(self.mensuralType) msg.append(' rest') return ''.join(msg) @property def fontString(self): return self._fontString class MensuralNote(GeneralMensuralNote, note.Note): def __init__(self, *arguments, **keywords): if arguments: note.Note.__init__(self, arguments[0], **keywords) else: note.Note.__init__(self, **keywords) GeneralMensuralNote.__init__(self) self._gettingDuration = False self._mensuralType = 'brevis' if len(arguments) > 1: tOrA = arguments[1] if tOrA in _validMensuralTypes: self._mensuralType = tOrA elif tOrA in _validMensuralAbbr: self._mensuralType = _validMensuralTypes[_validMensuralAbbr.index(tOrA)] else: raise MedRenException('%s is not a valid mensural type or abbreviation' % tOrA) if self.mensuralType in ['minima', 'semiminima']: self.stems = ['up'] else: self.stems = [] self.flags = dict((s, None) for s in self.stems) if self._mensuralType == 'semiminima': self.flags['up'] = 'right' self._duration = None self._fontString = '' self.lenList = [] def __repr__(self): return '<medren.MensuralNote %s %s>' % (self.mensuralType, self.name) def __eq__(self, other): eq = GeneralMensuralNote.__eq__(self, other) eq = eq and hasattr(other, 'pitch') if eq: eq = eq and (self.pitch == other.pitch) eq = eq and hasattr(other, 'articulations') if eq: eq = eq and (sorted(list(set(self.articulations))) == sorted(list(set(other.articulations))) ) return eq @property def fullName(self): msg = [] 
msg.append(self.mensuralType) msg.append(' %s ' % self.pitch.fullName) return ''.join(msg) @property
BSD 3-Clause New or Revised License
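The red-coloration branch in the function above only shifts the glyph code by 0x20; here is the arithmetic behind the '0x4d' -> '0x6d' doctest, checked in plain Python with no music21 dependency:

# Black minima glyph is 0x4d; its red variant sits 0x20 (32) higher in the font.
black = '0x4d'
red = hex(int(black, 16) + 32)
print(red)            # 0x6d, matching the doctest in the record above
assert red == '0x6d'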
joaoventura/pylibui
pylibui/controls/checkbox.py
Checkbox.getChecked
python
def getChecked(self):
    return bool(libui.uiCheckboxChecked(self.control))
Returns whether the checkbox is checked or not.

:return: bool
https://github.com/joaoventura/pylibui/blob/2e74db787bfea533f3ae465670963daedcaec344/pylibui/controls/checkbox.py#L53-L59
from pylibui import libui from .control import Control class Checkbox(Control): def __init__(self, text): super().__init__() self.control = libui.uiNewCheckbox(text) def handler(window, data): self.onToggled(data) return 0 self.toggledHandler = libui.uiCheckboxOnToggled(self.control, handler, None) def setText(self, text): libui.uiCheckboxSetText(self.control, text) def getText(self): return libui.uiCheckboxText(self.control) def setChecked(self, checked): libui.uiCheckboxSetChecked(self.control, int(checked))
MIT License
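A hedged usage sketch for the record above; the import path is inferred from the repository layout, and it assumes the libui runtime has already been initialised by the surrounding application (as pylibui's examples do before creating any control):

from pylibui.controls import Checkbox

cb = Checkbox('Enable logging')
cb.setChecked(True)             # forwards to uiCheckboxSetChecked(control, 1)
print(cb.getChecked())          # True -- the C int is reported back as a bool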
gardener/cc-utils
transport/processing/processing.py
ProcessComponent.all_tgt_resources_processed
python
def all_tgt_resources_processed(self):
    return all(
        [
            (len(r.resources) == r.expected_count)
            for r in [
                self.tgt_external_resources,
                self.tgt_local_resources
            ]
        ]
    )
Check whether the number of processed resources has reached the expected count.
https://github.com/gardener/cc-utils/blob/70b9c15b002218b5d06633f70f0c4f1489c74dbc/transport/processing/processing.py#L272-L281
import concurrent.futures import enum import itertools import logging import os import gci.componentmodel as cm import ci.util import processing.model import container.util import product.v2 import processing.config as config import processing.filters as p_filters import processing.processing_component as pc import processing.processing_model as processing_model import processing.processors as p_processors import processing.uploaders as p_uploaders import processing.downloaders as p_downloaders LOGGER = logging.getLogger(__name__) class Action(enum.Enum): ARCHIVE = 'archive' CREATE = 'create' DOWNLOAD = 'download' EXTRACT = 'extract' UPLOAD = 'upload' SYNC = 'sync' class ProcessingPipeline: def __init__( self, name, filters, downloader, processor, uploaders, ): self._name = name self._filters = filters self._downloader = downloader self._processor = processor self._uploaders = uploaders def matches(self, component, container_image): filters_count = len(self._filters) return all( map( lambda filtr, component, container_image: filtr.matches(component, container_image), self._filters, itertools.repeat(component, filters_count), itertools.repeat(container_image, filters_count), ) ) def process(self, component, container_image): if not self.matches(component, container_image): return None LOGGER.info( f'{self._name} will process image: ' f'{component.name}:{container_image.access.imageReference}' ) image_tar_path = os.path.join( config.RESOURCES_DIR, ci.util.file_extension_join( container_image.access.imageReference, pc.FileExtension.TAR.value, ) ) job = processing_model.ProcessingJob( component=component, container_image=container_image, download_request=None, upload_request=processing.model.ContainerImageUploadRequest( source_ref=container_image.access.imageReference, source_file=image_tar_path, target_ref=None, processing_callback=None, ), upload_context_url=None, ) job = self._downloader.process( processing_job=job, target_file=image_tar_path ) job = self._processor.process(processing_job=job) first = True for uploader in self._uploaders: job = uploader.process(job, target_as_source=not first) first = False return job def _filter(filter_cfg: dict): filter_ctor = getattr(p_filters, filter_cfg['type']) filter_ = filter_ctor(**filter_cfg.get('kwargs', {})) return filter_ def _processor(processor_cfg: dict): proc_type = processor_cfg['type'] proc_ctor = getattr(p_processors, proc_type, None) if not proc_ctor: ci.util.fail(f'no such image processor: {proc_type}') processor = proc_ctor(**processor_cfg.get('kwargs', {})) return processor def _uploader(uploader_cfg: dict): upload_type = uploader_cfg['type'] upload_ctor = getattr(p_uploaders, upload_type, None) if not upload_ctor: ci.util.fail(f'no such uploader: {upload_type}') uploader = upload_ctor(**uploader_cfg.get('kwargs', {})) return uploader def processing_pipeline( processing_cfg: dict, shared_processors: dict, shared_uploaders: dict, ): name = processing_cfg.get('name', '<no name>') filter_cfgs = processing_cfg['filter'] if isinstance(filter_cfgs, dict): filter_cfgs = [filter_cfgs] filters = [_filter(filter_cfg=filter_cfg) for filter_cfg in filter_cfgs] downloader = p_downloaders.Downloader() if 'processor' in processing_cfg: processor_cfg = processing_cfg['processor'] if isinstance(processor_cfg, str): proc = shared_processors[processor_cfg] else: proc = _processor(processor_cfg=processor_cfg) else: proc = p_processors.NoOpProcessor() upload_cfgs = processing_cfg['upload'] if not isinstance(upload_cfgs, list): upload_cfgs = [upload_cfgs] 
def instantiate_uploader(upload_cfg): if isinstance(upload_cfg, str): return shared_uploaders[upload_cfg] return _uploader(upload_cfg) uploaders = [instantiate_uploader(upload_cfg) for upload_cfg in upload_cfgs] pipeline = ProcessingPipeline( name=name, filters=filters, downloader=downloader, processor=proc, uploaders=uploaders, ) return pipeline def enum_processing_cfgs( processing_cfg: dict, shared_processors: dict, shared_uploaders: dict, ): cfg_entries = processing_cfg['processing_cfg'] yield from map( processing_pipeline, cfg_entries, itertools.repeat(shared_processors, len(cfg_entries)), itertools.repeat(shared_uploaders, len(cfg_entries)), ) def create_jobs(processing_cfg, component_descriptor): shared_processors = { name: _processor(cfg) for name, cfg in processing_cfg.get('processors', {}).items() } shared_uploaders = { name: _uploader(cfg) for name, cfg in processing_cfg.get('uploaders', {}).items() } for component, container_image in _enumerate_oci_resources(component_descriptor): for processor in enum_processing_cfgs( processing_cfg, shared_processors, shared_uploaders, ): job = processor.process(component=component, container_image=container_image) if not job: continue ci.util.info( f'found matching processor: {component.name}: ' f'{container_image.access.imageReference}' ) yield job break else: ci.util.warning( f'no matching processor: {component.name}: ' f'{container_image.access.imageReference}' ) def _enumerate_oci_resources(descriptor): resources = descriptor.resources for resource in resources: if resource.access.type == cm.AccessType.OCI_REGISTRY and resource.type == cm.ResourceType.OCI_IMAGE: yield (descriptor.component, resource) class ProcessComponent: def __init__(self, processing_cfg, component_obj): self.src_component_obj = component_obj self.src_descriptor = component_obj.descriptor self.src_external_resources = self.src_descriptor.component.resources self.src_local_resources = self.src_descriptor.component.resources self.tgt_external_resources = ProcessComponent.new_processing_resources( src_resources=self.src_external_resources ) self.tgt_local_resources = ProcessComponent.new_processing_resources( src_resources=self.src_local_resources ) executor = concurrent.futures.ThreadPoolExecutor(max_workers=8) jobs = create_jobs( processing_cfg=processing_cfg, component_descriptor=self.src_descriptor, ) for _ in executor.map(self.process_job, jobs): pass @staticmethod def new_processing_resources( src_resources: cm.Resource ) -> processing_model.ProcessingResources: return processing_model.ProcessingResources( resources=[r for r in src_resources if r.access.type != cm.AccessType.OCI_REGISTRY], expected_count=len(src_resources) )
Apache License 2.0
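all_tgt_resources_processed only compares list lengths against expected counts. Here is that check in isolation, using a namedtuple as a stand-in for the repository's ProcessingResources model:

from collections import namedtuple

ProcessingResources = namedtuple('ProcessingResources',
                                 ['resources', 'expected_count'])

external = ProcessingResources(resources=['img-a', 'img-b'], expected_count=2)
local = ProcessingResources(resources=['blob-a'], expected_count=2)

# Same shape as all_tgt_resources_processed: every bucket must be full.
done = all(len(r.resources) == r.expected_count for r in (external, local))
print(done)   # False -- the local bucket is still missing one resource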
flhonker/zaq-code
quantization/torchTransformer.py
TorchTransformer.register
python
def register(self, origin_class, target_class):
    print("register", origin_class, target_class)
    self._register_dict[origin_class] = target_class
! This function registers which class should be transformed to the target class.
https://github.com/flhonker/zaq-code/blob/e7e9f55791e36c6784d58c356d3ced76a7583369/quantization/torchTransformer.py#L31-L39
import time import copy import types import inspect from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F import pydot from graphviz import Digraph from .utils import _ReplaceFunc, Log, UnitLayer, dict_merge class TorchTransformer(nn.Module): def __init__(self): super(TorchTransformer, self).__init__() self._register_dict = OrderedDict() self.log = Log() self._raw_TrochFuncs = OrderedDict() self._raw_TrochFunctionals = OrderedDict() self._functional_timestamp = 0
MIT License
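A usage sketch for the record above, assuming TorchTransformer from the snippet is available and torch is installed; QuantConv2d is a made-up placeholder for whatever quantized replacement module you intend to substitute:

import torch.nn as nn

# Hypothetical quantized replacement for nn.Conv2d; the name and behaviour
# are illustrative only, not part of the repository.
class QuantConv2d(nn.Conv2d):
    pass

transformer = TorchTransformer()
# After this call the transformer knows to swap nn.Conv2d for QuantConv2d
# wherever its transform pass encounters one.
transformer.register(nn.Conv2d, QuantConv2d)
print(transformer._register_dict[nn.Conv2d] is QuantConv2d)   # True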
hdrhistogram/hdrhistogram_py
hdrh/histogram.py
HdrHistogram.encode
python
def encode(self):
    return self.encoder.encode()
Encode this histogram into a histoblob.

Return:
    the histoblob describing this histogram
    (a string containing the base64 encoded compressed histogram, V2 format)
https://github.com/hdrhistogram/hdrhistogram_py/blob/2a8b8c9f2f42ee16e8ebd2ee13b159e59f42cb30/hdrh/histogram.py#L419-L427
from __future__ import division, print_function from builtins import range import math import sys from hdrh.iterators import AllValuesIterator from hdrh.iterators import RecordedIterator from hdrh.iterators import PercentileIterator from hdrh.iterators import LinearIterator from hdrh.iterators import LogIterator from hdrh.codec import HdrHistogramEncoder def get_bucket_count(value, subb_count, unit_mag): smallest_untrackable_value = subb_count << unit_mag buckets_needed = 1 while smallest_untrackable_value <= value: if smallest_untrackable_value > sys.maxsize // 2: return buckets_needed + 1 smallest_untrackable_value <<= 1 buckets_needed += 1 return buckets_needed class HdrHistogram(): def __init__(self, lowest_trackable_value, highest_trackable_value, significant_figures, word_size=8, b64_wrap=True, hdr_payload=None): if significant_figures < 1 or significant_figures > 5: raise ValueError('Invalid significant_figures') self.lowest_trackable_value = lowest_trackable_value self.highest_trackable_value = highest_trackable_value self.significant_figures = significant_figures self.unit_magnitude = int(math.floor(math.log(lowest_trackable_value) / math.log(2))) largest_value_single_unit_res = 2 * math.pow(10, significant_figures) subb_count_mag = int(math.ceil(math.log(largest_value_single_unit_res) / math.log(2))) self.sub_bucket_half_count_magnitude = subb_count_mag - 1 if subb_count_mag > 1 else 0 self.sub_bucket_count = int(math.pow(2, self.sub_bucket_half_count_magnitude + 1)) self.sub_bucket_half_count = self.sub_bucket_count // 2 self.sub_bucket_mask = (self.sub_bucket_count - 1) << self.unit_magnitude self.bucket_count = get_bucket_count(highest_trackable_value, self.sub_bucket_count, self.unit_magnitude) self.min_value = sys.maxsize self.max_value = 0 self.total_count = 0 self.counts_len = (self.bucket_count + 1) * (self.sub_bucket_count // 2) self.word_size = word_size if hdr_payload: payload = hdr_payload.payload self.int_to_double_conversion_ratio = payload.conversion_ratio_bits results = hdr_payload.init_counts(self.counts_len) if results['total']: self.set_internal_tacking_values(results['min_nonzero_index'], results['max_nonzero_index'], results['total']) else: self.int_to_double_conversion_ratio = 1.0 self.b64_wrap = b64_wrap self.encoder = HdrHistogramEncoder(self, b64_wrap, hdr_payload) self.counts = self.encoder.get_counts() self.start_time_stamp_msec = 0 self.end_time_stamp_msec = 0 self.tag = None def _clz(self, value): return 63 - (len(bin(value)) - 3) def _get_bucket_index(self, value): pow2ceiling = 64 - self._clz(int(value) | self.sub_bucket_mask) return int(pow2ceiling - self.unit_magnitude - (self.sub_bucket_half_count_magnitude + 1)) def _get_sub_bucket_index(self, value, bucket_index): return int(value) >> (bucket_index + self.unit_magnitude) def _counts_index(self, bucket_index, sub_bucket_index): bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count return bucket_base_index + offset_in_bucket def _counts_index_for(self, value): bucket_index = self._get_bucket_index(value) sub_bucket_index = self._get_sub_bucket_index(value, bucket_index) return self._counts_index(bucket_index, sub_bucket_index) def record_value(self, value, count=1): if value < 0: return False counts_index = self._counts_index_for(value) if (counts_index < 0) or (self.counts_len <= counts_index): return False self.counts[counts_index] += count self.total_count += count self.min_value = min(self.min_value, 
value) self.max_value = max(self.max_value, value) return True def record_corrected_value(self, value, expected_interval, count=1): while True: if not self.record_value(value, count): return False if value <= expected_interval or expected_interval <= 0: return True value -= expected_interval def get_count_at_index(self, index): if index >= self.counts_len: raise IndexError() if index >= self.encoder.payload.counts_len: return 0 return self.counts[index] def get_count_at_sub_bucket(self, bucket_index, sub_bucket_index): bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count counts_index = bucket_base_index + offset_in_bucket return self.counts[counts_index] def get_value_from_sub_bucket(self, bucket_index, sub_bucket_index): return sub_bucket_index << (bucket_index + self.unit_magnitude) def get_value_from_index(self, index): bucket_index = (index >> self.sub_bucket_half_count_magnitude) - 1 sub_bucket_index = (index & (self.sub_bucket_half_count - 1)) + self.sub_bucket_half_count if bucket_index < 0: sub_bucket_index -= self.sub_bucket_half_count bucket_index = 0 return self.get_value_from_sub_bucket(bucket_index, sub_bucket_index) def get_lowest_equivalent_value(self, value): bucket_index = self._get_bucket_index(value) sub_bucket_index = self._get_sub_bucket_index(value, bucket_index) lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index, sub_bucket_index) return lowest_equivalent_value def get_highest_equivalent_value(self, value): bucket_index = self._get_bucket_index(value) sub_bucket_index = self._get_sub_bucket_index(value, bucket_index) lowest_equivalent_value = self.get_value_from_sub_bucket(bucket_index, sub_bucket_index) if sub_bucket_index >= self.sub_bucket_count: bucket_index += 1 size_of_equivalent_value_range = 1 << (self.unit_magnitude + bucket_index) next_non_equivalent_value = lowest_equivalent_value + size_of_equivalent_value_range return next_non_equivalent_value - 1 def get_target_count_at_percentile(self, percentile): requested_percentile = min(percentile, 100.0) count_at_percentile = int(((requested_percentile * self.total_count / 100)) + 0.5) return max(count_at_percentile, 1) def get_value_at_percentile(self, percentile): count_at_percentile = self.get_target_count_at_percentile(percentile) total = 0 for index in range(self.counts_len): total += self.get_count_at_index(index) if total >= count_at_percentile: value_at_index = self.get_value_from_index(index) if percentile: return self.get_highest_equivalent_value(value_at_index) return self.get_lowest_equivalent_value(value_at_index) return 0 def get_percentile_to_value_dict(self, percentile_list): result = {} total = 0 percentile_list_index = 0 count_at_percentile = 0 percentile_list = list(set(percentile_list)) percentile_list.sort() for index in range(self.counts_len): total += self.get_count_at_index(index) while True: if not count_at_percentile: if percentile_list_index == len(percentile_list): return result percentile = percentile_list[percentile_list_index] percentile_list_index += 1 if percentile > 100: return result count_at_percentile = self.get_target_count_at_percentile(percentile) if total >= count_at_percentile: value_at_index = self.get_value_from_index(index) if percentile: result[percentile] = self.get_highest_equivalent_value(value_at_index) else: result[percentile] = self.get_lowest_equivalent_value(value_at_index) count_at_percentile = 0 else: break return result def get_total_count(self): 
return self.total_count def get_count_at_value(self, value): counts_index = self._counts_index_for(value) return self.counts[counts_index] def values_are_equivalent(self, val1, val2): return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2) def get_max_value(self): if self.max_value == 0: return 0 return self.get_highest_equivalent_value(self.max_value) def get_min_value(self): if self.counts[0] > 0 or self.total_count == 0: return 0 if sys.maxsize == self.min_value: return sys.maxsize return self.get_lowest_equivalent_value(self.min_value) def _hdr_size_of_equiv_value_range(self, value): bucket_index = self._get_bucket_index(value) sub_bucket_index = self._get_sub_bucket_index(value, bucket_index) if sub_bucket_index >= self.sub_bucket_count: bucket_index += 1 return 1 << (self.unit_magnitude + bucket_index) def _hdr_median_equiv_value(self, value): return self.get_lowest_equivalent_value(value) + (self._hdr_size_of_equiv_value_range(value) >> 1) def get_mean_value(self): if not self.total_count: return 0.0 total = 0 itr = self.get_recorded_iterator() for item in itr: total += itr.count_at_this_value * self._hdr_median_equiv_value(item.value_iterated_to) return float(total) / self.total_count def get_stddev(self): if not self.total_count: return 0.0 mean = self.get_mean_value() geometric_dev_total = 0.0 for item in self.get_recorded_iterator(): dev = (self._hdr_median_equiv_value(item.value_iterated_to) * 1.0) - mean geometric_dev_total += (dev * dev) * item.count_added_in_this_iter_step return math.sqrt(geometric_dev_total / self.total_count) def reset(self): for index in range(self.counts_len): self.counts[index] = 0 self.total_count = 0 self.min_value = sys.maxsize self.max_value = 0 self.start_time_stamp_msec = sys.maxsize self.end_time_stamp_msec = 0 def __iter__(self): return RecordedIterator(self) def get_all_values_iterator(self): return AllValuesIterator(self) def get_recorded_iterator(self): return RecordedIterator(self) def get_percentile_iterator(self, ticks_per_half_distance): return PercentileIterator(self, ticks_per_half_distance) def get_linear_iterator(self, value_units_per_bucket): return LinearIterator(self, value_units_per_bucket) def get_log_iterator(self, value_units_first_bucket, log_base): return LogIterator(self, value_units_first_bucket, log_base)
Apache License 2.0
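A usage sketch for the record above, assuming the hdrh package is installed; the value range and the recorded latencies are arbitrary:

from hdrh.histogram import HdrHistogram

# Track values between 1 and 3,600,000,000 (e.g. microseconds up to an hour)
# with 3 significant figures.
histogram = HdrHistogram(1, 3600000000, 3)
for latency_us in (120, 250, 980, 4100):
    histogram.record_value(latency_us)

blob = histogram.encode()   # compressed, base64-wrapped V2 histoblob
print(blob[:16])            # printable prefix; safe to embed in a log line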
openaddresses/machine
openaddr/ci/objects.py
read_sets
python
def read_sets(db, past_id):
    db.execute('''SELECT id, commit_sha, datetime_start, datetime_end,
                         render_world, render_europe, render_usa, render_geojson,
                         owner, repository
                  FROM sets
                  WHERE id < COALESCE(%s, 2^64)
                  ORDER BY id DESC LIMIT 25''',
               (past_id, ))

    return [Set(*row) for row in db.fetchall()]
Read information about recent sets. Returns list of Sets.
https://github.com/openaddresses/machine/blob/05db17d8492b3d8f4064f0f5b0ca9c68041c535a/openaddr/ci/objects.py#L313-L325
import logging; _L = logging.getLogger('openaddr.ci.objects') import json, pickle, copy, time from ..process_one import SourceProblem class Job: def __init__(self, id, status, task_files, states, file_results, github_owner, github_repository, github_status_url, github_comments_url, datetime_start, datetime_end): self.id = id self.status = status self.task_files = task_files self.states = states self.file_results = file_results self.github_owner = github_owner self.github_repository = github_repository self.github_status_url = github_status_url self.github_comments_url = github_comments_url self.datetime_start = datetime_start self.datetime_end = datetime_end class Set: def __init__(self, id, commit_sha, datetime_start, datetime_end, render_world, render_europe, render_usa, render_geojson, owner, repository): self.id = id self.commit_sha = commit_sha self.datetime_start = datetime_start self.datetime_end = datetime_end self.render_world = render_world self.render_europe = render_europe self.render_usa = render_usa self.render_geojson = render_geojson self.owner = owner self.repository = repository class Run: def __init__(self, id, source_path, source_id, source_data, datetime_tz, state, status, copy_of, code_version, worker_id, job_id, set_id, commit_sha, is_merged): assert hasattr(state, 'to_json'), 'Run state should have to_json() method' assert source_path.startswith('sources/'), '{} should start with "sources"'.format(repr(source_path)) assert source_path.endswith('.json'), '{} should end with ".json"'.format(repr(source_path)) self.id = id self.source_path = source_path self.source_id = source_id self.source_data = bytes(source_data) if (source_data is not None) else None self.datetime_tz = datetime_tz self.state = state self.status = status self.copy_of = copy_of self.code_version = code_version self.worker_id = worker_id self.job_id = job_id self.set_id = set_id self.commit_sha = commit_sha self.is_merged = is_merged class RunState: key_attrs = {key: key.replace(' ', '_').replace('-', '_') for key in ('source', 'cache', 'sample', 'geometry type', 'address count', 'version', 'fingerprint', 'cache time', 'processed', 'output', 'process time', 'website', 'skipped', 'license', 'share-alike', 'attribution required', 'attribution name', 'attribution flag', 'process hash', 'preview', 'slippymap', 'source problem', 'code version', 'tests passed', 'run id')} def __init__(self, json_blob): blob_dict = dict(json_blob or {}) self.keys = blob_dict.keys() self.run_id = blob_dict.get('run id') self.source = blob_dict.get('source') self.cache = blob_dict.get('cache') self.sample = blob_dict.get('sample') self.geometry_type = blob_dict.get('geometry type') self.address_count = blob_dict.get('address count') self.version = blob_dict.get('version') self.fingerprint = blob_dict.get('fingerprint') self.cache_time = blob_dict.get('cache time') self.processed = blob_dict.get('processed') self.output = blob_dict.get('output') self.preview = blob_dict.get('preview') self.slippymap = blob_dict.get('slippymap') self.process_time = blob_dict.get('process time') self.process_hash = blob_dict.get('process hash') self.website = blob_dict.get('website') self.skipped = blob_dict.get('skipped') self.license = blob_dict.get('license') self.share_alike = blob_dict.get('share-alike') self.attribution_required = blob_dict.get('attribution required') self.attribution_name = blob_dict.get('attribution name') self.attribution_flag = blob_dict.get('attribution flag') self.code_version = blob_dict.get('code version') 
self.tests_passed = blob_dict.get('tests passed') raw_problem = blob_dict.get('source problem', None) self.source_problem = None if (raw_problem is None) else SourceProblem(raw_problem) unexpected = ', '.join(set(self.keys) - set(RunState.key_attrs.keys())) assert len(unexpected) == 0, 'RunState should not have keys {}'.format(unexpected) def get(self, json_key): return getattr(self, RunState.key_attrs[json_key]) def to_dict(self): dict = {k: self.get(k) for k in self.keys} if 'source problem' in dict and dict['source problem'] is not None: dict['source problem'] = self.source_problem.value return dict def to_json(self): return json.dumps(self.to_dict(), sort_keys=True) class Zip: def __init__(self, url, content_length): self.url = url self.content_length = content_length def _result_runstate2dictionary(result): actual_result = copy.copy(result) if result and 'state' in result: actual_result['state'] = result['state'].to_dict() elif result and 'output' in result: actual_result['state'] = result.pop('output').to_dict() return actual_result def result_dictionary2runstate(result): actual_result = copy.copy(result) if result and 'state' in result: actual_result['state'] = RunState(result['state']) elif result and 'output' in result: actual_result['state'] = RunState(result.pop('output')) elif result: actual_result['state'] = RunState(None) return actual_result def add_job(db, job_id, status, task_files, file_states, file_results, owner, repo, status_url, comments_url): actual_results = {path: _result_runstate2dictionary(result) for (path, result) in file_results.items()} db.execute('''INSERT INTO jobs (task_files, file_states, file_results, github_owner, github_repository, github_status_url, github_comments_url, status, id, datetime_start) VALUES (%s::json, %s::json, %s::json, %s, %s, %s, %s, %s, %s, NOW())''', (json.dumps(task_files, sort_keys=True), json.dumps(file_states, sort_keys=True), json.dumps(actual_results, sort_keys=True), owner, repo, status_url, comments_url, status, job_id)) def write_job(db, job_id, status, task_files, file_states, file_results, owner, repo, status_url, comments_url): actual_results = {path: _result_runstate2dictionary(result) for (path, result) in file_results.items()} is_complete = bool(status is not None) db.execute('''UPDATE jobs SET task_files=%s::json, file_states=%s::json, file_results=%s::json, github_owner=%s, github_repository=%s, github_status_url=%s, github_comments_url=%s, status=%s, datetime_end=CASE WHEN %s THEN NOW() ELSE null END WHERE id = %s''', (json.dumps(task_files, sort_keys=True), json.dumps(file_states, sort_keys=True), json.dumps(actual_results, sort_keys=True), owner, repo, status_url, comments_url, status, is_complete, job_id)) def read_job(db, job_id): db.execute('''SELECT status, task_files, file_states, file_results, github_owner, github_repository, github_status_url, github_comments_url, datetime_start, datetime_end FROM jobs WHERE id = %s LIMIT 1''', (job_id, )) try: status, task_files, states, file_results, github_owner, github_repository, github_status_url, github_comments_url, datetime_start, datetime_end = db.fetchone() except TypeError: return None else: actual_results = {path: result_dictionary2runstate(result) for (path, result) in file_results.items()} return Job(job_id, status, task_files, states, actual_results, github_owner, github_repository, github_status_url, github_comments_url, datetime_start, datetime_end) def read_jobs(db, past_id): db.execute('''SELECT id, status, task_files, file_states, file_results, 
github_owner, github_repository, github_status_url, github_comments_url, datetime_start, datetime_end -- -- Select sequence value from jobs based on ID. Null sequence -- values will be excluded by this comparison to an integer. -- FROM jobs WHERE sequence < COALESCE((SELECT sequence FROM jobs WHERE id = %s), 2^64) ORDER BY sequence DESC LIMIT 25''', (past_id, )) jobs = [] for row in db.fetchall(): job_args = list(row) file_results = job_args.pop(4) actual_results = {path: result_dictionary2runstate(result) for (path, result) in file_results.items()} job_args.insert(4, actual_results) jobs.append(Job(*job_args)) return jobs def add_set(db, owner, repository): db.execute('''INSERT INTO sets (owner, repository, datetime_start) VALUES (%s, %s, NOW())''', (owner, repository)) db.execute("SELECT CURRVAL('ints')") (set_id, ) = db.fetchone() _L.info(u'Added set {} to sets table'.format(set_id)) return read_set(db, set_id) def complete_set(db, set_id, commit_sha): _L.info(u'Updating set {} in sets table'.format(set_id)) db.execute('''UPDATE sets SET datetime_end = NOW(), commit_sha = %s WHERE id = %s''', (commit_sha, set_id)) def update_set_renders(db, set_id, render_world, render_usa, render_europe, render_geojson): db.execute('''UPDATE sets SET render_world = %s, render_usa = %s, render_europe = %s, render_geojson = %s WHERE id = %s''', (render_world, render_usa, render_europe, render_geojson, set_id)) def read_set(db, set_id): db.execute('''SELECT id, commit_sha, datetime_start, datetime_end, render_world, render_europe, render_usa, render_geojson, owner, repository FROM sets WHERE id = %s LIMIT 1''', (set_id, )) try: id, sha, start, end, world, europe, usa, json, own, repo = db.fetchone() except TypeError: return None else: return Set(id, sha, start, end, world, europe, usa, json, own, repo)
ISC License
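read_sets pages backwards through the sets table 25 rows at a time, with COALESCE turning a null past_id into "no upper bound". A hedged sketch of walking every page, assuming a psycopg2 cursor; the connection DSN is a placeholder:

import psycopg2

connection = psycopg2.connect('dbname=openaddr')   # placeholder DSN
db = connection.cursor()

past_id = None              # first call: no upper bound, newest 25 sets
while True:
    sets = read_sets(db, past_id)
    if not sets:
        break
    for s in sets:
        print(s.id, s.owner, s.repository)
    past_id = sets[-1].id   # next page starts below the oldest id seen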
allenai/allennlp
allennlp/data/data_loaders/multitask_scheduler.py
MultiTaskScheduler.count_batches
python
def count_batches(self, dataset_counts: Dict[str, int]) -> int:
    raise NotImplementedError
Given the number of instances per dataset, this returns the total number of batches the scheduler will return.
https://github.com/allenai/allennlp/blob/dcd8d9e9671da5a87de51f2bb42ceb3abdce8b3b/allennlp/data/data_loaders/multitask_scheduler.py#L38-L43
from collections import defaultdict from typing import Any, Dict, Iterable, Union, List, Mapping import more_itertools from allennlp.common.registrable import Registrable from allennlp.data.instance import Instance class MultiTaskScheduler(Registrable): def batch_instances( self, epoch_instances: Dict[str, Iterable[Instance]] ) -> Iterable[List[Instance]]: raise NotImplementedError def update_from_epoch_metrics(self, epoch_metrics: Dict[str, Any]) -> None: raise NotImplementedError
Apache License 2.0
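count_batches is abstract in the record above; here is a minimal concrete sketch of one way to implement it (fixed batch size, each dataset batched separately). This is an illustration only, not AllenNLP's own scheduler:

import math
from typing import Dict

class ToyScheduler:
    """Batches each dataset on its own with a fixed batch size."""

    def __init__(self, batch_size: int = 8):
        self.batch_size = batch_size

    def count_batches(self, dataset_counts: Dict[str, int]) -> int:
        # At most one partial batch per dataset, hence the ceiling division.
        return sum(math.ceil(count / self.batch_size)
                   for count in dataset_counts.values())

print(ToyScheduler(batch_size=8).count_batches({'ner': 20, 'pos': 7}))   # 3 + 1 = 4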
px4/flight_review
app/plot_app/config.py
get_airframes_url
python
def get_airframes_url():
    return __AIRFRAMES_URL
get airframes download URL
https://github.com/px4/flight_review/blob/0b092d1fdc508500ff6d8e1d3fda7fe7fe788ad6/app/plot_app/config.py#L112-L114
import configparser import os _conf = configparser.ConfigParser() _cur_dir = os.path.dirname(os.path.realpath(__file__)) _conf.read_file(open(os.path.join(_cur_dir, '../config_default.ini'))) _user_config_file = os.path.join(_cur_dir, '../config_user.ini') _user_config_file_old = os.path.join(_cur_dir, '../../config_user.ini') if os.path.exists(_user_config_file_old) and not os.path.exists(_user_config_file): print('moving config file') os.rename(_user_config_file_old, _user_config_file) if os.path.exists(_user_config_file): _conf.read_file(open(_user_config_file)) email_config = dict(_conf.items('email')) email_notifications_config = dict(_conf.items('email_notifications')) email_notifications_config['public_flightreport'] = [ s.strip() for s in email_notifications_config['public_flightreport'].split(',')] email_notifications_config['public_flightreport_bad'] = [ s.strip() for s in email_notifications_config['public_flightreport_bad'].split(',')] __DOMAIN_NAME = _conf.get('general', 'domain_name') __HTTP_PROTOCOL = _conf.get('general', 'http_protocol') __AIRFRAMES_URL = _conf.get('general', 'airframes_url') __PARAMETERS_URL = _conf.get('general', 'parameters_url') __EVENTS_URL = _conf.get('general', 'events_url') __MAPBOX_API_ACCESS_TOKEN = _conf.get('general', 'mapbox_api_access_token') __BING_API_KEY = _conf.get('general', 'bing_maps_api_key') __CESIUM_API_KEY = _conf.get('general', 'cesium_api_key') __LOG_CACHE_SIZE = int(_conf.get('general', 'log_cache_size')) __DB_FILENAME_CUSTOM = _conf.get('general', 'db_filename') __STORAGE_PATH = _conf.get('general', 'storage_path') if not os.path.isabs(__STORAGE_PATH): __STORAGE_PATH = os.path.join(_cur_dir, '..', __STORAGE_PATH) __LOG_FILE_PATH = os.path.join(__STORAGE_PATH, 'log_files') __DB_FILENAME = os.path.join(__STORAGE_PATH, 'logs.sqlite') __CACHE_FILE_PATH = os.path.join(__STORAGE_PATH, 'cache') __AIRFRAMES_FILENAME = os.path.join(__CACHE_FILE_PATH, 'airframes.xml') __PARAMETERS_FILENAME = os.path.join(__CACHE_FILE_PATH, 'parameters.xml') __EVENTS_FILENAME = os.path.join(__CACHE_FILE_PATH, 'events.json.xz') __RELEASES_FILENAME = os.path.join(__CACHE_FILE_PATH, 'releases.json') __PRINT_TIMING = int(_conf.get('debug', 'print_timing')) __VERBOSE_OUTPUT = int(_conf.get('debug', 'verbose_output')) plot_width = 840 plot_color_blue = '#2877a2' plot_color_red = '#e0212d' plot_config = dict( maps_line_color = plot_color_blue, plot_width = plot_width, plot_height = dict( normal = int(plot_width / 2.1), small = int(plot_width / 2.5), large = int(plot_width / 1.61803398874989484), ), ) colors3 = [plot_color_red, '#208900', plot_color_blue] colors2 = [colors3[0], colors3[1]] colors8 = [colors3[0], colors3[1], colors3[2], '#333333', '#999999', '#e58C33', '#33e5e5', '#e533e5'] color_gray = '#464646' plot_config['mission_setpoint_color'] = colors8[5] def get_domain_name(): return __DOMAIN_NAME def get_http_protocol(): return __HTTP_PROTOCOL def get_log_filepath(): return __LOG_FILE_PATH def get_cache_filepath(): return __CACHE_FILE_PATH def get_kml_filepath(): return os.path.join(get_cache_filepath(), 'kml') def get_overview_img_filepath(): return os.path.join(get_cache_filepath(), 'img') def get_db_filename(): if __DB_FILENAME_CUSTOM != "": return __DB_FILENAME_CUSTOM return __DB_FILENAME def get_airframes_filename(): return __AIRFRAMES_FILENAME
BSD 3-Clause New or Revised License
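The config module behind the record above layers an optional user ini file over shipped defaults before exposing getters like get_airframes_url. The same configparser pattern in isolation, with made-up file names and keys so the sketch stays self-contained:

import configparser
import os

conf = configparser.ConfigParser()
# Shipped defaults (inlined here so the sketch runs without any files).
conf.read_string("[general]\nairframes_url = https://example.com/airframes.xml\n")

# A later read() overrides earlier values, so an optional user file wins
# whenever it exists and repeats a key; read() silently skips missing files.
conf.read('config_user.ini')

print(conf.get('general', 'airframes_url'))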