Please provide a description of the function:def url_for(self, *args: str, **kwargs: str) -> URL: return self._resource.url_for(*args, **kwargs)
[ "Construct url for route with additional params." ]
Please provide a description of the function:def add_static(self, prefix: str, path: PathLike, *, name: Optional[str]=None, expect_handler: Optional[_ExpectHandler]=None, chunk_size: int=256 * 1024, show_index: bool=False, follow_symlinks: bool=False, append_version: bool=False) -> AbstractResource: assert prefix.startswith('/') if prefix.endswith('/'): prefix = prefix[:-1] resource = StaticResource(prefix, path, name=name, expect_handler=expect_handler, chunk_size=chunk_size, show_index=show_index, follow_symlinks=follow_symlinks, append_version=append_version) self.register_resource(resource) return resource
[ "Add static files view.\n\n prefix - url prefix\n path - folder with files\n\n " ]
Please provide a description of the function:def add_head(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute: return self.add_route(hdrs.METH_HEAD, path, handler, **kwargs)
[ "\n Shortcut for add_route with method HEAD\n " ]
Please provide a description of the function:def add_options(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute: return self.add_route(hdrs.METH_OPTIONS, path, handler, **kwargs)
[ "\n Shortcut for add_route with method OPTIONS\n " ]
Please provide a description of the function:def add_get(self, path: str, handler: _WebHandler, *, name: Optional[str]=None, allow_head: bool=True, **kwargs: Any) -> AbstractRoute: resource = self.add_resource(path, name=name) if allow_head: resource.add_route(hdrs.METH_HEAD, handler, **kwargs) return resource.add_route(hdrs.METH_GET, handler, **kwargs)
[ "\n Shortcut for add_route with method GET, if allow_head is true another\n route is added allowing head requests to the same endpoint\n " ]
Please provide a description of the function:def add_post(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute: return self.add_route(hdrs.METH_POST, path, handler, **kwargs)
[ "\n Shortcut for add_route with method POST\n " ]
Please provide a description of the function:def add_put(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute: return self.add_route(hdrs.METH_PUT, path, handler, **kwargs)
[ "\n Shortcut for add_route with method PUT\n " ]
Please provide a description of the function:def add_patch(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute: return self.add_route(hdrs.METH_PATCH, path, handler, **kwargs)
[ "\n Shortcut for add_route with method PATCH\n " ]
Please provide a description of the function:def add_delete(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute: return self.add_route(hdrs.METH_DELETE, path, handler, **kwargs)
[ "\n Shortcut for add_route with method DELETE\n " ]
Please provide a description of the function:def add_view(self, path: str, handler: AbstractView, **kwargs: Any) -> AbstractRoute: return self.add_route(hdrs.METH_ANY, path, handler, **kwargs)
[ "\n Shortcut for add_route with ANY methods for a class-based view\n " ]
Please provide a description of the function:def add_routes(self, routes: Iterable[AbstractRouteDef]) -> None: for route_def in routes: route_def.register(self)
[ "Append routes to route table.\n\n Parameter should be a sequence of RouteDef objects.\n " ]
Please provide a description of the function:def parse_headers( self, lines: List[bytes] ) -> Tuple['CIMultiDictProxy[str]', RawHeaders, Optional[bool], Optional[str], bool, bool]: headers, raw_headers = self._headers_parser.parse_headers(lines) close_conn = None encoding = None upgrade = False chunked = False # keep-alive conn = headers.get(hdrs.CONNECTION) if conn: v = conn.lower() if v == 'close': close_conn = True elif v == 'keep-alive': close_conn = False elif v == 'upgrade': upgrade = True # encoding enc = headers.get(hdrs.CONTENT_ENCODING) if enc: enc = enc.lower() if enc in ('gzip', 'deflate', 'br'): encoding = enc # chunking te = headers.get(hdrs.TRANSFER_ENCODING) if te and 'chunked' in te.lower(): chunked = True return (headers, raw_headers, close_conn, encoding, upgrade, chunked)
[ "Parses RFC 5322 headers from a stream.\n\n Line continuations are supported. Returns list of header name\n and value pairs. Header name is in upper case.\n " ]
Please provide a description of the function:def get_extra_info(self, name: str, default: Any=None) -> Any: conn = self._response.connection if conn is None: return default transport = conn.transport if transport is None: return default return transport.get_extra_info(name, default)
[ "extra info from connection transport" ]
Please provide a description of the function:def user_agent(style=None) -> _UserAgent: global useragent if (not useragent) and style: useragent = UserAgent() return useragent[style] if style else DEFAULT_USER_AGENT
[ "Returns an apparently legit user-agent, if not requested one of a specific\n style. Defaults to a Chrome-style User-Agent.\n " ]
Please provide a description of the function:def raw_html(self) -> _RawHTML: if self._html: return self._html else: return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
[ "Bytes representation of the HTML content.\n (`learn more <http://www.diveintopython3.net/strings.html>`_).\n " ]
Please provide a description of the function:def html(self) -> _BaseHTML: if self._html: return self.raw_html.decode(self.encoding, errors='replace') else: return etree.tostring(self.element, encoding='unicode').strip()
[ "Unicode representation of the HTML content\n (`learn more <http://www.diveintopython3.net/strings.html>`_).\n " ]
Please provide a description of the function:def encoding(self) -> _Encoding: if self._encoding: return self._encoding # Scan meta tags for charset. if self._html: self._encoding = html_to_unicode(self.default_encoding, self._html)[0] # Fall back to requests' detected encoding if decode fails. try: self.raw_html.decode(self.encoding, errors='replace') except UnicodeDecodeError: self._encoding = self.default_encoding return self._encoding if self._encoding else self.default_encoding
[ "The encoding string to be used, extracted from the HTML and\n :class:`HTMLResponse <HTMLResponse>` headers.\n " ]
Please provide a description of the function:def pq(self) -> PyQuery: if self._pq is None: self._pq = PyQuery(self.lxml) return self._pq
[ "`PyQuery <https://pythonhosted.org/pyquery/>`_ representation\n of the :class:`Element <Element>` or :class:`HTML <HTML>`.\n " ]
Please provide a description of the function:def lxml(self) -> HtmlElement: if self._lxml is None: try: self._lxml = soup_parse(self.html, features='html.parser') except ValueError: self._lxml = lxml.html.fromstring(self.raw_html) return self._lxml
[ "`lxml <http://lxml.de>`_ representation of the\n :class:`Element <Element>` or :class:`HTML <HTML>`.\n " ]
Please provide a description of the function:def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find: # Convert a single containing into a list. if isinstance(containing, str): containing = [containing] encoding = _encoding or self.encoding elements = [ Element(element=found, url=self.url, default_encoding=encoding) for found in self.pq(selector) ] if containing: elements_copy = elements.copy() elements = [] for element in elements_copy: if any([c.lower() in element.full_text.lower() for c in containing]): elements.append(element) elements.reverse() # Sanitize the found HTML. if clean: elements_copy = elements.copy() elements = [] for element in elements_copy: element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml)) elements.append(element) return _get_first_or_list(elements, first)
[ "Given a CSS Selector, returns a list of\n :class:`Element <Element>` objects or a single one.\n\n :param selector: CSS Selector to use.\n :param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.\n :param containing: If specified, only return elements that contain the provided text.\n :param first: Whether or not to return just the first result.\n :param _encoding: The encoding format.\n\n Example CSS Selectors:\n\n - ``a``\n - ``a.someClass``\n - ``a#someID``\n - ``a[target=_blank]``\n\n See W3School's `CSS Selectors Reference\n <https://www.w3schools.com/cssref/css_selectors.asp>`_\n for more details.\n\n If ``first`` is ``True``, only returns the first\n :class:`Element <Element>` found.\n " ]
Please provide a description of the function:def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath: selected = self.lxml.xpath(selector) elements = [ Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding) if not isinstance(selection, etree._ElementUnicodeResult) else str(selection) for selection in selected ] # Sanitize the found HTML. if clean: elements_copy = elements.copy() elements = [] for element in elements_copy: element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml)) elements.append(element) return _get_first_or_list(elements, first)
[ "Given an XPath selector, returns a list of\n :class:`Element <Element>` objects or a single one.\n\n :param selector: XPath Selector to use.\n :param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.\n :param first: Whether or not to return just the first result.\n :param _encoding: The encoding format.\n\n If a sub-selector is specified (e.g. ``//a/@href``), a simple\n list of results is returned.\n\n See W3School's `XPath Examples\n <https://www.w3schools.com/xml/xpath_examples.asp>`_\n for more details.\n\n If ``first`` is ``True``, only returns the first\n :class:`Element <Element>` found.\n " ]
Please provide a description of the function:def search_all(self, template: str) -> _Result: return [r for r in findall(template, self.html)]
[ "Search the :class:`Element <Element>` (multiple times) for the given parse\n template.\n\n :param template: The Parse template to use.\n " ]
Please provide a description of the function:def links(self) -> _Links: def gen(): for link in self.find('a'): try: href = link.attrs['href'].strip() if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')): yield href except KeyError: pass return set(gen())
[ "All found links on page, in as–is form." ]
Please provide a description of the function:def _make_absolute(self, link): # Parse the link with stdlib. parsed = urlparse(link)._asdict() # If link is relative, then join it with base_url. if not parsed['netloc']: return urljoin(self.base_url, link) # Link is absolute; if it lacks a scheme, add one from base_url. if not parsed['scheme']: parsed['scheme'] = urlparse(self.base_url).scheme # Reconstruct the URL to incorporate the new scheme. parsed = (v for v in parsed.values()) return urlunparse(parsed) # Link is absolute and complete with scheme; nothing to be done here. return link
[ "Makes a given link absolute." ]
Please provide a description of the function:def absolute_links(self) -> _Links: def gen(): for link in self.links: yield self._make_absolute(link) return set(gen())
[ "All found links on page, in absolute form\n (`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).\n " ]
Please provide a description of the function:def base_url(self) -> _URL: # Support for <base> tag. base = self.find('base', first=True) if base: result = base.attrs.get('href', '').strip() if result: return result # Parse the url to separate out the path parsed = urlparse(self.url)._asdict() # Remove any part of the path after the last '/' parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/' # Reconstruct the url with the modified path parsed = (v for v in parsed.values()) url = urlunparse(parsed) return url
[ "The base URL for the page. Supports the ``<base>`` tag\n (`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)." ]
Please provide a description of the function:def attrs(self) -> _Attrs: if self._attrs is None: self._attrs = {k: v for k, v in self.element.items()} # Split class and rel up, as there are usually many of them: for attr in ['class', 'rel']: if attr in self._attrs: self._attrs[attr] = tuple(self._attrs[attr].split()) return self._attrs
[ "Returns a dictionary of the attributes of the :class:`Element <Element>`\n (`learn more <https://www.w3schools.com/tags/ref_attributes.asp>`_).\n " ]
Please provide a description of the function:def next(self, fetch: bool = False, next_symbol: _NextSymbol = DEFAULT_NEXT_SYMBOL) -> _Next: def get_next(): candidates = self.find('a', containing=next_symbol) for candidate in candidates: if candidate.attrs.get('href'): # Support 'next' rel (e.g. reddit). if 'next' in candidate.attrs.get('rel', []): return candidate.attrs['href'] # Support 'next' in classnames. for _class in candidate.attrs.get('class', []): if 'next' in _class: return candidate.attrs['href'] if 'page' in candidate.attrs['href']: return candidate.attrs['href'] try: # Resort to the last candidate. return candidates[-1].attrs['href'] except IndexError: return None __next = get_next() if __next: url = self._make_absolute(__next) else: return None if fetch: return self.session.get(url) else: return url
[ "Attempts to find the next page, if there is one. If ``fetch``\n is ``True`` (default), returns :class:`HTML <HTML>` object of\n next page. If ``fetch`` is ``False``, simply returns the next URL.\n\n " ]
Please provide a description of the function:async def _async_render(self, *, url: str, script: str = None, scrolldown, sleep: int, wait: float, reload, content: Optional[str], timeout: Union[float, int], keep_page: bool): try: page = await self.browser.newPage() # Wait before rendering the page, to prevent timeouts. await asyncio.sleep(wait) # Load the given page (GET request, obviously.) if reload: await page.goto(url, options={'timeout': int(timeout * 1000)}) else: await page.goto(f'data:text/html,{self.html}', options={'timeout': int(timeout * 1000)}) result = None if script: result = await page.evaluate(script) if scrolldown: for _ in range(scrolldown): await page._keyboard.down('PageDown') await asyncio.sleep(sleep) else: await asyncio.sleep(sleep) if scrolldown: await page._keyboard.up('PageDown') # Return the content of the page, JavaScript evaluated. content = await page.content() if not keep_page: await page.close() page = None return content, result, page except TimeoutError: await page.close() page = None return None
[ " Handle page creation and js rendering. Internal use for render/arender methods. " ]
Please provide a description of the function:def render(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False): self.browser = self.session.browser # Automatically create a event loop and browser content = None # Automatically set Reload to False, if example URL is being used. if self.url == DEFAULT_URL: reload = False for i in range(retries): if not content: try: content, result, page = self.session.loop.run_until_complete(self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page)) except TypeError: pass else: break if not content: raise MaxRetries("Unable to render the page. Try increasing timeout") html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING) self.__dict__.update(html.__dict__) self.page = page return result
[ "Reloads the response in Chromium, and replaces HTML content\n with an updated version, with JavaScript executed.\n\n :param retries: The number of times to retry loading the page in Chromium.\n :param script: JavaScript to execute upon page load (optional).\n :param wait: The number of seconds to wait before loading the page, preventing timeouts (optional).\n :param scrolldown: Integer, if provided, of how many times to page down.\n :param sleep: Integer, if provided, of how many long to sleep after initial render.\n :param reload: If ``False``, content will not be loaded from the browser, but will be provided from memory.\n :param keep_page: If ``True`` will allow you to interact with the browser page through ``r.html.page``.\n\n If ``scrolldown`` is specified, the page will scrolldown the specified\n number of times, after sleeping the specified amount of time\n (e.g. ``scrolldown=10, sleep=1``).\n\n If just ``sleep`` is provided, the rendering will wait *n* seconds, before\n returning.\n\n If ``script`` is specified, it will execute the provided JavaScript at\n runtime. Example:\n\n .. code-block:: python\n\n script = \\\"\\\"\\\"\n () => {\n return {\n width: document.documentElement.clientWidth,\n height: document.documentElement.clientHeight,\n deviceScaleFactor: window.devicePixelRatio,\n }\n }\n \\\"\\\"\\\"\n\n Returns the return value of the executed ``script``, if any is provided:\n\n .. code-block:: python\n\n >>> r.html.render(script=script)\n {'width': 800, 'height': 600, 'deviceScaleFactor': 1}\n\n Warning: the first time you run this method, it will download\n Chromium into your home directory (``~/.pyppeteer``).\n " ]
Please provide a description of the function:def response_hook(self, response, **kwargs) -> HTMLResponse: if not response.encoding: response.encoding = DEFAULT_ENCODING return HTMLResponse._from_response(response, self)
[ " Change response enconding and replace it by a HTMLResponse. " ]
Please provide a description of the function:def close(self): if hasattr(self, "_browser"): self.loop.run_until_complete(self._browser.close()) super().close()
[ " If a browser was created close it first. " ]
Please provide a description of the function:def request(self, *args, **kwargs): func = partial(super().request, *args, **kwargs) return self.loop.run_in_executor(self.thread_pool, func)
[ " Partial original request func and run it in a thread. " ]
Please provide a description of the function:def run(self, *coros): tasks = [ asyncio.ensure_future(coro()) for coro in coros ] done, _ = self.loop.run_until_complete(asyncio.wait(tasks)) return [t.result() for t in done]
[ " Pass in all the coroutines you want to run, it will wrap each one\n in a task, run it and wait for the result. Return a list with all\n results, this is returned in the same order coros are passed in. " ]
Please provide a description of the function:def add_depth_channel(img_tensor, pad_mode): ''' img_tensor: N, C, H, W ''' img_tensor[:, 1] = get_depth_tensor(pad_mode) img_tensor[:, 2] = img_tensor[:, 0] * get_depth_tensor(pad_mode)
[]
Please provide a description of the function:def get_pre_compute(self, s): ''' :param s: [src_sequence, batch_size, src_dim] :return: [src_sequence, batch_size. hidden_dim] ''' hidden_dim = self.hidden_dim src_dim = s.get_shape().as_list()[-1] assert src_dim is not None, 'src dim must be defined' W = self._get_var('W', shape=[src_dim, hidden_dim]) b = self._get_var('b', shape=[1, hidden_dim]) return tf.tensordot(s, W, [[2], [0]]) + b
[]
Please provide a description of the function:def get_prob(self, src, tgt, mask, pre_compute, return_logits=False): ''' :param s: [src_sequence_length, batch_size, src_dim] :param h: [batch_size, tgt_dim] or [tgt_sequence_length, batch_size, tgt_dim] :param mask: [src_sequence_length, batch_size]\ or [tgt_sequence_length, src_sequence_length, batch_sizse] :param pre_compute: [src_sequence_length, batch_size, hidden_dim] :return: [src_sequence_length, batch_size]\ or [tgt_sequence_length, src_sequence_length, batch_size] ''' s_shape = src.get_shape().as_list() h_shape = tgt.get_shape().as_list() src_dim = s_shape[-1] tgt_dim = h_shape[-1] assert src_dim is not None, 'src dimension must be defined' assert tgt_dim is not None, 'tgt dimension must be defined' self._define_params(src_dim, tgt_dim) if len(h_shape) == 2: tgt = tf.expand_dims(tgt, 0) if pre_compute is None: pre_compute = self.get_pre_compute(src) buf0 = pre_compute buf1 = tf.tensordot(tgt, self.var['U'], axes=[[2], [0]]) buf2 = tf.tanh(tf.expand_dims(buf0, 0) + tf.expand_dims(buf1, 1)) if not self.is_vanilla: xh1 = tgt xh2 = tgt s1 = src if self.need_padding: xh1 = tf.tensordot(xh1, self.var['V_t'], 1) xh2 = tf.tensordot(xh2, self.var['S_t'], 1) s1 = tf.tensordot(s1, self.var['V_s'], 1) if not self.is_identity_transform: xh1 = tf.tensordot(xh1, self.var['T'], 1) xh2 = tf.tensordot(xh2, self.var['T'], 1) buf3 = tf.expand_dims(s1, 0) * tf.expand_dims(xh1, 1) buf3 = tf.tanh(tf.tensordot(buf3, self.var['V'], axes=[[3], [0]])) buf = tf.reshape(tf.tanh(buf2 + buf3), shape=tf.shape(buf3)) else: buf = buf2 v = self.var['v'] e = tf.tensordot(buf, v, [[3], [0]]) e = tf.squeeze(e, axis=[3]) tmp = tf.reshape(e + (mask - 1) * 10000.0, shape=tf.shape(e)) prob = tf.nn.softmax(tmp, 1) if len(h_shape) == 2: prob = tf.squeeze(prob, axis=[0]) tmp = tf.squeeze(tmp, axis=[0]) if return_logits: return prob, tmp return prob
[]
Please provide a description of the function:def get_att(self, s, prob): ''' :param s: [src_sequence_length, batch_size, src_dim] :param prob: [src_sequence_length, batch_size]\ or [tgt_sequence_length, src_sequence_length, batch_size] :return: [batch_size, src_dim] or [tgt_sequence_length, batch_size, src_dim] ''' buf = s * tf.expand_dims(prob, axis=-1) att = tf.reduce_sum(buf, axis=-3) return att
[]
Please provide a description of the function:def shape(tensor): ''' Get shape of variable. Return type is tuple. ''' temp_s = tensor.get_shape() return tuple([temp_s[i].value for i in range(0, len(temp_s))])
[]
Please provide a description of the function:def get_variable(name, temp_s): ''' Get variable by name. ''' return tf.Variable(tf.zeros(temp_s), name=name)
[]
Please provide a description of the function:def dropout(tensor, drop_prob, is_training): ''' Dropout except test. ''' if not is_training: return tensor return tf.nn.dropout(tensor, 1.0 - drop_prob)
[]
Please provide a description of the function:def get_elapsed(self, restart=True): ''' Calculate time span. ''' end = time.time() span = end - self.__start if restart: self.__start = end return span
[]
Please provide a description of the function:def do_tta_predict(args, model, ckp_path, tta_num=4): ''' return 18000x128x128 np array ''' model.eval() preds = [] meta = None # i is tta index, 0: no change, 1: horizon flip, 2: vertical flip, 3: do both for flip_index in range(tta_num): print('flip_index:', flip_index) test_loader = get_test_loader(args.batch_size, index=flip_index, dev_mode=False, pad_mode=args.pad_mode) meta = test_loader.meta outputs = None with torch.no_grad(): for i, img in enumerate(test_loader): add_depth_channel(img, args.pad_mode) img = img.cuda() output, _ = model(img) output = torch.sigmoid(output) if outputs is None: outputs = output.squeeze() else: outputs = torch.cat([outputs, output.squeeze()], 0) print('{} / {}'.format(args.batch_size*(i+1), test_loader.num), end='\r') outputs = outputs.cpu().numpy() # flip back masks if flip_index == 1: outputs = np.flip(outputs, 2) elif flip_index == 2: outputs = np.flip(outputs, 1) elif flip_index == 3: outputs = np.flip(outputs, 2) outputs = np.flip(outputs, 1) #print(outputs.shape) preds.append(outputs) parent_dir = ckp_path+'_out' if not os.path.exists(parent_dir): os.makedirs(parent_dir) np_file = os.path.join(parent_dir, 'pred.npy') model_pred_result = np.mean(preds, 0) np.save(np_file, model_pred_result) return model_pred_result, meta
[]
Please provide a description of the function:def partition_dataset(): dataset = datasets.MNIST( './data', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307, ), (0.3081, )) ])) size = dist.get_world_size() bsz = 128 / float(size) partition_sizes = [1.0 / size for _ in range(size)] partition = DataPartitioner(dataset, partition_sizes) partition = partition.use(dist.get_rank()) train_set = torch.utils.data.DataLoader( partition, batch_size=int(bsz), shuffle=True) return train_set, bsz
[ " Partitioning MNIST " ]
Please provide a description of the function:def average_gradients(model): size = float(dist.get_world_size()) for param in model.parameters(): dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0) param.grad.data /= size
[ " Gradient averaging. " ]
Please provide a description of the function:def run(params): rank = dist.get_rank() torch.manual_seed(1234) train_set, bsz = partition_dataset() model = Net() model = model optimizer = optim.SGD(model.parameters(), lr=params['learning_rate'], momentum=params['momentum']) num_batches = ceil(len(train_set.dataset) / float(bsz)) total_loss = 0.0 for epoch in range(3): epoch_loss = 0.0 for data, target in train_set: data, target = Variable(data), Variable(target) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) epoch_loss += loss.item() loss.backward() average_gradients(model) optimizer.step() #logger.debug('Rank: ', rank, ', epoch: ', epoch, ': ', epoch_loss / num_batches) if rank == 0: nni.report_intermediate_result(epoch_loss / num_batches) total_loss += (epoch_loss / num_batches) total_loss /= 3 logger.debug('Final loss: {}'.format(total_loss)) if rank == 0: nni.report_final_result(total_loss)
[ " Distributed Synchronous SGD Example " ]
Please provide a description of the function:def graph_loads(graph_json): ''' Load graph ''' layers = [] for layer in graph_json['layers']: layer_info = Layer(layer['type'], layer['input'], layer['output'], layer['size']) layer_info.is_delete = layer['is_delete'] layers.append(layer_info) graph = Graph(graph_json['max_layer_num'], [], [], []) graph.layers = layers return graph
[]
Please provide a description of the function:def set_size(self, graph_id, size): ''' Set size. ''' if self.graph_type == LayerType.attention.value: if self.input[0] == graph_id: self.size = size if self.graph_type == LayerType.rnn.value: self.size = size if self.graph_type == LayerType.self_attention.value: self.size = size if self.graph_type == LayerType.output.value: if self.size != size: return False return True
[]
Please provide a description of the function:def clear_size(self): ''' Clear size ''' if self.graph_type in (LayerType.attention.value, LayerType.rnn.value, LayerType.self_attention.value): self.size = None
[]
Please provide a description of the function:def is_topology(self, layers=None): ''' valid the topology ''' if layers is None: layers = self.layers layers_nodle = [] result = [] for i, layer in enumerate(layers): if layer.is_delete is False: layers_nodle.append(i) while True: flag_break = True layers_toremove = [] for layer1 in layers_nodle: flag_arrive = True for layer2 in layers[layer1].input: if layer2 in layers_nodle: flag_arrive = False if flag_arrive is True: for layer2 in layers[layer1].output: # Size is error if layers[layer2].set_size(layer1, layers[layer1].size) is False: return False layers_toremove.append(layer1) result.append(layer1) flag_break = False for layer in layers_toremove: layers_nodle.remove(layer) result.append('|') if flag_break: break # There is loop in graph || some layers can't to arrive if layers_nodle: return False return result
[]
Please provide a description of the function:def is_legal(self, layers=None): ''' Judge whether is legal for layers ''' if layers is None: layers = self.layers for layer in layers: if layer.is_delete is False: if len(layer.input) != layer.input_size: return False if len(layer.output) < layer.output_size: return False # layer_num <= max_layer_num if self.layer_num(layers) > self.max_layer_num: return False # There is loop in graph || some layers can't to arrive if self.is_topology(layers) is False: return False return True
[]
Please provide a description of the function:def mutation(self, only_add=False): ''' Mutation for a graph ''' types = [] if self.layer_num() < self.max_layer_num: types.append(0) types.append(1) if self.layer_num() > 5 and only_add is False: types.append(2) types.append(3) # 0 : add a layer , delete a edge # 1 : add a layer , change a edge # 2 : delete a layer, delete a edge # 3 : delete a layer, change a edge graph_type = random.choice(types) layer_type = random.choice([LayerType.attention.value,\ LayerType.self_attention.value, LayerType.rnn.value]) layers = copy.deepcopy(self.layers) cnt_try = 0 while True: layers_in = [] layers_out = [] layers_del = [] for i, layer in enumerate(layers): if layer.is_delete is False: if layer.graph_type != LayerType.output.value: layers_in.append(i) if layer.graph_type != LayerType.input.value: layers_out.append(i) if layer.graph_type != LayerType.output.value\ and layer.graph_type != LayerType.input.value: layers_del.append(i) if graph_type <= 1: new_id = len(layers) out = random.choice(layers_out) inputs = [] output = [out] pos = random.randint(0, len(layers[out].input) - 1) last_in = layers[out].input[pos] layers[out].input[pos] = new_id if graph_type == 0: layers[last_in].output.remove(out) if graph_type == 1: layers[last_in].output.remove(out) layers[last_in].output.append(new_id) inputs = [last_in] lay = Layer(graph_type=layer_type, inputs=inputs, output=output) while len(inputs) < lay.input_size: layer1 = random.choice(layers_in) inputs.append(layer1) layers[layer1].output.append(new_id) lay.input = inputs layers.append(lay) else: layer1 = random.choice(layers_del) for layer2 in layers[layer1].output: layers[layer2].input.remove(layer1) if graph_type == 2: random_in = random.choice(layers_in) else: random_in = random.choice(layers[layer1].input) layers[layer2].input.append(random_in) layers[random_in].output.append(layer2) for layer2 in layers[layer1].input: layers[layer2].output.remove(layer1) layers[layer1].is_delete = True if self.is_legal(layers): self.layers = layers break else: layers = copy.deepcopy(self.layers) cnt_try += 1
[]
Please provide a description of the function:def _main_cli(self): self.logger.info("SMAC call: %s" % (" ".join(sys.argv))) cmd_reader = CMDReader() args, _ = cmd_reader.read_cmd() root_logger = logging.getLogger() root_logger.setLevel(args.verbose_level) logger_handler = logging.StreamHandler( stream=sys.stdout) if root_logger.level >= logging.INFO: formatter = logging.Formatter( "%(levelname)s:\t%(message)s") else: formatter = logging.Formatter( "%(asctime)s:%(levelname)s:%(name)s:%(message)s", "%Y-%m-%d %H:%M:%S") logger_handler.setFormatter(formatter) root_logger.addHandler(logger_handler) # remove default handler root_logger.removeHandler(root_logger.handlers[0]) # Create defaults rh = None initial_configs = None stats = None incumbent = None # Create scenario-object scen = Scenario(args.scenario_file, []) if args.mode == "SMAC": optimizer = SMAC( scenario=scen, rng=np.random.RandomState(args.seed), runhistory=rh, initial_configurations=initial_configs, stats=stats, restore_incumbent=incumbent, run_id=args.seed) elif args.mode == "ROAR": optimizer = ROAR( scenario=scen, rng=np.random.RandomState(args.seed), runhistory=rh, initial_configurations=initial_configs, run_id=args.seed) elif args.mode == "EPILS": optimizer = EPILS( scenario=scen, rng=np.random.RandomState(args.seed), runhistory=rh, initial_configurations=initial_configs, run_id=args.seed) else: optimizer = None return optimizer
[ "Main function of SMAC for CLI interface\n \n Returns\n -------\n instance\n optimizer\n " ]
Please provide a description of the function:def update_search_space(self, search_space): if not self.update_ss_done: self.categorical_dict = generate_scenario(search_space) if self.categorical_dict is None: raise RuntimeError('categorical dict is not correctly returned after parsing search space.') self.optimizer = self._main_cli() self.smbo_solver = self.optimizer.solver self.loguniform_key = {key for key in search_space.keys() if search_space[key]['_type'] == 'loguniform'} self.update_ss_done = True else: self.logger.warning('update search space is not supported.')
[ "TODO: this is urgly, we put all the initialization work in this method, because initialization relies\n on search space, also because update_search_space is called at the beginning.\n NOTE: updating search space is not supported.\n\n Parameters\n ----------\n search_space:\n search space\n " ]
Please provide a description of the function:def receive_trial_result(self, parameter_id, parameters, value): reward = extract_scalar_reward(value) if self.optimize_mode is OptimizeMode.Maximize: reward = -reward if parameter_id not in self.total_data: raise RuntimeError('Received parameter_id not in total_data.') if self.first_one: self.smbo_solver.nni_smac_receive_first_run(self.total_data[parameter_id], reward) self.first_one = False else: self.smbo_solver.nni_smac_receive_runs(self.total_data[parameter_id], reward)
[ "receive_trial_result\n \n Parameters\n ----------\n parameter_id: int\n parameter id\n parameters:\n parameters\n value:\n value\n \n Raises\n ------\n RuntimeError\n Received parameter id not in total_data\n " ]
Please provide a description of the function:def convert_loguniform_categorical(self, challenger_dict): converted_dict = {} for key, value in challenger_dict.items(): # convert to loguniform if key in self.loguniform_key: converted_dict[key] = np.exp(challenger_dict[key]) # convert categorical back to original value elif key in self.categorical_dict: idx = challenger_dict[key] converted_dict[key] = self.categorical_dict[key][idx] else: converted_dict[key] = value return converted_dict
[ "Convert the values of type `loguniform` back to their initial range\n Also, we convert categorical:\n categorical values in search space are changed to list of numbers before,\n those original values will be changed back in this function\n \n Parameters\n ----------\n challenger_dict: dict\n challenger dict\n\n Returns\n -------\n dict\n dict which stores copy of challengers\n " ]
Please provide a description of the function:def generate_parameters(self, parameter_id): if self.first_one: init_challenger = self.smbo_solver.nni_smac_start() self.total_data[parameter_id] = init_challenger return self.convert_loguniform_categorical(init_challenger.get_dictionary()) else: challengers = self.smbo_solver.nni_smac_request_challengers() for challenger in challengers: self.total_data[parameter_id] = challenger return self.convert_loguniform_categorical(challenger.get_dictionary())
[ "generate one instance of hyperparameters\n \n Parameters\n ----------\n parameter_id: int\n parameter id\n \n Returns\n -------\n list\n new generated parameters\n " ]
Please provide a description of the function:def generate_multiple_parameters(self, parameter_id_list): if self.first_one: params = [] for one_id in parameter_id_list: init_challenger = self.smbo_solver.nni_smac_start() self.total_data[one_id] = init_challenger params.append(self.convert_loguniform_categorical(init_challenger.get_dictionary())) else: challengers = self.smbo_solver.nni_smac_request_challengers() cnt = 0 params = [] for challenger in challengers: if cnt >= len(parameter_id_list): break self.total_data[parameter_id_list[cnt]] = challenger params.append(self.convert_loguniform_categorical(challenger.get_dictionary())) cnt += 1 return params
[ "generate mutiple instances of hyperparameters\n \n Parameters\n ----------\n parameter_id_list: list\n list of parameter id\n \n Returns\n -------\n list\n list of new generated parameters\n " ]
Please provide a description of the function:def lovasz_grad(gt_sorted): p = len(gt_sorted) gts = gt_sorted.sum() intersection = gts - gt_sorted.float().cumsum(0) union = gts + (1 - gt_sorted).float().cumsum(0) jaccard = 1. - intersection / union if p > 1: # cover 1-pixel case jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] return jaccard
[ "\n Computes gradient of the Lovasz extension w.r.t sorted errors\n See Alg. 1 in paper\n " ]
Please provide a description of the function:def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True): if not per_image: preds, labels = (preds,), (labels,) ious = [] for pred, label in zip(preds, labels): intersection = ((label == 1) & (pred == 1)).sum() union = ((label == 1) | ((pred == 1) & (label != ignore))).sum() if not union: iou = EMPTY else: iou = float(intersection) / union ious.append(iou) iou = mean(ious) # mean accross images if per_image return 100 * iou
[ "\n IoU for foreground class\n binary: 1 foreground, 0 background\n " ]
Please provide a description of the function:def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False): if not per_image: preds, labels = (preds,), (labels,) ious = [] for pred, label in zip(preds, labels): iou = [] for i in range(C): if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes) intersection = ((label == i) & (pred == i)).sum() union = ((label == i) | ((pred == i) & (label != ignore))).sum() if not union: iou.append(EMPTY) else: iou.append(float(intersection) / union) ious.append(iou) ious = map(mean, zip(*ious)) # mean accross images if per_image return 100 * np.array(ious)
[ "\n Array of IoU for each (non ignored) class\n " ]
Please provide a description of the function:def lovasz_hinge(logits, labels, per_image=True, ignore=None): if per_image: loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)) for log, lab in zip(logits, labels)) else: loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) return loss
[ "\n Binary Lovasz hinge loss\n logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n per_image: compute the loss per image instead of per batch\n ignore: void class id\n " ]
Please provide a description of the function:def lovasz_hinge_flat(logits, labels): if len(labels) == 0: # only void pixels, the gradients should be 0 return logits.sum() * 0. signs = 2. * labels.float() - 1. errors = (1. - logits * Variable(signs)) errors_sorted, perm = torch.sort(errors, dim=0, descending=True) perm = perm.data gt_sorted = labels[perm] grad = lovasz_grad(gt_sorted) loss = torch.dot(F.elu(errors_sorted)+1, Variable(grad)) #loss = torch.dot(F.relu(errors_sorted), Variable(grad)) return loss
[ "\n Binary Lovasz hinge loss\n logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)\n labels: [P] Tensor, binary ground truth labels (0 or 1)\n ignore: label to ignore\n " ]
Please provide a description of the function:def flatten_binary_scores(scores, labels, ignore=None): scores = scores.view(-1) labels = labels.view(-1) if ignore is None: return scores, labels valid = (labels != ignore) vscores = scores[valid] vlabels = labels[valid] return vscores, vlabels
[ "\n Flattens predictions in the batch (binary case)\n Remove labels equal to 'ignore'\n " ]
Please provide a description of the function:def binary_xloss(logits, labels, ignore=None): logits, labels = flatten_binary_scores(logits, labels, ignore) loss = StableBCELoss()(logits, Variable(labels.float())) return loss
[ "\n Binary Cross entropy loss\n logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n ignore: void class id\n " ]
Please provide a description of the function:def lovasz_softmax(probas, labels, only_present=False, per_image=False, ignore=None): if per_image: loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), only_present=only_present) for prob, lab in zip(probas, labels)) else: loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), only_present=only_present) return loss
[ "\n Multi-class Lovasz-Softmax loss\n probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1)\n labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)\n only_present: average only on classes present in ground truth\n per_image: compute the loss per image instead of per batch\n ignore: void class labels\n " ]
Please provide a description of the function:def lovasz_softmax_flat(probas, labels, only_present=False): C = probas.size(1) losses = [] for c in range(C): fg = (labels == c).float() # foreground for class c if only_present and fg.sum() == 0: continue errors = (Variable(fg) - probas[:, c]).abs() errors_sorted, perm = torch.sort(errors, 0, descending=True) perm = perm.data fg_sorted = fg[perm] losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) return mean(losses)
[ "\n Multi-class Lovasz-Softmax loss\n probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)\n labels: [P] Tensor, ground truth labels (between 0 and C - 1)\n only_present: average only on classes present in ground truth\n " ]
Please provide a description of the function:def flatten_probas(probas, labels, ignore=None): B, C, H, W = probas.size() probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C labels = labels.view(-1) if ignore is None: return probas, labels valid = (labels != ignore) vprobas = probas[valid.nonzero().squeeze()] vlabels = labels[valid] return vprobas, vlabels
[ "\n Flattens predictions in the batch\n " ]
Please provide a description of the function:def xloss(logits, labels, ignore=None): return F.cross_entropy(logits, Variable(labels), ignore_index=255)
[ "\n Cross entropy loss\n " ]
Please provide a description of the function:def mean(l, ignore_nan=False, empty=0): l = iter(l) if ignore_nan: l = ifilterfalse(np.isnan, l) try: n = 1 acc = next(l) except StopIteration: if empty == 'raise': raise ValueError('Empty mean') return empty for n, v in enumerate(l, 2): acc += v if n == 1: return acc return acc / n
[ "\n nanmean compatible with generators.\n " ]
Please provide a description of the function:def main_loop(args): '''main loop logic for trial keeper''' if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR) stdout_file = open(STDOUT_FULL_PATH, 'a+') stderr_file = open(STDERR_FULL_PATH, 'a+') trial_keeper_syslogger = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'trial_keeper', StdOutputType.Stdout, args.log_collection) # redirect trial keeper's stdout and stderr to syslog trial_syslogger_stdout = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'trial', StdOutputType.Stdout, args.log_collection) sys.stdout = sys.stderr = trial_keeper_syslogger # backward compatibility hdfs_host = None hdfs_output_dir = None if args.hdfs_host: hdfs_host = args.hdfs_host elif args.pai_hdfs_host: hdfs_host = args.pai_hdfs_host if args.hdfs_output_dir: hdfs_output_dir = args.hdfs_output_dir elif args.pai_hdfs_output_dir: hdfs_output_dir = args.pai_hdfs_output_dir if hdfs_host is not None and args.nni_hdfs_exp_dir is not None: try: if args.webhdfs_path: hdfs_client = HdfsClient(hosts='{0}:80'.format(hdfs_host), user_name=args.pai_user_name, webhdfs_path=args.webhdfs_path, timeout=5) else: # backward compatibility hdfs_client = HdfsClient(hosts='{0}:{1}'.format(hdfs_host, '50070'), user_name=args.pai_user_name, timeout=5) except Exception as e: nni_log(LogType.Error, 'Create HDFS client error: ' + str(e)) raise e copyHdfsDirectoryToLocal(args.nni_hdfs_exp_dir, os.getcwd(), hdfs_client) # Notice: We don't appoint env, which means subprocess wil inherit current environment and that is expected behavior log_pipe_stdout = trial_syslogger_stdout.get_pipelog_reader() process = Popen(args.trial_command, shell = True, stdout = log_pipe_stdout, stderr = log_pipe_stdout) nni_log(LogType.Info, 'Trial keeper spawns a subprocess (pid {0}) to run command: {1}'.format(process.pid, shlex.split(args.trial_command))) while True: retCode = process.poll() # child worker process exits and all stdout data is read if retCode is not None and log_pipe_stdout.set_process_exit() and log_pipe_stdout.is_read_completed == True: nni_log(LogType.Info, 'subprocess terminated. Exit code is {}. Quit'.format(retCode)) if hdfs_output_dir is not None: # Copy local directory to hdfs for OpenPAI nni_local_output_dir = os.environ['NNI_OUTPUT_DIR'] try: if copyDirectoryToHdfs(nni_local_output_dir, hdfs_output_dir, hdfs_client): nni_log(LogType.Info, 'copy directory from {0} to {1} success!'.format(nni_local_output_dir, hdfs_output_dir)) else: nni_log(LogType.Info, 'copy directory from {0} to {1} failed!'.format(nni_local_output_dir, hdfs_output_dir)) except Exception as e: nni_log(LogType.Error, 'HDFS copy directory got exception: ' + str(e)) raise e ## Exit as the retCode of subprocess(trial) exit(retCode) break time.sleep(2)
[]
Please provide a description of the function:def forward(self, x): '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]''' N,C,H,W = x.size() g = self.groups return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).contiguous().view(N,C,H,W)
[]
Please provide a description of the function:def load_embedding(path): ''' return embedding for a specific file by given file path. ''' EMBEDDING_DIM = 300 embedding_dict = {} with open(path, 'r', encoding='utf-8') as file: pairs = [line.strip('\r\n').split() for line in file.readlines()] for pair in pairs: if len(pair) == EMBEDDING_DIM + 1: embedding_dict[pair[0]] = [float(x) for x in pair[1:]] logger.debug('embedding_dict size: %d', len(embedding_dict)) return embedding_dict
[]
Please provide a description of the function:def generate_predict_json(position1_result, position2_result, ids, passage_tokens): ''' Generate json by prediction. ''' predict_len = len(position1_result) logger.debug('total prediction num is %s', str(predict_len)) answers = {} for i in range(predict_len): sample_id = ids[i] passage, tokens = passage_tokens[i] kbest = find_best_answer_span( position1_result[i], position2_result[i], len(tokens), 23) _, start, end = kbest[0] answer = passage[tokens[start]['char_begin']:tokens[end]['char_end']] answers[sample_id] = answer logger.debug('generate predict done.') return answers
[]
Please provide a description of the function:def generate_data(path, tokenizer, char_vcb, word_vcb, is_training=False): ''' Generate data ''' global root_path qp_pairs = data.load_from_file(path=path, is_training=is_training) tokenized_sent = 0 # qp_pairs = qp_pairs[:1000]1 for qp_pair in qp_pairs: tokenized_sent += 1 data.tokenize(qp_pair, tokenizer, is_training) for word in qp_pair['question_tokens']: word_vcb.add(word['word']) for char in word['word']: char_vcb.add(char) for word in qp_pair['passage_tokens']: word_vcb.add(word['word']) for char in word['word']: char_vcb.add(char) max_query_length = max(len(x['question_tokens']) for x in qp_pairs) max_passage_length = max(len(x['passage_tokens']) for x in qp_pairs) #min_passage_length = min(len(x['passage_tokens']) for x in qp_pairs) cfg.max_query_length = max_query_length cfg.max_passage_length = max_passage_length return qp_pairs
[]
Please provide a description of the function:def f1_score(prediction, ground_truth): ''' Calculate the f1 score. ''' prediction_tokens = normalize_answer(prediction).split() ground_truth_tokens = normalize_answer(ground_truth).split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1_result = (2 * precision * recall) / (precision + recall) return f1_result
[]
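A worked example for f1_score, assuming normalize_answer is the standard SQuAD normalization (lowercasing, stripping punctuation and articles):

    # prediction   "Barack Obama"           -> tokens ['barack', 'obama']
    # ground truth "President Barack Obama" -> tokens ['president', 'barack', 'obama']
    # overlap = 2, precision = 2/2 = 1.0, recall = 2/3
    # f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8
    f1_score('Barack Obama', 'President Barack Obama')   # -> 0.8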
Please provide a description of the function:def _evaluate(dataset, predictions): ''' Evaluate function. ''' f1_result = exact_match = total = 0 count = 0 for article in dataset: for paragraph in article['paragraphs']: for qa_pair in paragraph['qas']: total += 1 if qa_pair['id'] not in predictions: count += 1 continue ground_truths = list(map(lambda x: x['text'], qa_pair['answers'])) prediction = predictions[qa_pair['id']] exact_match += metric_max_over_ground_truths( exact_match_score, prediction, ground_truths) f1_result += metric_max_over_ground_truths( f1_score, prediction, ground_truths) print('total', total, 'exact_match', exact_match, 'unanswer_question ', count) exact_match = 100.0 * exact_match / total f1_result = 100.0 * f1_result / total return {'exact_match': exact_match, 'f1': f1_result}
[]
Please provide a description of the function:def evaluate(data_file, pred_file): ''' Evaluate. ''' expected_version = '1.1' with open(data_file) as dataset_file: dataset_json = json.load(dataset_file) if dataset_json['version'] != expected_version: print('Evaluation expects v-' + expected_version + ', but got dataset with v-' + dataset_json['version'], file=sys.stderr) dataset = dataset_json['data'] with open(pred_file) as prediction_file: predictions = json.load(prediction_file) # print(json.dumps(evaluate(dataset, predictions))) result = _evaluate(dataset, predictions) # print('em:', result['exact_match'], 'f1:', result['f1']) return result['exact_match']
[]
Please provide a description of the function:def evaluate_with_predictions(data_file, predictions): ''' Evalutate with predictions/ ''' expected_version = '1.1' with open(data_file) as dataset_file: dataset_json = json.load(dataset_file) if dataset_json['version'] != expected_version: print('Evaluation expects v-' + expected_version + ', but got dataset with v-' + dataset_json['version'], file=sys.stderr) dataset = dataset_json['data'] result = _evaluate(dataset, predictions) return result['exact_match']
[]
Please provide a description of the function:def send(command, data): global _lock try: _lock.acquire() data = data.encode('utf8') assert len(data) < 1000000, 'Command too long' msg = b'%b%06d%b' % (command.value, len(data), data) logging.getLogger(__name__).debug('Sending command, data: [%s]' % msg) _out_file.write(msg) _out_file.flush() finally: _lock.release()
[ "Send command to Training Service.\n command: CommandType object.\n data: string payload.\n " ]
Please provide a description of the function:def receive(): header = _in_file.read(8) logging.getLogger(__name__).debug('Received command, header: [%s]' % header) if header is None or len(header) < 8: # Pipe EOF encountered logging.getLogger(__name__).debug('Pipe EOF encountered') return None, None length = int(header[2:]) data = _in_file.read(length) command = CommandType(header[:2]) data = data.decode('utf8') logging.getLogger(__name__).debug('Received command, data: [%s]' % data) return command, data
[ "Receive a command from Training Service.\n Returns a tuple of command (CommandType) and payload (str)\n " ]
Please provide a description of the function:def json2space(in_x, name=ROOT): out_y = copy.deepcopy(in_x) if isinstance(in_x, dict): if TYPE in in_x.keys(): _type = in_x[TYPE] name = name + '-' + _type _value = json2space(in_x[VALUE], name=name) if _type == 'choice': out_y = eval('hp.hp.'+_type)(name, _value) else: if _type in ['loguniform', 'qloguniform']: _value[:2] = np.log(_value[:2]) out_y = eval('hp.hp.' + _type)(name, *_value) else: out_y = dict() for key in in_x.keys(): out_y[key] = json2space(in_x[key], name+'[%s]' % str(key)) elif isinstance(in_x, list): out_y = list() for i, x_i in enumerate(in_x): out_y.append(json2space(x_i, name+'[%d]' % i)) else: logger.info('in_x is not a dict or a list in json2space fuinction %s', str(in_x)) return out_y
[ "\n Change json to search space in hyperopt.\n\n Parameters\n ----------\n in_x : dict/list/str/int/float\n The part of json.\n name : str\n name could be ROOT, TYPE, VALUE or INDEX.\n " ]
Please provide a description of the function:def json2parameter(in_x, parameter, name=ROOT): out_y = copy.deepcopy(in_x) if isinstance(in_x, dict): if TYPE in in_x.keys(): _type = in_x[TYPE] name = name + '-' + _type if _type == 'choice': _index = parameter[name] out_y = { INDEX: _index, VALUE: json2parameter(in_x[VALUE][_index], parameter, name=name+'[%d]' % _index) } else: out_y = parameter[name] else: out_y = dict() for key in in_x.keys(): out_y[key] = json2parameter( in_x[key], parameter, name + '[%s]' % str(key)) elif isinstance(in_x, list): out_y = list() for i, x_i in enumerate(in_x): out_y.append(json2parameter(x_i, parameter, name + '[%d]' % i)) else: logger.info('in_x is not a dict or a list in json2space fuinction %s', str(in_x)) return out_y
[ "\n Change json to parameters.\n " ]
Please provide a description of the function:def _add_index(in_x, parameter): if TYPE not in in_x: # if at the top level out_y = dict() for key, value in parameter.items(): out_y[key] = _add_index(in_x[key], value) return out_y elif isinstance(in_x, dict): value_type = in_x[TYPE] value_format = in_x[VALUE] if value_type == "choice": choice_name = parameter[0] if isinstance(parameter, list) else parameter for pos, item in enumerate(value_format): # here value_format is a list if isinstance(item, list): # this format is ["choice_key", format_dict] choice_key = item[0] choice_value_format = item[1] if choice_key == choice_name: return {INDEX: pos, VALUE: [choice_name, _add_index(choice_value_format, parameter[1])]} elif choice_name == item: return {INDEX: pos, VALUE: item} else: return parameter
[ "\n change parameters in NNI format to parameters in hyperopt format(This function also support nested dict.).\n For example, receive parameters like:\n {'dropout_rate': 0.8, 'conv_size': 3, 'hidden_size': 512}\n Will change to format in hyperopt, like:\n {'dropout_rate': 0.8, 'conv_size': {'_index': 1, '_value': 3}, 'hidden_size': {'_index': 1, '_value': 512}}\n " ]
Please provide a description of the function:def _split_index(params): if isinstance(params, list): return [params[0], _split_index(params[1])] elif isinstance(params, dict): if INDEX in params.keys(): return _split_index(params[VALUE]) result = dict() for key in params: result[key] = _split_index(params[key]) return result else: return params
[ "\n Delete index infromation from params\n " ]
Please provide a description of the function:def _choose_tuner(self, algorithm_name): if algorithm_name == 'tpe': return hp.tpe.suggest if algorithm_name == 'random_search': return hp.rand.suggest if algorithm_name == 'anneal': return hp.anneal.suggest raise RuntimeError('Not support tuner algorithm in hyperopt.')
[ "\n Parameters\n ----------\n algorithm_name : str\n algorithm_name includes \"tpe\", \"random_search\" and anneal\"\n " ]
Please provide a description of the function:def update_search_space(self, search_space): self.json = search_space search_space_instance = json2space(self.json) rstate = np.random.RandomState() trials = hp.Trials() domain = hp.Domain(None, search_space_instance, pass_expr_memo_ctrl=None) algorithm = self._choose_tuner(self.algorithm_name) self.rval = hp.FMinIter(algorithm, domain, trials, max_evals=-1, rstate=rstate, verbose=0) self.rval.catch_eval_exceptions = False
[ "\n Update search space definition in tuner by search_space in parameters.\n\n Will called when first setup experiemnt or update search space in WebUI.\n\n Parameters\n ----------\n search_space : dict\n " ]
Please provide a description of the function:def generate_parameters(self, parameter_id): total_params = self.get_suggestion(random_search=False) # avoid generating same parameter with concurrent trials because hyperopt doesn't support parallel mode if total_params in self.total_data.values(): # but it can cause deplicate parameter rarely total_params = self.get_suggestion(random_search=True) self.total_data[parameter_id] = total_params params = _split_index(total_params) return params
[ "\n Returns a set of trial (hyper-)parameters, as a serializable object.\n\n Parameters\n ----------\n parameter_id : int\n\n Returns\n -------\n params : dict\n " ]
Please provide a description of the function:def receive_trial_result(self, parameter_id, parameters, value): reward = extract_scalar_reward(value) # restore the paramsters contains '_index' if parameter_id not in self.total_data: raise RuntimeError('Received parameter_id not in total_data.') params = self.total_data[parameter_id] if self.optimize_mode is OptimizeMode.Maximize: reward = -reward rval = self.rval domain = rval.domain trials = rval.trials new_id = len(trials) rval_specs = [None] rval_results = [domain.new_result()] rval_miscs = [dict(tid=new_id, cmd=domain.cmd, workdir=domain.workdir)] vals = params idxs = dict() out_y = dict() json2vals(self.json, vals, out_y) vals = out_y for key in domain.params: if key in [VALUE, INDEX]: continue if key not in vals or vals[key] is None or vals[key] == []: idxs[key] = vals[key] = [] else: idxs[key] = [new_id] vals[key] = [vals[key]] self.miscs_update_idxs_vals(rval_miscs, idxs, vals, idxs_map={new_id: new_id}, assert_all_vals_used=False) trial = trials.new_trial_docs([new_id], rval_specs, rval_results, rval_miscs)[0] trial['result'] = {'loss': reward, 'status': 'ok'} trial['state'] = hp.JOB_STATE_DONE trials.insert_trial_docs([trial]) trials.refresh()
[ "\n Record an observation of the objective function\n\n Parameters\n ----------\n parameter_id : int\n parameters : dict\n value : dict/float\n if value is dict, it should have \"default\" key.\n value is final metrics of the trial.\n " ]
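A minimal, hypothetical round trip through the two methods above; the tuner constructor arguments and the train_and_evaluate helper are assumptions for illustration, and the 'default' key is what extract_scalar_reward reads from a dict-valued result.

# Sketch only: train_and_evaluate is a stand-in for the user's trial code.
tuner = HyperoptTuner(algorithm_name='tpe', optimize_mode='maximize')
tuner.update_search_space(search_space)
params = tuner.generate_parameters(parameter_id=0)   # e.g. {'dropout_rate': 0.8, 'conv_size': 3}
accuracy = train_and_evaluate(params)                # hypothetical trial run
tuner.receive_trial_result(parameter_id=0, parameters=params, value={'default': accuracy})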
Please provide a description of the function:def miscs_update_idxs_vals(self, miscs, idxs, vals,
                               assert_all_vals_used=True,
                               idxs_map=None):
        if idxs_map is None:
            idxs_map = {}

        assert set(idxs.keys()) == set(vals.keys())

        misc_by_id = {m['tid']: m for m in miscs}

        for m in miscs:
            m['idxs'] = dict([(key, []) for key in idxs])
            m['vals'] = dict([(key, []) for key in idxs])

        for key in idxs:
            assert len(idxs[key]) == len(vals[key])
            for tid, val in zip(idxs[key], vals[key]):
                tid = idxs_map.get(tid, tid)
                if assert_all_vals_used or tid in misc_by_id:
                    misc_by_id[tid]['idxs'][key] = [tid]
                    misc_by_id[tid]['vals'][key] = [val]
[ "\n Unpack the idxs-vals format into the list of dictionaries that is\n `misc`.\n\n Parameters\n ----------\n idxs_map : dict\n idxs_map is a dictionary of id->id mappings so that the misc['idxs'] can\n contain different numbers than the idxs argument.\n " ]
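A small data illustration of the idxs/vals unpacking this method performs; the trial id and values are made up, and tuner is assumed to be an instance of this class.

# Before the call: one misc record for trial 5, idxs/vals not yet filled in.
miscs = [{'tid': 5, 'cmd': None, 'workdir': None, 'idxs': {}, 'vals': {}}]
idxs = {'dropout_rate': [5], 'conv_size': [5]}
vals = {'dropout_rate': [0.8], 'conv_size': [3]}
# After tuner.miscs_update_idxs_vals(miscs, idxs, vals):
#   miscs[0]['idxs'] == {'dropout_rate': [5], 'conv_size': [5]}
#   miscs[0]['vals'] == {'dropout_rate': [0.8], 'conv_size': [3]}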
Please provide a description of the function:def get_suggestion(self, random_search=False):
        rval = self.rval
        trials = rval.trials
        algorithm = rval.algo
        new_ids = rval.trials.new_trial_ids(1)
        rval.trials.refresh()
        random_state = rval.rstate.randint(2**31-1)
        if random_search:
            new_trials = hp.rand.suggest(new_ids, rval.domain, trials, random_state)
        else:
            new_trials = algorithm(new_ids, rval.domain, trials, random_state)
        rval.trials.refresh()
        vals = new_trials[0]['misc']['vals']
        parameter = dict()
        for key in vals:
            try:
                parameter[key] = vals[key][0].item()
            except (KeyError, IndexError):
                parameter[key] = None

        # remove '_index' from json2parameter and save params-id
        total_params = json2parameter(self.json, parameter)
        return total_params
[ "get suggestion from hyperopt\n\n Parameters\n ----------\n random_search : bool\n flag to indicate random search or not (default: {False})\n\n Returns\n ----------\n total_params : dict\n parameter suggestion\n " ]
Please provide a description of the function:def import_data(self, data):
        _completed_num = 0
        for trial_info in data:
            logger.info("Importing data, current processing progress %s / %s" % (_completed_num, len(data)))
            _completed_num += 1
            if self.algorithm_name == 'random_search':
                return
            assert "parameter" in trial_info
            _params = trial_info["parameter"]
            assert "value" in trial_info
            _value = trial_info['value']
            if not _value:
                logger.info("Useless trial data, value is %s, skip this trial data." % _value)
                continue
            self.supplement_data_num += 1
            _parameter_id = '_'.join(["ImportData", str(self.supplement_data_num)])
            self.total_data[_parameter_id] = _add_index(in_x=self.json, parameter=_params)
            self.receive_trial_result(parameter_id=_parameter_id, parameters=_params, value=_value)
        logger.info("Successfully import data to TPE/Anneal tuner.")
[ "Import additional data for tuning\n\n Parameters\n ----------\n data:\n a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'\n " ]
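A hedged example of the data list this method accepts; the metrics and parameters are illustrative, and both a bare scalar and a dict with a 'default' key are handled because the entries are forwarded to receive_trial_result.

# Illustrative history of previously finished trials.
data = [
    {'parameter': {'dropout_rate': 0.8, 'conv_size': 3}, 'value': 0.92},
    {'parameter': {'dropout_rate': 0.6, 'conv_size': 5}, 'value': {'default': 0.88}},
]
tuner.import_data(data)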
Please provide a description of the function:def next_hyperparameter_lowest_mu(fun_prediction,
                                  fun_prediction_args,
                                  x_bounds, x_types,
                                  minimize_starting_points,
                                  minimize_constraints_fun=None):
    '''
    "Lowest Mu" acquisition function
    '''
    best_x = None
    best_acquisition_value = None
    x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds]
    x_bounds_minmax = numpy.array(x_bounds_minmax)

    for starting_point in numpy.array(minimize_starting_points):
        res = minimize(fun=_lowest_mu,
                       x0=starting_point.reshape(1, -1),
                       bounds=x_bounds_minmax,
                       method="L-BFGS-B",
                       args=(fun_prediction, fun_prediction_args, \
                             x_bounds, x_types, minimize_constraints_fun))

        if (best_acquisition_value is None) or (res.fun < best_acquisition_value):
            res.x = numpy.ndarray.tolist(res.x)
            res.x = lib_data.match_val_type(res.x, x_bounds, x_types)
            if (minimize_constraints_fun is None) or (minimize_constraints_fun(res.x) is True):
                best_acquisition_value = res.fun
                best_x = res.x

    outputs = None
    if best_x is not None:
        mu, sigma = fun_prediction(best_x, *fun_prediction_args)
        outputs = {'hyperparameter': best_x, 'expected_mu': mu,
                   'expected_sigma': sigma, 'acquisition_func': "lm"}

    return outputs
[]
Please provide a description of the function:def _lowest_mu(x, fun_prediction, fun_prediction_args,
               x_bounds, x_types, minimize_constraints_fun):
    '''
    Calculate the lowest mu
    '''
    # This is only for step-wise optimization
    x = lib_data.match_val_type(x, x_bounds, x_types)

    mu = sys.maxsize
    if (minimize_constraints_fun is None) or (minimize_constraints_fun(x) is True):
        mu, _ = fun_prediction(x, *fun_prediction_args)
    return mu
[]
Please provide a description of the function:def build_char_states(self, char_embed, is_training, reuse, char_ids, char_lengths):
        max_char_length = self.cfg.max_char_length

        inputs = dropout(tf.nn.embedding_lookup(char_embed, char_ids),
                         self.cfg.dropout, is_training)
        inputs = tf.reshape(
            inputs, shape=[max_char_length, -1, self.cfg.char_embed_dim])
        char_lengths = tf.reshape(char_lengths, shape=[-1])
        with tf.variable_scope('char_encoding', reuse=reuse):
            cell_fw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
            cell_bw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
            _, (left_right, right_left) = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=cell_fw,
                cell_bw=cell_bw,
                sequence_length=char_lengths,
                inputs=inputs,
                time_major=True,
                dtype=tf.float32
            )

        left_right = tf.reshape(left_right, shape=[-1, self.cfg.char_embed_dim])
        right_left = tf.reshape(right_left, shape=[-1, self.cfg.char_embed_dim])
        states = tf.concat([left_right, right_left], axis=1)
        out_shape = tf.shape(char_ids)[1:3]
        out_shape = tf.concat([out_shape, tf.constant(
            value=[self.cfg.char_embed_dim * 2], dtype=tf.int32)], axis=0)
        return tf.reshape(states, shape=out_shape)
[ "Build char embedding network for the QA model." ]
Please provide a description of the function:def handle_report_metric_data(self, data):
        if data['type'] == 'FINAL':
            self._handle_final_metric_data(data)
        elif data['type'] == 'PERIODICAL':
            if self.assessor is not None:
                self._handle_intermediate_metric_data(data)
            else:
                pass
        else:
            raise ValueError('Data type not supported: {}'.format(data['type']))
[ "\n data: a dict received from nni_manager, which contains:\n - 'parameter_id': id of the trial\n - 'value': metric value reported by nni.report_final_result()\n - 'type': report type, support {'FINAL', 'PERIODICAL'}\n " ]
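A hedged example of a final-result payload as the dispatcher would receive it; the field values are illustrative, and dispatcher is assumed to be an already created dispatcher instance.

# Illustrative FINAL metric payload; only the keys read on the FINAL path are shown.
data = {
    'type': 'FINAL',
    'parameter_id': 0,
    'value': 0.93,
}
dispatcher.handle_report_metric_data(data)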
Please provide a description of the function:def handle_trial_end(self, data):
        trial_job_id = data['trial_job_id']
        _ended_trials.add(trial_job_id)
        if trial_job_id in _trial_history:
            _trial_history.pop(trial_job_id)
            if self.assessor is not None:
                self.assessor.trial_end(trial_job_id, data['event'] == 'SUCCEEDED')
        if self.tuner is not None:
            self.tuner.trial_end(json_tricks.loads(data['hyper_params'])['parameter_id'], data['event'] == 'SUCCEEDED')
[ "\n data: it has three keys: trial_job_id, event, hyper_params\n - trial_job_id: the id generated by training service\n - event: the job's state\n - hyper_params: the hyperparameters generated and returned by tuner\n " ]
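A hedged example of the trial-end payload; the id and hyperparameters are made up, and hyper_params is shown as a JSON string because the handler parses it with json_tricks.loads.

# Illustrative trial-end event.
data = {
    'trial_job_id': 'GaGPW',   # made-up training-service id
    'event': 'SUCCEEDED',
    'hyper_params': '{"parameter_id": 0, "parameters": {"dropout_rate": 0.8}}',
}
dispatcher.handle_trial_end(data)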
Please provide a description of the function:def _handle_final_metric_data(self, data):
        id_ = data['parameter_id']
        value = data['value']
        if id_ in _customized_parameter_ids:
            self.tuner.receive_customized_trial_result(id_, _trial_params[id_], value)
        else:
            self.tuner.receive_trial_result(id_, _trial_params[id_], value)
[ "Call tuner to process final results\n " ]
Please provide a description of the function:def _handle_intermediate_metric_data(self, data):
        if data['type'] != 'PERIODICAL':
            return
        if self.assessor is None:
            return

        trial_job_id = data['trial_job_id']
        if trial_job_id in _ended_trials:
            return

        history = _trial_history[trial_job_id]
        history[data['sequence']] = data['value']
        ordered_history = _sort_history(history)
        if len(ordered_history) < data['sequence']:  # no user-visible update since last time
            return

        try:
            result = self.assessor.assess_trial(trial_job_id, ordered_history)
        except Exception as e:
            _logger.exception('Assessor error')

        if isinstance(result, bool):
            result = AssessResult.Good if result else AssessResult.Bad
        elif not isinstance(result, AssessResult):
            msg = 'Result of Assessor.assess_trial must be an object of AssessResult, not %s'
            raise RuntimeError(msg % type(result))

        if result is AssessResult.Bad:
            _logger.debug('BAD, kill %s', trial_job_id)
            send(CommandType.KillTrialJob, json_tricks.dumps(trial_job_id))
            # notify tuner
            _logger.debug('env var: NNI_INCLUDE_INTERMEDIATE_RESULTS: [%s]', dispatcher_env_vars.NNI_INCLUDE_INTERMEDIATE_RESULTS)
            if dispatcher_env_vars.NNI_INCLUDE_INTERMEDIATE_RESULTS == 'true':
                self._earlystop_notify_tuner(data)
        else:
            _logger.debug('GOOD')
[ "Call assessor to process intermediate results\n " ]
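A hedged example of a PERIODICAL payload routed to this method; besides the keys listed in the report-metric docstring it also reads 'trial_job_id' and 'sequence', and the values below are made up.

# Illustrative intermediate metric; routed here via handle_report_metric_data.
data = {
    'type': 'PERIODICAL',
    'trial_job_id': 'GaGPW',
    'parameter_id': 0,
    'sequence': 3,          # index of this intermediate result
    'value': 0.71,
}
dispatcher.handle_report_metric_data(data)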
Please provide a description of the function:def _earlystop_notify_tuner(self, data):
        _logger.debug('Early stop notify tuner data: [%s]', data)
        data['type'] = 'FINAL'
        if multi_thread_enabled():
            self._handle_final_metric_data(data)
        else:
            self.enqueue_command(CommandType.ReportMetricData, data)
[ "Send last intermediate result as final result to tuner in case the\n trial is early stopped.\n " ]