Code | Summary
---|---
def set(self) -> None:
    if not self._value:
        self._value = True
        for fut in self._waiters:
            if not fut.done():
                fut.set_result(None)
|
[
"Set the internal flag to ``True``. All waiters are awakened.\n\n Calling `.wait` once the flag is set will not block.\n "
] |
def wait(self, timeout: Union[float, datetime.timedelta] = None) -> Awaitable[None]:
    fut = Future()  # type: Future[None]
    if self._value:
        fut.set_result(None)
        return fut
    self._waiters.add(fut)
    fut.add_done_callback(lambda fut: self._waiters.remove(fut))
    if timeout is None:
        return fut
    else:
        timeout_fut = gen.with_timeout(
            timeout, fut, quiet_exceptions=(CancelledError,)
        )
        # This is a slightly clumsy workaround for the fact that
        # gen.with_timeout doesn't cancel its futures. Cancelling
        # fut will remove it from the waiters list.
        timeout_fut.add_done_callback(
            lambda tf: fut.cancel() if not fut.done() else None
        )
        return timeout_fut
|
[
"Block until the internal flag is true.\n\n Returns an awaitable, which raises `tornado.util.TimeoutError` after a\n timeout.\n "
] |
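Together, set and wait implement one-shot signalling between coroutines. A minimal usage sketch, assuming tornado.locks.Event; the coroutine names and one-second delay are illustrative:

from tornado import gen, ioloop, locks

event = locks.Event()

async def waiter():
    print("waiting for event")
    await event.wait()  # resolves once set() flips the flag
    print("event fired")

async def setter():
    await gen.sleep(1)
    event.set()  # wakes every pending wait()

ioloop.IOLoop.current().run_sync(
    lambda: gen.multi([waiter(), setter()])
)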
def release(self) -> None:
    self._value += 1
    while self._waiters:
        waiter = self._waiters.popleft()
        if not waiter.done():
            self._value -= 1
            # If the waiter is a coroutine paused at
            #
            #     with (yield semaphore.acquire()):
            #
            # then the context manager's __exit__ calls release() at the end
            # of the "with" block.
            waiter.set_result(_ReleasingContextManager(self))
            break
|
[
"Increment the counter and wake one waiter."
] |
def acquire(
    self, timeout: Union[float, datetime.timedelta] = None
) -> Awaitable[_ReleasingContextManager]:
    waiter = Future()  # type: Future[_ReleasingContextManager]
    if self._value > 0:
        self._value -= 1
        waiter.set_result(_ReleasingContextManager(self))
    else:
        self._waiters.append(waiter)
        if timeout:

            def on_timeout() -> None:
                if not waiter.done():
                    waiter.set_exception(gen.TimeoutError())
                self._garbage_collect()

            io_loop = ioloop.IOLoop.current()
            timeout_handle = io_loop.add_timeout(timeout, on_timeout)
            waiter.add_done_callback(
                lambda _: io_loop.remove_timeout(timeout_handle)
            )
    return waiter
|
[
"Decrement the counter. Returns an awaitable.\n\n Block if the counter is zero and wait for a `.release`. The awaitable\n raises `.TimeoutError` after the deadline.\n "
] |
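Because acquire resolves to a _ReleasingContextManager, a semaphore also works with async with, which releases on exit. A sketch assuming tornado.locks.Semaphore; the worker count and sleep are illustrative:

from tornado import gen, ioloop
from tornado.locks import Semaphore

sem = Semaphore(2)  # at most two workers inside the critical section

async def worker(n):
    async with sem:  # acquire() on entry, release() on exit
        print("worker %d running" % n)
        await gen.sleep(0.1)

ioloop.IOLoop.current().run_sync(
    lambda: gen.multi([worker(i) for i in range(5)])
)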
def release(self) -> None:
    if self._value >= self._initial_value:
        raise ValueError("Semaphore released too many times")
    super(BoundedSemaphore, self).release()
|
[
"Increment the counter and wake one waiter."
] |
def acquire(
    self, timeout: Union[float, datetime.timedelta] = None
) -> Awaitable[_ReleasingContextManager]:
    return self._block.acquire(timeout)
|
[
"Attempt to lock. Returns an awaitable.\n\n Returns an awaitable, which raises `tornado.util.TimeoutError` after a\n timeout.\n "
] |
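Lock.acquire simply delegates to its internal BoundedSemaphore (self._block), so locks accept the same timeout forms. A sketch, assuming tornado.locks.Lock; the one-second deadline is illustrative:

import datetime
from tornado import gen, ioloop
from tornado.locks import Lock
from tornado.util import TimeoutError

lock = Lock()

async def with_deadline():
    try:
        # Relative deadline via timedelta, as the signature above allows.
        await lock.acquire(timeout=datetime.timedelta(seconds=1))
    except TimeoutError:
        return  # lock stayed contended past the deadline
    try:
        await gen.sleep(0.01)  # critical section
    finally:
        lock.release()

ioloop.IOLoop.current().run_sync(with_deadline)

When no deadline is needed, async with lock: is the shorter equivalent.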
def read_response(self, delegate: httputil.HTTPMessageDelegate) -> Awaitable[bool]:
    if self.params.decompress:
        delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
    return self._read_message(delegate)
|
[
"Read a single HTTP response.\n\n Typical client-mode usage is to write a request using `write_headers`,\n `write`, and `finish`, and then call ``read_response``.\n\n :arg delegate: a `.HTTPMessageDelegate`\n\n Returns a `.Future` that resolves to a bool after the full response has\n been read. The result is true if the stream is still open.\n "
] |
def _clear_callbacks(self) -> None:
    self._write_callback = None
    self._write_future = None  # type: Optional[Future[None]]
    self._close_callback = None  # type: Optional[Callable[[], None]]
    if self.stream is not None:
        self.stream.set_close_callback(None)
|
[
"Clears the callback attributes.\n\n This allows the request handler to be garbage collected more\n quickly in CPython by breaking up reference cycles.\n "
] |
def detach(self) -> iostream.IOStream:
    self._clear_callbacks()
    stream = self.stream
    self.stream = None  # type: ignore
    if not self._finish_future.done():
        future_set_result_unless_cancelled(self._finish_future, None)
    return stream
|
[
"Take control of the underlying stream.\n\n Returns the underlying `.IOStream` object and stops all further\n HTTP processing. May only be called during\n `.HTTPMessageDelegate.headers_received`. Intended for implementing\n protocols like websockets that tunnel over an HTTP handshake.\n "
] |
def write_headers(
    self,
    start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
    headers: httputil.HTTPHeaders,
    chunk: bytes = None,
) -> "Future[None]":
    lines = []
    if self.is_client:
        assert isinstance(start_line, httputil.RequestStartLine)
        self._request_start_line = start_line
        lines.append(utf8("%s %s HTTP/1.1" % (start_line[0], start_line[1])))
        # Client requests with a non-empty body must have either a
        # Content-Length or a Transfer-Encoding.
        self._chunking_output = (
            start_line.method in ("POST", "PUT", "PATCH")
            and "Content-Length" not in headers
            and (
                "Transfer-Encoding" not in headers
                or headers["Transfer-Encoding"] == "chunked"
            )
        )
    else:
        assert isinstance(start_line, httputil.ResponseStartLine)
        assert self._request_start_line is not None
        assert self._request_headers is not None
        self._response_start_line = start_line
        lines.append(utf8("HTTP/1.1 %d %s" % (start_line[1], start_line[2])))
        self._chunking_output = (
            # TODO: should this use
            # self._request_start_line.version or
            # start_line.version?
            self._request_start_line.version == "HTTP/1.1"
            # 1xx, 204 and 304 responses have no body (not even a zero-length
            # body), and so should not have either Content-Length or
            # Transfer-Encoding headers.
            and start_line.code not in (204, 304)
            and (start_line.code < 100 or start_line.code >= 200)
            # No need to chunk the output if a Content-Length is specified.
            and "Content-Length" not in headers
            # Applications are discouraged from touching Transfer-Encoding,
            # but if they do, leave it alone.
            and "Transfer-Encoding" not in headers
        )
        # If the connection to a 1.1 client will be closed, inform the client.
        if (
            self._request_start_line.version == "HTTP/1.1"
            and self._disconnect_on_finish
        ):
            headers["Connection"] = "close"
        # If a 1.0 client asked for keep-alive, add the header.
        if (
            self._request_start_line.version == "HTTP/1.0"
            and self._request_headers.get("Connection", "").lower() == "keep-alive"
        ):
            headers["Connection"] = "Keep-Alive"
    if self._chunking_output:
        headers["Transfer-Encoding"] = "chunked"
    if not self.is_client and (
        self._request_start_line.method == "HEAD"
        or cast(httputil.ResponseStartLine, start_line).code == 304
    ):
        self._expected_content_remaining = 0
    elif "Content-Length" in headers:
        self._expected_content_remaining = int(headers["Content-Length"])
    else:
        self._expected_content_remaining = None
    # TODO: headers are supposed to be of type str, but we still have some
    # cases that let bytes slip through. Remove these native_str calls when
    # those are fixed.
    header_lines = (
        native_str(n) + ": " + native_str(v) for n, v in headers.get_all()
    )
    lines.extend(l.encode("latin1") for l in header_lines)
    for line in lines:
        if b"\n" in line:
            raise ValueError("Newline in header: " + repr(line))
    future = None
    if self.stream.closed():
        future = self._write_future = Future()
        future.set_exception(iostream.StreamClosedError())
        future.exception()
    else:
        future = self._write_future = Future()
        data = b"\r\n".join(lines) + b"\r\n\r\n"
        if chunk:
            data += self._format_chunk(chunk)
        self._pending_write = self.stream.write(data)
        future_add_done_callback(self._pending_write, self._on_write_complete)
    return future
|
[
"Implements `.HTTPConnection.write_headers`."
] |
def write(self, chunk: bytes) -> "Future[None]":
    future = None
    if self.stream.closed():
        future = self._write_future = Future()
        self._write_future.set_exception(iostream.StreamClosedError())
        self._write_future.exception()
    else:
        future = self._write_future = Future()
        self._pending_write = self.stream.write(self._format_chunk(chunk))
        future_add_done_callback(self._pending_write, self._on_write_complete)
    return future
|
[
"Implements `.HTTPConnection.write`.\n\n For backwards compatibility it is allowed but deprecated to\n skip `write_headers` and instead call `write()` with a\n pre-encoded header block.\n "
] |
def finish(self) -> None:
    if (
        self._expected_content_remaining is not None
        and self._expected_content_remaining != 0
        and not self.stream.closed()
    ):
        self.stream.close()
        raise httputil.HTTPOutputError(
            "Tried to write %d bytes less than Content-Length"
            % self._expected_content_remaining
        )
    if self._chunking_output:
        if not self.stream.closed():
            self._pending_write = self.stream.write(b"0\r\n\r\n")
            self._pending_write.add_done_callback(self._on_write_complete)
    self._write_finished = True
    # If the app finished the request while we're still reading,
    # divert any remaining data away from the delegate and
    # close the connection when we're done sending our response.
    # Closing the connection is the only way to avoid reading the
    # whole input body.
    if not self._read_finished:
        self._disconnect_on_finish = True
    # No more data is coming, so instruct TCP to send any remaining
    # data immediately instead of waiting for a full packet or ack.
    self.stream.set_nodelay(True)
    if self._pending_write is None:
        self._finish_request(None)
    else:
        future_add_done_callback(self._pending_write, self._finish_request)
|
[
"Implements `.HTTPConnection.finish`."
] |
async def close(self) -> None:
    self.stream.close()
    # Block until the serving loop is done, but ignore any exceptions
    # (start_serving is already responsible for logging them).
    assert self._serving_future is not None
    try:
        await self._serving_future
    except Exception:
        pass
|
[
"Closes the connection.\n\n Returns a `.Future` that resolves after the serving loop has exited.\n "
] |
def start_serving(self, delegate: httputil.HTTPServerConnectionDelegate) -> None:
    assert isinstance(delegate, httputil.HTTPServerConnectionDelegate)
    fut = gen.convert_yielded(self._server_request_loop(delegate))
    self._serving_future = fut
    # Register the future on the IOLoop so its errors get logged.
    self.stream.io_loop.add_future(fut, lambda f: f.result())
|
[
"Starts serving requests on this connection.\n\n :arg delegate: a `.HTTPServerConnectionDelegate`\n "
] |
def websocket_connect(
    url: Union[str, httpclient.HTTPRequest],
    callback: Callable[["Future[WebSocketClientConnection]"], None] = None,
    connect_timeout: float = None,
    on_message_callback: Callable[[Union[None, str, bytes]], None] = None,
    compression_options: Dict[str, Any] = None,
    ping_interval: float = None,
    ping_timeout: float = None,
    max_message_size: int = _default_max_message_size,
    subprotocols: List[str] = None,
) -> "Awaitable[WebSocketClientConnection]":
    if isinstance(url, httpclient.HTTPRequest):
        assert connect_timeout is None
        request = url
        # Copy and convert the headers dict/object (see comments in
        # AsyncHTTPClient.fetch)
        request.headers = httputil.HTTPHeaders(request.headers)
    else:
        request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
    request = cast(
        httpclient.HTTPRequest,
        httpclient._RequestProxy(request, httpclient.HTTPRequest._DEFAULTS),
    )
    conn = WebSocketClientConnection(
        request,
        on_message_callback=on_message_callback,
        compression_options=compression_options,
        ping_interval=ping_interval,
        ping_timeout=ping_timeout,
        max_message_size=max_message_size,
        subprotocols=subprotocols,
    )
    if callback is not None:
        IOLoop.current().add_future(conn.connect_future, callback)
    return conn.connect_future
|
[
"Client-side websocket support.\n\n Takes a url and returns a Future whose result is a\n `WebSocketClientConnection`.\n\n ``compression_options`` is interpreted in the same way as the\n return value of `.WebSocketHandler.get_compression_options`.\n\n The connection supports two styles of operation. In the coroutine\n style, the application typically calls\n `~.WebSocketClientConnection.read_message` in a loop::\n\n conn = yield websocket_connect(url)\n while True:\n msg = yield conn.read_message()\n if msg is None: break\n # Do something with msg\n\n In the callback style, pass an ``on_message_callback`` to\n ``websocket_connect``. In both styles, a message of ``None``\n indicates that the connection has been closed.\n\n ``subprotocols`` may be a list of strings specifying proposed\n subprotocols. The selected protocol may be found on the\n ``selected_subprotocol`` attribute of the connection object\n when the connection is complete.\n\n .. versionchanged:: 3.2\n Also accepts ``HTTPRequest`` objects in place of urls.\n\n .. versionchanged:: 4.1\n Added ``compression_options`` and ``on_message_callback``.\n\n .. versionchanged:: 4.5\n Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``\n arguments, which have the same meaning as in `WebSocketHandler`.\n\n .. versionchanged:: 5.0\n The ``io_loop`` argument (deprecated since version 4.1) has been removed.\n\n .. versionchanged:: 5.1\n Added the ``subprotocols`` argument.\n "
] |
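The docstring above shows the older yield style; the same read loop in async/await form, as a sketch (the URL assumes some websocket echo endpoint is listening):

from tornado import ioloop, websocket

async def echo_client():
    conn = await websocket.websocket_connect("ws://localhost:8888/ws")
    await conn.write_message("hello")
    while True:
        msg = await conn.read_message()
        if msg is None:  # None signals that the connection closed
            break
        print("got:", msg)

ioloop.IOLoop.current().run_sync(echo_client)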
def write_message(
    self, message: Union[bytes, str, Dict[str, Any]], binary: bool = False
) -> "Future[None]":
    if self.ws_connection is None or self.ws_connection.is_closing():
        raise WebSocketClosedError()
    if isinstance(message, dict):
        message = tornado.escape.json_encode(message)
    return self.ws_connection.write_message(message, binary=binary)
|
[
"Sends the given message to the client of this Web Socket.\n\n The message may be either a string or a dict (which will be\n encoded as json). If the ``binary`` argument is false, the\n message will be sent as utf8; in binary mode any byte string\n is allowed.\n\n If the connection is already closed, raises `WebSocketClosedError`.\n Returns a `.Future` which can be used for flow control.\n\n .. versionchanged:: 3.2\n `WebSocketClosedError` was added (previously a closed connection\n would raise an `AttributeError`)\n\n .. versionchanged:: 4.3\n Returns a `.Future` which can be used for flow control.\n\n .. versionchanged:: 5.0\n Consistently raises `WebSocketClosedError`. Previously could\n sometimes raise `.StreamClosedError`.\n "
] |
def ping(self, data: Union[str, bytes] = b"") -> None:
    data = utf8(data)
    if self.ws_connection is None or self.ws_connection.is_closing():
        raise WebSocketClosedError()
    self.ws_connection.write_ping(data)
|
[
"Send ping frame to the remote end.\n\n The data argument allows a small amount of data (up to 125\n bytes) to be sent as a part of the ping message. Note that not\n all websocket implementations expose this data to\n applications.\n\n Consider using the ``websocket_ping_interval`` application\n setting instead of sending pings manually.\n\n .. versionchanged:: 5.1\n\n The data argument is now optional.\n\n "
] |
def close(self, code: int = None, reason: str = None) -> None:
    if self.ws_connection:
        self.ws_connection.close(code, reason)
        self.ws_connection = None
|
[
"Closes this Web Socket.\n\n Once the close handshake is successful the socket will be closed.\n\n ``code`` may be a numeric status code, taken from the values\n defined in `RFC 6455 section 7.4.1\n <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.\n ``reason`` may be a textual message about why the connection is\n closing. These values are made available to the client, but are\n not otherwise interpreted by the websocket protocol.\n\n .. versionchanged:: 4.0\n\n Added the ``code`` and ``reason`` arguments.\n "
] |
def check_origin(self, origin: str) -> bool:
    parsed_origin = urlparse(origin)
    origin = parsed_origin.netloc
    origin = origin.lower()
    host = self.request.headers.get("Host")
    # Check to see that origin matches host directly, including ports
    return origin == host
|
[
"Override to enable support for allowing alternate origins.\n\n The ``origin`` argument is the value of the ``Origin`` HTTP\n header, the url responsible for initiating this request. This\n method is not called for clients that do not send this header;\n such requests are always allowed (because all browsers that\n implement WebSockets support this header, and non-browser\n clients do not have the same cross-site security concerns).\n\n Should return ``True`` to accept the request or ``False`` to\n reject it. By default, rejects all requests with an origin on\n a host other than this one.\n\n This is a security protection against cross site scripting attacks on\n browsers, since WebSockets are allowed to bypass the usual same-origin\n policies and don't use CORS headers.\n\n .. warning::\n\n This is an important security measure; don't disable it\n without understanding the security implications. In\n particular, if your authentication is cookie-based, you\n must either restrict the origins allowed by\n ``check_origin()`` or implement your own XSRF-like\n protection for websocket connections. See `these\n <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_\n `articles\n <https://devcenter.heroku.com/articles/websocket-security>`_\n for more.\n\n To accept all cross-origin traffic (which was the default prior to\n Tornado 4.0), simply override this method to always return ``True``::\n\n def check_origin(self, origin):\n return True\n\n To allow connections from any subdomain of your site, you might\n do something like::\n\n def check_origin(self, origin):\n parsed_origin = urllib.parse.urlparse(origin)\n return parsed_origin.netloc.endswith(\".mydomain.com\")\n\n .. versionadded:: 4.0\n\n "
] |
def set_nodelay(self, value: bool) -> None:
    assert self.ws_connection is not None
    self.ws_connection.set_nodelay(value)
|
[
"Set the no-delay flag for this stream.\n\n By default, small messages may be delayed and/or combined to minimize\n the number of packets sent. This can sometimes cause 200-500ms delays\n due to the interaction between Nagle's algorithm and TCP delayed\n ACKs. To reduce this delay (at the expense of possibly increasing\n bandwidth usage), call ``self.set_nodelay(True)`` once the websocket\n connection is established.\n\n See `.BaseIOStream.set_nodelay` for additional details.\n\n .. versionadded:: 3.1\n "
] |
def _run_callback(
    self, callback: Callable, *args: Any, **kwargs: Any
) -> "Optional[Future[Any]]":
    try:
        result = callback(*args, **kwargs)
    except Exception:
        self.handler.log_exception(*sys.exc_info())
        self._abort()
        return None
    else:
        if result is not None:
            result = gen.convert_yielded(result)
            assert self.stream is not None
            self.stream.io_loop.add_future(result, lambda f: f.result())
        return result
|
[
"Runs the given callback with exception handling.\n\n If the callback is a coroutine, returns its Future. On error, aborts the\n websocket connection and returns None.\n "
] |
def _abort(self) -> None:
    self.client_terminated = True
    self.server_terminated = True
    if self.stream is not None:
        self.stream.close()  # forcibly tear down the connection
    self.close()
|
[
"Instantly aborts the WebSocket connection by closing the socket"
] |
def _handle_websocket_headers(self, handler: WebSocketHandler) -> None:
    fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
    if not all(map(lambda f: handler.request.headers.get(f), fields)):
        raise ValueError("Missing/Invalid WebSocket headers")
|
[
"Verifies all invariant- and required headers\n\n If a header is missing or have an incorrect value ValueError will be\n raised\n "
] |
def compute_accept_value(key: Union[str, bytes]) -> str:
    sha1 = hashlib.sha1()
    sha1.update(utf8(key))
    sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")  # Magic value
    return native_str(base64.b64encode(sha1.digest()))
|
[
"Computes the value for the Sec-WebSocket-Accept header,\n given the value for Sec-WebSocket-Key.\n "
] |
def _process_server_headers(
    self, key: Union[str, bytes], headers: httputil.HTTPHeaders
) -> None:
    assert headers["Upgrade"].lower() == "websocket"
    assert headers["Connection"].lower() == "upgrade"
    accept = self.compute_accept_value(key)
    assert headers["Sec-Websocket-Accept"] == accept
    extensions = self._parse_extensions_header(headers)
    for ext in extensions:
        if ext[0] == "permessage-deflate" and self._compression_options is not None:
            self._create_compressors("client", ext[1])
        else:
            # %-format the message; passing ext as a second argument to
            # ValueError would leave it out of the rendered error string.
            raise ValueError("unsupported extension %r" % (ext,))
    self.selected_subprotocol = headers.get("Sec-WebSocket-Protocol", None)
|
[
"Process the headers sent by the server to this client connection.\n\n 'key' is the websocket handshake challenge/response key.\n "
] |
def _get_compressor_options(
    self,
    side: str,
    agreed_parameters: Dict[str, Any],
    compression_options: Dict[str, Any] = None,
) -> Dict[str, Any]:
    options = dict(
        persistent=(side + "_no_context_takeover") not in agreed_parameters
    )  # type: Dict[str, Any]
    wbits_header = agreed_parameters.get(side + "_max_window_bits", None)
    if wbits_header is None:
        options["max_wbits"] = zlib.MAX_WBITS
    else:
        options["max_wbits"] = int(wbits_header)
    options["compression_options"] = compression_options
    return options
|
[
"Converts a websocket agreed_parameters set to keyword arguments\n for our compressor objects.\n "
] |
def write_message(
    self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
    if binary:
        opcode = 0x2
    else:
        opcode = 0x1
    message = tornado.escape.utf8(message)
    assert isinstance(message, bytes)
    self._message_bytes_out += len(message)
    flags = 0
    if self._compressor:
        message = self._compressor.compress(message)
        flags |= self.RSV1
    # For historical reasons, write methods in Tornado operate in a
    # semi-synchronous mode in which awaiting the Future they return is
    # optional (but errors can still be raised). This requires us to go
    # through an awkward dance here to transform the errors that may be
    # returned while presenting the same semi-synchronous interface.
    try:
        fut = self._write_frame(True, opcode, message, flags=flags)
    except StreamClosedError:
        raise WebSocketClosedError()

    async def wrapper() -> None:
        try:
            await fut
        except StreamClosedError:
            raise WebSocketClosedError()

    return asyncio.ensure_future(wrapper())
|
[
"Sends the given message to the client of this Web Socket."
] |
def write_ping(self, data: bytes) -> None:
    assert isinstance(data, bytes)
    self._write_frame(True, 0x9, data)
|
[
"Send ping frame."
] |
def _handle_message(self, opcode: int, data: bytes) -> "Optional[Future[None]]":
    if self.client_terminated:
        return None

    if self._frame_compressed:
        assert self._decompressor is not None
        try:
            data = self._decompressor.decompress(data)
        except _DecompressTooLargeError:
            self.close(1009, "message too big after decompression")
            self._abort()
            return None

    if opcode == 0x1:
        # UTF-8 data
        self._message_bytes_in += len(data)
        try:
            decoded = data.decode("utf-8")
        except UnicodeDecodeError:
            self._abort()
            return None
        return self._run_callback(self.handler.on_message, decoded)
    elif opcode == 0x2:
        # Binary data
        self._message_bytes_in += len(data)
        return self._run_callback(self.handler.on_message, data)
    elif opcode == 0x8:
        # Close
        self.client_terminated = True
        if len(data) >= 2:
            self.close_code = struct.unpack(">H", data[:2])[0]
        if len(data) > 2:
            self.close_reason = to_unicode(data[2:])
        # Echo the received close code, if any (RFC 6455 section 5.5.1).
        self.close(self.close_code)
    elif opcode == 0x9:
        # Ping
        try:
            self._write_frame(True, 0xA, data)
        except StreamClosedError:
            self._abort()
        self._run_callback(self.handler.on_ping, data)
    elif opcode == 0xA:
        # Pong
        self.last_pong = IOLoop.current().time()
        return self._run_callback(self.handler.on_pong, data)
    else:
        self._abort()
    return None
|
[
"Execute on_message, returning its Future if it is a coroutine."
] |
def close(self, code: int = None, reason: str = None) -> None:
    if not self.server_terminated:
        if not self.stream.closed():
            if code is None and reason is not None:
                code = 1000  # "normal closure" status code
            if code is None:
                close_data = b""
            else:
                close_data = struct.pack(">H", code)
            if reason is not None:
                close_data += utf8(reason)
            try:
                self._write_frame(True, 0x8, close_data)
            except StreamClosedError:
                self._abort()
        self.server_terminated = True
    if self.client_terminated:
        if self._waiting is not None:
            self.stream.io_loop.remove_timeout(self._waiting)
            self._waiting = None
        self.stream.close()
    elif self._waiting is None:
        # Give the client a few seconds to complete a clean shutdown,
        # otherwise just close the connection.
        self._waiting = self.stream.io_loop.add_timeout(
            self.stream.io_loop.time() + 5, self._abort
        )
|
[
"Closes the WebSocket connection."
] |
def is_closing(self) -> bool:
    return self.stream.closed() or self.client_terminated or self.server_terminated
|
[
"Return ``True`` if this connection is closing.\n\n The connection is considered closing if either side has\n initiated its closing handshake or if the stream has been\n shut down uncleanly.\n "
] |
def start_pinging(self) -> None:
    assert self.ping_interval is not None
    if self.ping_interval > 0:
        self.last_ping = self.last_pong = IOLoop.current().time()
        self.ping_callback = PeriodicCallback(
            self.periodic_ping, self.ping_interval * 1000
        )
        self.ping_callback.start()
|
[
"Start sending periodic pings to keep the connection alive"
] |
def periodic_ping(self) -> None:
    if self.is_closing() and self.ping_callback is not None:
        self.ping_callback.stop()
        return

    # Check for timeout on pong. Make sure that we really have
    # sent a recent ping in case the machine with both server and
    # client has been suspended since the last ping.
    now = IOLoop.current().time()
    since_last_pong = now - self.last_pong
    since_last_ping = now - self.last_ping
    assert self.ping_interval is not None
    assert self.ping_timeout is not None
    if (
        since_last_ping < 2 * self.ping_interval
        and since_last_pong > self.ping_timeout
    ):
        self.close()
        return

    self.write_ping(b"")
    self.last_ping = now
|
[
"Send a ping to keep the websocket alive\n\n Called periodically if the websocket_ping_interval is set and non-zero.\n "
] |
def close(self, code: int = None, reason: str = None) -> None:
    if self.protocol is not None:
        self.protocol.close(code, reason)
        self.protocol = None
|
[
"Closes the websocket connection.\n\n ``code`` and ``reason`` are documented under\n `WebSocketHandler.close`.\n\n .. versionadded:: 3.2\n\n .. versionchanged:: 4.0\n\n Added the ``code`` and ``reason`` arguments.\n "
] |
def write_message(
    self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
    return self.protocol.write_message(message, binary=binary)
|
[
"Sends a message to the WebSocket server.\n\n If the stream is closed, raises `WebSocketClosedError`.\n Returns a `.Future` which can be used for flow control.\n\n .. versionchanged:: 5.0\n Exception raised on a closed stream changed from `.StreamClosedError`\n to `WebSocketClosedError`.\n "
] |
def read_message(
    self, callback: Callable[["Future[Union[None, str, bytes]]"], None] = None
) -> Awaitable[Union[None, str, bytes]]:
    awaitable = self.read_queue.get()
    if callback is not None:
        self.io_loop.add_future(asyncio.ensure_future(awaitable), callback)
    return awaitable
|
[
"Reads a message from the WebSocket server.\n\n If on_message_callback was specified at WebSocket\n initialization, this function will never return messages\n\n Returns a future whose result is the message, or None\n if the connection is closed. If a callback argument\n is given it will be called with the future when it is\n ready.\n "
] |
def ping(self, data: bytes = b"") -> None:
    data = utf8(data)
    if self.protocol is None:
        raise WebSocketClosedError()
    self.protocol.write_ping(data)
|
[
"Send ping frame to the remote end.\n\n The data argument allows a small amount of data (up to 125\n bytes) to be sent as a part of the ping message. Note that not\n all websocket implementations expose this data to\n applications.\n\n Consider using the ``ping_interval`` argument to\n `websocket_connect` instead of sending pings manually.\n\n .. versionadded:: 5.1\n\n "
] |
def define(
    name: str,
    default: Any = None,
    type: type = None,
    help: str = None,
    metavar: str = None,
    multiple: bool = False,
    group: str = None,
    callback: Callable[[Any], None] = None,
) -> None:
    return options.define(
        name,
        default=default,
        type=type,
        help=help,
        metavar=metavar,
        multiple=multiple,
        group=group,
        callback=callback,
    )
|
[
"Defines an option in the global namespace.\n\n See `OptionParser.define`.\n "
] |
def parse_command_line(args: List[str] = None, final: bool = True) -> List[str]:
    return options.parse_command_line(args, final=final)
|
[
"Parses global options from the command line.\n\n See `OptionParser.parse_command_line`.\n "
] |
def parse_config_file(path: str, final: bool = True) -> None:
    return options.parse_config_file(path, final=final)
|
[
"Parses global options from a config file.\n\n See `OptionParser.parse_config_file`.\n "
] |
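The three module-level helpers above all delegate to a shared global OptionParser, so they compose naturally. A sketch of typical startup code; the option names and default port are illustrative:

from tornado.options import define, options, parse_command_line

define("port", default=8888, type=int, help="port to listen on")
define("debug", default=False, type=bool)

if __name__ == "__main__":
    # e.g. python app.py --port=9000 --debug extra1 extra2
    extra_args = parse_command_line()
    print(options.port, options.debug, extra_args)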
def items(self) -> Iterable[Tuple[str, Any]]:
    return [(opt.name, opt.value()) for name, opt in self._options.items()]
|
[
"An iterable of (name, value) pairs.\n\n .. versionadded:: 3.1\n "
] |
def groups(self) -> Set[str]:
    return set(opt.group_name for opt in self._options.values())
|
[
"The set of option-groups created by ``define``.\n\n .. versionadded:: 3.1\n "
] |
def group_dict(self, group: str) -> Dict[str, Any]:
    return dict(
        (opt.name, opt.value())
        for name, opt in self._options.items()
        if not group or group == opt.group_name
    )
|
[
"The names and values of options in a group.\n\n Useful for copying options into Application settings::\n\n from tornado.options import define, parse_command_line, options\n\n define('template_path', group='application')\n define('static_path', group='application')\n\n parse_command_line()\n\n application = Application(\n handlers, **options.group_dict('application'))\n\n .. versionadded:: 3.1\n "
] |
def as_dict(self) -> Dict[str, Any]:
    return dict((opt.name, opt.value()) for name, opt in self._options.items())
|
[
"The names and values of all options.\n\n .. versionadded:: 3.1\n "
] |
def define(
    self,
    name: str,
    default: Any = None,
    type: type = None,
    help: str = None,
    metavar: str = None,
    multiple: bool = False,
    group: str = None,
    callback: Callable[[Any], None] = None,
) -> None:
    normalized = self._normalize_name(name)
    if normalized in self._options:
        raise Error(
            "Option %r already defined in %s"
            % (normalized, self._options[normalized].file_name)
        )
    frame = sys._getframe(0)
    options_file = frame.f_code.co_filename

    # Can be called directly, or through top level define() fn, in which
    # case, step up above that frame to look for real caller.
    if (
        frame.f_back.f_code.co_filename == options_file
        and frame.f_back.f_code.co_name == "define"
    ):
        frame = frame.f_back

    file_name = frame.f_back.f_code.co_filename
    if file_name == options_file:
        file_name = ""
    if type is None:
        if not multiple and default is not None:
            type = default.__class__
        else:
            type = str
    if group:
        group_name = group  # type: Optional[str]
    else:
        group_name = file_name
    option = _Option(
        name,
        file_name=file_name,
        default=default,
        type=type,
        help=help,
        metavar=metavar,
        multiple=multiple,
        group_name=group_name,
        callback=callback,
    )
    self._options[normalized] = option
|
[
"Defines a new command line option.\n\n ``type`` can be any of `str`, `int`, `float`, `bool`,\n `~datetime.datetime`, or `~datetime.timedelta`. If no ``type``\n is given but a ``default`` is, ``type`` is the type of\n ``default``. Otherwise, ``type`` defaults to `str`.\n\n If ``multiple`` is True, the option value is a list of ``type``\n instead of an instance of ``type``.\n\n ``help`` and ``metavar`` are used to construct the\n automatically generated command line help string. The help\n message is formatted like::\n\n --name=METAVAR help string\n\n ``group`` is used to group the defined options in logical\n groups. By default, command line options are grouped by the\n file in which they are defined.\n\n Command line option names must be unique globally.\n\n If a ``callback`` is given, it will be run with the new value whenever\n the option is changed. This can be used to combine command-line\n and file-based options::\n\n define(\"config\", type=str, help=\"path to config file\",\n callback=lambda path: parse_config_file(path, final=False))\n\n With this definition, options in the file specified by ``--config`` will\n override options set earlier on the command line, but can be overridden\n by later flags.\n\n "
] |
def parse_command_line(
    self, args: List[str] = None, final: bool = True
) -> List[str]:
    if args is None:
        args = sys.argv
    remaining = []  # type: List[str]
    for i in range(1, len(args)):
        # All things after the last option are command line arguments
        if not args[i].startswith("-"):
            remaining = args[i:]
            break
        if args[i] == "--":
            remaining = args[i + 1 :]
            break
        arg = args[i].lstrip("-")
        name, equals, value = arg.partition("=")
        name = self._normalize_name(name)
        if name not in self._options:
            self.print_help()
            raise Error("Unrecognized command line option: %r" % name)
        option = self._options[name]
        if not equals:
            if option.type == bool:
                value = "true"
            else:
                raise Error("Option %r requires a value" % name)
        option.parse(value)

    if final:
        self.run_parse_callbacks()

    return remaining
|
[
"Parses all options given on the command line (defaults to\n `sys.argv`).\n\n Options look like ``--option=value`` and are parsed according\n to their ``type``. For boolean options, ``--option`` is\n equivalent to ``--option=true``\n\n If the option has ``multiple=True``, comma-separated values\n are accepted. For multi-value integer options, the syntax\n ``x:y`` is also accepted and equivalent to ``range(x, y)``.\n\n Note that ``args[0]`` is ignored since it is the program name\n in `sys.argv`.\n\n We return a list of all arguments that are not parsed as options.\n\n If ``final`` is ``False``, parse callbacks will not be run.\n This is useful for applications that wish to combine configurations\n from multiple sources.\n\n "
] |
def parse_config_file(self, path: str, final: bool = True) -> None:
    config = {"__file__": os.path.abspath(path)}
    with open(path, "rb") as f:
        exec_in(native_str(f.read()), config, config)
    for name in config:
        normalized = self._normalize_name(name)
        if normalized in self._options:
            option = self._options[normalized]
            if option.multiple:
                if not isinstance(config[name], (list, str)):
                    raise Error(
                        "Option %r is required to be a list of %s "
                        "or a comma-separated string"
                        % (option.name, option.type.__name__)
                    )
            if type(config[name]) == str and option.type != str:
                option.parse(config[name])
            else:
                option.set(config[name])

    if final:
        self.run_parse_callbacks()
|
[
"Parses and loads the config file at the given path.\n\n The config file contains Python code that will be executed (so\n it is **not safe** to use untrusted config files). Anything in\n the global namespace that matches a defined option will be\n used to set that option's value.\n\n Options may either be the specified type for the option or\n strings (in which case they will be parsed the same way as in\n `.parse_command_line`)\n\n Example (using the options defined in the top-level docs of\n this module)::\n\n port = 80\n mysql_host = 'mydb.example.com:3306'\n # Both lists and comma-separated strings are allowed for\n # multiple=True.\n memcache_hosts = ['cache1.example.com:11011',\n 'cache2.example.com:11011']\n memcache_hosts = 'cache1.example.com:11011,cache2.example.com:11011'\n\n If ``final`` is ``False``, parse callbacks will not be run.\n This is useful for applications that wish to combine configurations\n from multiple sources.\n\n .. note::\n\n `tornado.options` is primarily a command-line library.\n Config file support is provided for applications that wish\n to use it, but applications that prefer config files may\n wish to look at other libraries instead.\n\n .. versionchanged:: 4.1\n Config files are now always interpreted as utf-8 instead of\n the system default encoding.\n\n .. versionchanged:: 4.4\n The special variable ``__file__`` is available inside config\n files, specifying the absolute path to the config file itself.\n\n .. versionchanged:: 5.1\n Added the ability to set options via strings in config files.\n\n "
] |
def print_help(self, file: TextIO = None) -> None:
    if file is None:
        file = sys.stderr
    print("Usage: %s [OPTIONS]" % sys.argv[0], file=file)
    print("\nOptions:\n", file=file)
    by_group = {}  # type: Dict[str, List[_Option]]
    for option in self._options.values():
        by_group.setdefault(option.group_name, []).append(option)

    for filename, o in sorted(by_group.items()):
        if filename:
            print("\n%s options:\n" % os.path.normpath(filename), file=file)
        o.sort(key=lambda option: option.name)
        for option in o:
            # Always print names with dashes in a CLI context.
            prefix = self._normalize_name(option.name)
            if option.metavar:
                prefix += "=" + option.metavar
            description = option.help or ""
            if option.default is not None and option.default != "":
                description += " (default %s)" % option.default
            lines = textwrap.wrap(description, 79 - 35)
            if len(prefix) > 30 or len(lines) == 0:
                lines.insert(0, "")
            print("  --%-30s %s" % (prefix, lines[0]), file=file)
            for line in lines[1:]:
                print("%-34s %s" % (" ", line), file=file)
    print(file=file)
|
[
"Prints all the command line options to stderr (or another file)."
] |
def row_to_obj(self, row, cur):
    obj = tornado.util.ObjectDict()
    for val, desc in zip(row, cur.description):
        obj[desc.name] = val
    return obj
|
[
"Convert a SQL row to an object supporting dict and attribute access."
] |
async def execute(self, stmt, *args):
    with (await self.application.db.cursor()) as cur:
        await cur.execute(stmt, args)
|
[
"Execute a SQL statement.\n\n Must be called with ``await self.execute(...)``\n "
] |
async def query(self, stmt, *args):
    with (await self.application.db.cursor()) as cur:
        await cur.execute(stmt, args)
        return [self.row_to_obj(row, cur) for row in await cur.fetchall()]
|
[
"Query for a list of results.\n\n Typical usage::\n\n results = await self.query(...)\n\n Or::\n\n for row in await self.query(...)\n "
] |
async def queryone(self, stmt, *args):
    results = await self.query(stmt, *args)
    if len(results) == 0:
        raise NoResultError()
    elif len(results) > 1:
        raise ValueError("Expected 1 result, got %d" % len(results))
    return results[0]
|
[
"Query for exactly one result.\n\n Raises NoResultError if there are no results, or ValueError if\n there are more than one.\n "
] |
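These three database helpers appear to come from the base handler of Tornado's blog demo (built on aiopg). A sketch of how a request handler might consume them, assuming execute/query/queryone are defined on a shared base class; the table, column, and route names are made up for illustration:

import tornado.web

class AuthorHandler(tornado.web.RequestHandler):
    # Assumes the query helpers above are mixed into this class.
    async def get(self, author_id):
        try:
            author = await self.queryone(
                "SELECT * FROM authors WHERE id = %s", int(author_id)
            )
        except NoResultError:
            raise tornado.web.HTTPError(404, "no such author")
        # row_to_obj gives attribute access to the row's columns.
        self.write({"id": author.id, "name": author.name})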
def listen(self, port: int, address: str = "") -> None:
    sockets = bind_sockets(port, address=address)
    self.add_sockets(sockets)
|
[
"Starts accepting connections on the given port.\n\n This method may be called more than once to listen on multiple ports.\n `listen` takes effect immediately; it is not necessary to call\n `TCPServer.start` afterwards. It is, however, necessary to start\n the `.IOLoop`.\n "
] |
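listen is the one-call path from a TCPServer subclass to a running listener. A minimal echo-server sketch; the port is illustrative:

from tornado import ioloop
from tornado.iostream import StreamClosedError
from tornado.tcpserver import TCPServer

class EchoServer(TCPServer):
    async def handle_stream(self, stream, address):
        try:
            while True:
                data = await stream.read_until(b"\n")
                await stream.write(data)  # echo the line back
        except StreamClosedError:
            pass

EchoServer().listen(8888)
ioloop.IOLoop.current().start()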
def add_sockets(self, sockets: Iterable[socket.socket]) -> None:
    for sock in sockets:
        self._sockets[sock.fileno()] = sock
        self._handlers[sock.fileno()] = add_accept_handler(
            sock, self._handle_connection
        )
|
[
"Makes this server start accepting connections on the given sockets.\n\n The ``sockets`` parameter is a list of socket objects such as\n those returned by `~tornado.netutil.bind_sockets`.\n `add_sockets` is typically used in combination with that\n method and `tornado.process.fork_processes` to provide greater\n control over the initialization of a multi-process server.\n "
] |
def bind(
    self,
    port: int,
    address: str = None,
    family: socket.AddressFamily = socket.AF_UNSPEC,
    backlog: int = 128,
    reuse_port: bool = False,
) -> None:
    sockets = bind_sockets(
        port, address=address, family=family, backlog=backlog, reuse_port=reuse_port
    )
    if self._started:
        self.add_sockets(sockets)
    else:
        self._pending_sockets.extend(sockets)
|
[
"Binds this server to the given port on the given address.\n\n To start the server, call `start`. If you want to run this server\n in a single process, you can call `listen` as a shortcut to the\n sequence of `bind` and `start` calls.\n\n Address may be either an IP address or hostname. If it's a hostname,\n the server will listen on all IP addresses associated with the\n name. Address may be an empty string or None to listen on all\n available interfaces. Family may be set to either `socket.AF_INET`\n or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise\n both will be used if available.\n\n The ``backlog`` argument has the same meaning as for\n `socket.listen <socket.socket.listen>`. The ``reuse_port`` argument\n has the same meaning as for `.bind_sockets`.\n\n This method may be called multiple times prior to `start` to listen\n on multiple ports or interfaces.\n\n .. versionchanged:: 4.4\n Added the ``reuse_port`` argument.\n "
] |
def start(self, num_processes: Optional[int] = 1, max_restarts: int = None) -> None:
    assert not self._started
    self._started = True
    if num_processes != 1:
        process.fork_processes(num_processes, max_restarts)
    sockets = self._pending_sockets
    self._pending_sockets = []
    self.add_sockets(sockets)
|
[
"Starts this server in the `.IOLoop`.\n\n By default, we run the server in this process and do not fork any\n additional child process.\n\n If num_processes is ``None`` or <= 0, we detect the number of cores\n available on this machine and fork that number of child\n processes. If num_processes is given and > 1, we fork that\n specific number of sub-processes.\n\n Since we use processes and not threads, there is no shared memory\n between any server code.\n\n Note that multiple processes are not compatible with the autoreload\n module (or the ``autoreload=True`` option to `tornado.web.Application`\n which defaults to True when ``debug=True``).\n When using multiple processes, no IOLoops can be created or\n referenced until after the call to ``TCPServer.start(n)``.\n\n Values of ``num_processes`` other than 1 are not supported on Windows.\n\n The ``max_restarts`` argument is passed to `.fork_processes`.\n\n .. versionchanged:: 6.0\n\n Added ``max_restarts`` argument.\n "
] |
def stop(self) -> None:
    if self._stopped:
        return
    self._stopped = True
    for fd, sock in self._sockets.items():
        assert sock.fileno() == fd
        # Unregister socket from IOLoop
        self._handlers.pop(fd)()
        sock.close()
|
[
"Stops listening for new connections.\n\n Requests currently in progress may still continue after the\n server is stopped.\n "
] |
def put(
    self, item: _T, timeout: Union[float, datetime.timedelta] = None
) -> "Future[None]":
    future = Future()  # type: Future[None]
    try:
        self.put_nowait(item)
    except QueueFull:
        self._putters.append((item, future))
        _set_timeout(future, timeout)
    else:
        future.set_result(None)
    return future
|
[
"Put an item into the queue, perhaps waiting until there is room.\n\n Returns a Future, which raises `tornado.util.TimeoutError` after a\n timeout.\n\n ``timeout`` may be a number denoting a time (on the same\n scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a\n `datetime.timedelta` object for a deadline relative to the\n current time.\n "
] |
def put_nowait(self, item: _T) -> None:
    self._consume_expired()
    if self._getters:
        assert self.empty(), "queue non-empty, why are getters waiting?"
        getter = self._getters.popleft()
        self.__put_internal(item)
        future_set_result_unless_cancelled(getter, self._get())
    elif self.full():
        raise QueueFull
    else:
        self.__put_internal(item)
|
[
"Put an item into the queue without blocking.\n\n If no free slot is immediately available, raise `QueueFull`.\n "
] |
def get(self, timeout: Union[float, datetime.timedelta] = None) -> Awaitable[_T]:
    future = Future()  # type: Future[_T]
    try:
        future.set_result(self.get_nowait())
    except QueueEmpty:
        self._getters.append(future)
        _set_timeout(future, timeout)
    return future
|
[
"Remove and return an item from the queue.\n\n Returns an awaitable which resolves once an item is available, or raises\n `tornado.util.TimeoutError` after a timeout.\n\n ``timeout`` may be a number denoting a time (on the same\n scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a\n `datetime.timedelta` object for a deadline relative to the\n current time.\n\n .. note::\n\n The ``timeout`` argument of this method differs from that\n of the standard library's `queue.Queue.get`. That method\n interprets numeric values as relative timeouts; this one\n interprets them as absolute deadlines and requires\n ``timedelta`` objects for relative timeouts (consistent\n with other timeouts in Tornado).\n\n "
] |
def get_nowait(self) -> _T:
    self._consume_expired()
    if self._putters:
        assert self.full(), "queue not full, why are putters waiting?"
        item, putter = self._putters.popleft()
        self.__put_internal(item)
        future_set_result_unless_cancelled(putter, None)
        return self._get()
    elif self.qsize():
        return self._get()
    else:
        raise QueueEmpty
|
[
"Remove and return an item from the queue without blocking.\n\n Return an item if one is immediately available, else raise\n `QueueEmpty`.\n "
] |
def task_done(self) -> None:
    if self._unfinished_tasks <= 0:
        raise ValueError("task_done() called too many times")
    self._unfinished_tasks -= 1
    if self._unfinished_tasks == 0:
        self._finished.set()
|
[
"Indicate that a formerly enqueued task is complete.\n\n Used by queue consumers. For each `.get` used to fetch a task, a\n subsequent call to `.task_done` tells the queue that the processing\n on the task is complete.\n\n If a `.join` is blocking, it resumes when all items have been\n processed; that is, when every `.put` is matched by a `.task_done`.\n\n Raises `ValueError` if called more times than `.put`.\n "
] |
def join(self, timeout: Union[float, datetime.timedelta] = None) -> Awaitable[None]:
    return self._finished.wait(timeout)
|
[
"Block until all items in the queue are processed.\n\n Returns an awaitable, which raises `tornado.util.TimeoutError` after a\n timeout.\n "
] |
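put/get/task_done/join combine into the standard producer-consumer pattern. A sketch, assuming tornado.queues.Queue; the queue size and item count are illustrative:

from tornado import gen, ioloop
from tornado.queues import Queue

q = Queue(maxsize=2)

async def producer():
    for item in range(5):
        await q.put(item)  # blocks while the queue is full

async def consumer():
    async for item in q:
        try:
            print("got", item)
        finally:
            q.task_done()  # required for join() to resolve

async def main():
    ioloop.IOLoop.current().spawn_callback(consumer)
    await producer()
    await q.join()  # resumes once every put is matched by a task_done

ioloop.IOLoop.current().run_sync(main)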
def cpu_count() -> int:
    if multiprocessing is None:
        return 1
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        pass
    try:
        return os.sysconf("SC_NPROCESSORS_CONF")
    except (AttributeError, ValueError):
        pass
    gen_log.error("Could not detect number of processors; assuming 1")
    return 1
|
[
"Returns the number of processors on this machine."
] |
def fork_processes(num_processes: Optional[int], max_restarts: int = None) -> int:
    if max_restarts is None:
        max_restarts = 100

    global _task_id
    assert _task_id is None
    if num_processes is None or num_processes <= 0:
        num_processes = cpu_count()
    gen_log.info("Starting %d processes", num_processes)
    children = {}

    def start_child(i: int) -> Optional[int]:
        pid = os.fork()
        if pid == 0:
            # child process
            _reseed_random()
            global _task_id
            _task_id = i
            return i
        else:
            children[pid] = i
            return None

    for i in range(num_processes):
        id = start_child(i)
        if id is not None:
            return id
    num_restarts = 0
    while children:
        try:
            pid, status = os.wait()
        except OSError as e:
            if errno_from_exception(e) == errno.EINTR:
                continue
            raise
        if pid not in children:
            continue
        id = children.pop(pid)
        if os.WIFSIGNALED(status):
            gen_log.warning(
                "child %d (pid %d) killed by signal %d, restarting",
                id,
                pid,
                os.WTERMSIG(status),
            )
        elif os.WEXITSTATUS(status) != 0:
            gen_log.warning(
                "child %d (pid %d) exited with status %d, restarting",
                id,
                pid,
                os.WEXITSTATUS(status),
            )
        else:
            gen_log.info("child %d (pid %d) exited normally", id, pid)
            continue
        num_restarts += 1
        if num_restarts > max_restarts:
            raise RuntimeError("Too many child restarts, giving up")
        new_id = start_child(id)
        if new_id is not None:
            return new_id
    # All child processes exited cleanly, so exit the master process
    # instead of just returning to right after the call to
    # fork_processes (which will probably just start up another IOLoop
    # unless the caller checks the return value).
    sys.exit(0)
|
[
"Starts multiple worker processes.\n\n If ``num_processes`` is None or <= 0, we detect the number of cores\n available on this machine and fork that number of child\n processes. If ``num_processes`` is given and > 0, we fork that\n specific number of sub-processes.\n\n Since we use processes and not threads, there is no shared memory\n between any server code.\n\n Note that multiple processes are not compatible with the autoreload\n module (or the ``autoreload=True`` option to `tornado.web.Application`\n which defaults to True when ``debug=True``).\n When using multiple processes, no IOLoops can be created or\n referenced until after the call to ``fork_processes``.\n\n In each child process, ``fork_processes`` returns its *task id*, a\n number between 0 and ``num_processes``. Processes that exit\n abnormally (due to a signal or non-zero exit status) are restarted\n with the same id (up to ``max_restarts`` times). In the parent\n process, ``fork_processes`` returns None if all child processes\n have exited normally, but will otherwise only exit by throwing an\n exception.\n\n max_restarts defaults to 100.\n\n Availability: Unix\n "
] |
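A sketch of the pre-fork server pattern the docstring describes: bind sockets in the parent, fork, then let each child run its own IOLoop (Unix only; the port and handler are illustrative):

from tornado import ioloop, process, web
from tornado.httpserver import HTTPServer
from tornado.netutil import bind_sockets

class MainHandler(web.RequestHandler):
    def get(self):
        self.write("served by task %r" % process.task_id())

sockets = bind_sockets(8888)  # bind once, in the parent, before forking
process.fork_processes(0)     # 0/None forks one child per CPU core
server = HTTPServer(web.Application([(r"/", MainHandler)]))
server.add_sockets(sockets)   # each child accepts on the shared sockets
ioloop.IOLoop.current().start()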
def set_exit_callback(self, callback: Callable[[int], None]) -> None:
    self._exit_callback = callback
    Subprocess.initialize()
    Subprocess._waiting[self.pid] = self
    Subprocess._try_cleanup_process(self.pid)
|
[
"Runs ``callback`` when this process exits.\n\n The callback takes one argument, the return code of the process.\n\n This method uses a ``SIGCHLD`` handler, which is a global setting\n and may conflict if you have other libraries trying to handle the\n same signal. If you are using more than one ``IOLoop`` it may\n be necessary to call `Subprocess.initialize` first to designate\n one ``IOLoop`` to run the signal handlers.\n\n In many cases a close callback on the stdout or stderr streams\n can be used as an alternative to an exit callback if the\n signal handler is causing a problem.\n\n Availability: Unix\n "
] |
def wait_for_exit(self, raise_error: bool = True) -> "Future[int]":
    future = Future()  # type: Future[int]

    def callback(ret: int) -> None:
        if ret != 0 and raise_error:
            # Unfortunately we don't have the original args any more.
            future_set_exception_unless_cancelled(
                future, CalledProcessError(ret, "unknown")
            )
        else:
            future_set_result_unless_cancelled(future, ret)

    self.set_exit_callback(callback)
    return future
|
[
"Returns a `.Future` which resolves when the process exits.\n\n Usage::\n\n ret = yield proc.wait_for_exit()\n\n This is a coroutine-friendly alternative to `set_exit_callback`\n (and a replacement for the blocking `subprocess.Popen.wait`).\n\n By default, raises `subprocess.CalledProcessError` if the process\n has a non-zero exit status. Use ``wait_for_exit(raise_error=False)``\n to suppress this behavior and return the exit status without raising.\n\n .. versionadded:: 4.2\n\n Availability: Unix\n "
] |
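A coroutine-style sketch of driving a child process with Subprocess and wait_for_exit (Unix only; the command is illustrative):

from tornado import ioloop
from tornado.process import Subprocess

async def run():
    proc = Subprocess(["echo", "hello"], stdout=Subprocess.STREAM)
    out = await proc.stdout.read_until_close()
    ret = await proc.wait_for_exit()  # raises CalledProcessError on non-zero exit
    print(out, ret)

ioloop.IOLoop.current().run_sync(run)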
def initialize(cls) -> None:
    if cls._initialized:
        return
    io_loop = ioloop.IOLoop.current()
    cls._old_sigchld = signal.signal(
        signal.SIGCHLD,
        lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup),
    )
    cls._initialized = True
|
[
"Initializes the ``SIGCHLD`` handler.\n\n The signal handler is run on an `.IOLoop` to avoid locking issues.\n Note that the `.IOLoop` used for signal handling need not be the\n same one used by individual Subprocess objects (as long as the\n ``IOLoops`` are each running in separate threads).\n\n .. versionchanged:: 5.0\n The ``io_loop`` argument (deprecated since version 4.1) has been\n removed.\n\n Availability: Unix\n "
] |
def uninitialize(cls) -> None:
    if not cls._initialized:
        return
    signal.signal(signal.SIGCHLD, cls._old_sigchld)
    cls._initialized = False
|
[
"Removes the ``SIGCHLD`` handler."
] |
def _handle_socket(self, event: int, fd: int, multi: Any, data: bytes) -> None:
    event_map = {
        pycurl.POLL_NONE: ioloop.IOLoop.NONE,
        pycurl.POLL_IN: ioloop.IOLoop.READ,
        pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
        pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE,
    }
    if event == pycurl.POLL_REMOVE:
        if fd in self._fds:
            self.io_loop.remove_handler(fd)
            del self._fds[fd]
    else:
        ioloop_event = event_map[event]
        # libcurl sometimes closes a socket and then opens a new
        # one using the same FD without giving us a POLL_NONE in
        # between. This is a problem with the epoll IOLoop,
        # because the kernel can tell when a socket is closed and
        # removes it from the epoll automatically, causing future
        # update_handler calls to fail. Since we can't tell when
        # this has happened, always use remove and re-add
        # instead of update.
        if fd in self._fds:
            self.io_loop.remove_handler(fd)
        self.io_loop.add_handler(fd, self._handle_events, ioloop_event)
        self._fds[fd] = ioloop_event
|
[
"Called by libcurl when it wants to change the file descriptors\n it cares about.\n "
] |
def _set_timeout(self, msecs: int) -> None:
    if self._timeout is not None:
        self.io_loop.remove_timeout(self._timeout)
    self._timeout = self.io_loop.add_timeout(
        self.io_loop.time() + msecs / 1000.0, self._handle_timeout
    )
|
[
"Called by libcurl to schedule a timeout."
] |
def _handle_events(self, fd: int, events: int) -> None:
    action = 0
    if events & ioloop.IOLoop.READ:
        action |= pycurl.CSELECT_IN
    if events & ioloop.IOLoop.WRITE:
        action |= pycurl.CSELECT_OUT
    while True:
        try:
            ret, num_handles = self._multi.socket_action(fd, action)
        except pycurl.error as e:
            ret = e.args[0]
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            break
    self._finish_pending_requests()
|
[
"Called by IOLoop when there is activity on one of our\n file descriptors.\n "
] |
def _handle_timeout(self) -> None:
    self._timeout = None
    while True:
        try:
            ret, num_handles = self._multi.socket_action(pycurl.SOCKET_TIMEOUT, 0)
        except pycurl.error as e:
            ret = e.args[0]
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            break
    self._finish_pending_requests()

    # In theory, we shouldn't have to do this because curl will
    # call _set_timeout whenever the timeout changes. However,
    # sometimes after _handle_timeout we will need to reschedule
    # immediately even though nothing has changed from curl's
    # perspective. This is because when socket_action is
    # called with SOCKET_TIMEOUT, libcurl decides internally which
    # timeouts need to be processed by using a monotonic clock
    # (where available) while tornado uses python's time.time()
    # to decide when timeouts have occurred. When those clocks
    # disagree on elapsed time (as they will whenever there is an
    # NTP adjustment), tornado might call _handle_timeout before
    # libcurl is ready. After each timeout, resync the scheduled
    # timeout with libcurl's current state.
    new_timeout = self._multi.timeout()
    if new_timeout >= 0:
        self._set_timeout(new_timeout)
|
[
"Called by IOLoop when the requested timeout has passed."
] |
Please provide a description of the function:def _handle_force_timeout(self) -> None:
while True:
try:
ret, num_handles = self._multi.socket_all()
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
|
[
"Called by IOLoop periodically to ask libcurl to process any\n events it may have forgotten about.\n "
] |
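Because socket callbacks can be missed, a periodic safety net is useful. A sketch of scheduling one with Tornado's PeriodicCallback, using a standalone function in place of the bound method (the 1000 ms interval is illustrative):
from tornado.ioloop import PeriodicCallback

def handle_force_timeout():
    pass  # stand-in for the _handle_force_timeout shown above

# Poll once per second for events libcurl may have forgotten to report.
PeriodicCallback(handle_force_timeout, 1000).start()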
Please provide a description of the function:def _finish_pending_requests(self) -> None:
while True:
num_q, ok_list, err_list = self._multi.info_read()
for curl in ok_list:
self._finish(curl)
for curl, errnum, errmsg in err_list:
self._finish(curl, errnum, errmsg)
if num_q == 0:
break
self._process_queue()
|
[
"Process any requests that were completed by the last\n call to multi.socket_action.\n "
] |
Please provide a description of the function:def start(port, root_directory, bucket_depth):
application = S3Application(root_directory, bucket_depth)
http_server = httpserver.HTTPServer(application)
http_server.listen(port)
ioloop.IOLoop.current().start()
|
[
"Starts the mock S3 server on the given port at the given path."
] |
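A hypothetical invocation (port and directory are illustrative); the call blocks until the IOLoop is stopped:
# Serve the mock S3 tree rooted at /tmp/s3 on port 8888.
start(port=8888, root_directory="/tmp/s3", bucket_depth=0)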
Please provide a description of the function:def close(self) -> None:
if not self._closed:
self._async_client.close()
self._io_loop.close()
self._closed = True
|
[
"Closes the HTTPClient, freeing any resources used."
] |
Please provide a description of the function:def fetch(
self, request: Union["HTTPRequest", str], **kwargs: Any
) -> "HTTPResponse":
response = self._io_loop.run_sync(
functools.partial(self._async_client.fetch, request, **kwargs)
)
return response
|
[
"Executes a request, returning an `HTTPResponse`.\n\n The request may be either a string URL or an `HTTPRequest` object.\n If it is a string, we construct an `HTTPRequest` using any additional\n kwargs: ``HTTPRequest(request, **kwargs)``\n\n If an error occurs during the fetch, we raise an `HTTPError` unless\n the ``raise_error`` keyword argument is set to False.\n "
] |
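A typical blocking usage sketch (the URL is illustrative):
from tornado.httpclient import HTTPClient, HTTPError

client = HTTPClient()
try:
    response = client.fetch("http://example.com/")
    print(response.code, len(response.body))
except HTTPError as e:
    print("request failed:", e)
finally:
    client.close()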
Please provide a description of the function:def close(self) -> None:
if self._closed:
return
self._closed = True
if self._instance_cache is not None:
cached_val = self._instance_cache.pop(self.io_loop, None)
# If there's an object other than self in the instance
# cache for our IOLoop, something has gotten mixed up. A
# value of None appears to be possible when this is called
# from a destructor (HTTPClient.__del__) as the weakref
# gets cleared before the destructor runs.
if cached_val is not None and cached_val is not self:
raise RuntimeError("inconsistent AsyncHTTPClient cache")
|
[
"Destroys this HTTP client, freeing any file descriptors used.\n\n This method is **not needed in normal use** due to the way\n that `AsyncHTTPClient` objects are transparently reused.\n ``close()`` is generally only necessary when either the\n `.IOLoop` is also being closed, or the ``force_instance=True``\n argument was used when creating the `AsyncHTTPClient`.\n\n No other methods may be called on the `AsyncHTTPClient` after\n ``close()``.\n\n "
] |
Please provide a description of the function:def fetch(
self,
request: Union[str, "HTTPRequest"],
raise_error: bool = True,
**kwargs: Any
) -> Awaitable["HTTPResponse"]:
if self._closed:
raise RuntimeError("fetch() called on closed AsyncHTTPClient")
if not isinstance(request, HTTPRequest):
request = HTTPRequest(url=request, **kwargs)
else:
if kwargs:
raise ValueError(
"kwargs can't be used if request is an HTTPRequest object"
)
# We may modify this (to add Host, Accept-Encoding, etc),
# so make sure we don't modify the caller's object. This is also
# where normal dicts get converted to HTTPHeaders objects.
request.headers = httputil.HTTPHeaders(request.headers)
request_proxy = _RequestProxy(request, self.defaults)
future = Future() # type: Future[HTTPResponse]
def handle_response(response: "HTTPResponse") -> None:
if response.error:
if raise_error or not response._error_is_response_code:
future_set_exception_unless_cancelled(future, response.error)
return
future_set_result_unless_cancelled(future, response)
self.fetch_impl(cast(HTTPRequest, request_proxy), handle_response)
return future
|
[
"Executes a request, asynchronously returning an `HTTPResponse`.\n\n The request may be either a string URL or an `HTTPRequest` object.\n If it is a string, we construct an `HTTPRequest` using any additional\n kwargs: ``HTTPRequest(request, **kwargs)``\n\n This method returns a `.Future` whose result is an\n `HTTPResponse`. By default, the ``Future`` will raise an\n `HTTPError` if the request returned a non-200 response code\n (other errors may also be raised if the server could not be\n contacted). Instead, if ``raise_error`` is set to False, the\n response will always be returned regardless of the response\n code.\n\n If a ``callback`` is given, it will be invoked with the `HTTPResponse`.\n In the callback interface, `HTTPError` is not automatically raised.\n Instead, you must check the response's ``error`` attribute or\n call its `~HTTPResponse.rethrow` method.\n\n .. versionchanged:: 6.0\n\n The ``callback`` argument was removed. Use the returned\n `.Future` instead.\n\n The ``raise_error=False`` argument only affects the\n `HTTPError` raised when a non-200 response code is used,\n instead of suppressing all errors.\n "
] |
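A coroutine-based usage sketch; with raise_error=False the response is returned even for non-2xx status codes:
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop

async def main():
    client = AsyncHTTPClient()
    response = await client.fetch("http://example.com/", raise_error=False)
    print(response.code)

IOLoop.current().run_sync(main)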
Please provide a description of the function:def configure(
cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any
) -> None:
super(AsyncHTTPClient, cls).configure(impl, **kwargs)
|
[
"Configures the `AsyncHTTPClient` subclass to use.\n\n ``AsyncHTTPClient()`` actually creates an instance of a subclass.\n This method may be called with either a class object or the\n fully-qualified name of such a class (or ``None`` to use the default,\n ``SimpleAsyncHTTPClient``)\n\n If additional keyword arguments are given, they will be passed\n to the constructor of each subclass instance created. The\n keyword argument ``max_clients`` determines the maximum number\n of simultaneous `~AsyncHTTPClient.fetch()` operations that can\n execute in parallel on each `.IOLoop`. Additional arguments\n may be supported depending on the implementation class in use.\n\n Example::\n\n AsyncHTTPClient.configure(\"tornado.curl_httpclient.CurlAsyncHTTPClient\")\n "
] |
Please provide a description of the function:def _cleanup(self) -> None:
if self._cleanup_handle:
self._cleanup_handle.cancel()
now = self._loop.time()
timeout = self._keepalive_timeout
if self._conns:
connections = {}
deadline = now - timeout
for key, conns in self._conns.items():
alive = []
for proto, use_time in conns:
if proto.is_connected():
if use_time - deadline < 0:
transport = proto.transport
proto.close()
if (key.is_ssl and
not self._cleanup_closed_disabled):
self._cleanup_closed_transports.append(
transport)
else:
alive.append((proto, use_time))
if alive:
connections[key] = alive
self._conns = connections
if self._conns:
self._cleanup_handle = helpers.weakref_handle(
self, '_cleanup', timeout, self._loop)
|
[
"Cleanup unused transports."
] |
Please provide a description of the function:def _cleanup_closed(self) -> None:
if self._cleanup_closed_handle:
self._cleanup_closed_handle.cancel()
for transport in self._cleanup_closed_transports:
if transport is not None:
transport.abort()
self._cleanup_closed_transports = []
if not self._cleanup_closed_disabled:
self._cleanup_closed_handle = helpers.weakref_handle(
self, '_cleanup_closed',
self._cleanup_closed_period, self._loop)
|
[
"Double confirmation for transport close.\n Some broken ssl servers may leave socket open without proper close.\n "
] |
Please provide a description of the function:def _available_connections(self, key: 'ConnectionKey') -> int:
if self._limit:
            # calculate the total number of available connections
available = self._limit - len(self._acquired)
# check limit per host
if (self._limit_per_host and available > 0 and
key in self._acquired_per_host):
acquired = self._acquired_per_host.get(key)
assert acquired is not None
available = self._limit_per_host - len(acquired)
elif self._limit_per_host and key in self._acquired_per_host:
# check limit per host
acquired = self._acquired_per_host.get(key)
assert acquired is not None
available = self._limit_per_host - len(acquired)
else:
available = 1
return available
|
[
"\n Return number of available connections taking into account\n the limit, limit_per_host and the connection key.\n\n If it returns less than 1 means that there is no connections\n availables.\n "
] |
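To make the arithmetic concrete: with limit=10 and 8 connections acquired overall, 2 are available; if limit_per_host=3 and 3 connections are already acquired for this key, the per-host check reduces that to 0. Both limits are set when constructing the connector:
import aiohttp

# At most 100 simultaneous connections in total and at most 8 per
# (host, port, is_ssl) key; 0 means unlimited.
connector = aiohttp.TCPConnector(limit=100, limit_per_host=8)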
Please provide a description of the function:async def connect(self, req: 'ClientRequest',
traces: List['Trace'],
timeout: 'ClientTimeout') -> Connection:
key = req.connection_key
available = self._available_connections(key)
# Wait if there are no available connections.
if available <= 0:
fut = self._loop.create_future()
# This connection will now count towards the limit.
waiters = self._waiters[key]
waiters.append(fut)
if traces:
for trace in traces:
await trace.send_connection_queued_start()
try:
await fut
except BaseException as e:
# remove a waiter even if it was cancelled, normally it's
# removed when it's notified
try:
waiters.remove(fut)
except ValueError: # fut may no longer be in list
pass
raise e
finally:
if not waiters:
try:
del self._waiters[key]
except KeyError:
# the key was evicted before.
pass
if traces:
for trace in traces:
await trace.send_connection_queued_end()
proto = self._get(key)
if proto is None:
placeholder = cast(ResponseHandler, _TransportPlaceholder())
self._acquired.add(placeholder)
self._acquired_per_host[key].add(placeholder)
if traces:
for trace in traces:
await trace.send_connection_create_start()
try:
proto = await self._create_connection(req, traces, timeout)
if self._closed:
proto.close()
raise ClientConnectionError("Connector is closed.")
except BaseException:
if not self._closed:
self._acquired.remove(placeholder)
self._drop_acquired_per_host(key, placeholder)
self._release_waiter()
raise
else:
if not self._closed:
self._acquired.remove(placeholder)
self._drop_acquired_per_host(key, placeholder)
if traces:
for trace in traces:
await trace.send_connection_create_end()
else:
if traces:
for trace in traces:
await trace.send_connection_reuseconn()
self._acquired.add(proto)
self._acquired_per_host[key].add(proto)
return Connection(self, key, proto, self._loop)
|
[
"Get from pool or create new connection."
] |
Please provide a description of the function:def _release_waiter(self) -> None:
if not self._waiters:
return
        # Shuffle the keys so that we do not iterate over the waiters
        # in the same order on every call.
queues = list(self._waiters.keys())
random.shuffle(queues)
for key in queues:
if self._available_connections(key) < 1:
continue
waiters = self._waiters[key]
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
return
|
[
"\n Iterates over all waiters till found one that is not finsihed and\n belongs to a host that has available connections.\n "
] |
Please provide a description of the function:def close(self) -> Awaitable[None]:
for ev in self._throttle_dns_events.values():
ev.cancel()
return super().close()
|
[
"Close all ongoing DNS calls."
] |
Please provide a description of the function:def clear_dns_cache(self,
host: Optional[str]=None,
port: Optional[int]=None) -> None:
if host is not None and port is not None:
self._cached_hosts.remove((host, port))
elif host is not None or port is not None:
raise ValueError("either both host and port "
"or none of them are allowed")
else:
self._cached_hosts.clear()
|
[
"Remove specified host/port or clear all dns local cache."
] |
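Usage sketch, assuming a TCPConnector instance named connector: pass both host and port to evict a single entry, or neither to flush the whole cache:
connector.clear_dns_cache(host="example.com", port=443)  # one entry
connector.clear_dns_cache()                              # entire cache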
Please provide a description of the function:async def _create_connection(self, req: 'ClientRequest',
traces: List['Trace'],
timeout: 'ClientTimeout') -> ResponseHandler:
if req.proxy:
_, proto = await self._create_proxy_connection(
req, traces, timeout)
else:
_, proto = await self._create_direct_connection(
req, traces, timeout)
return proto
|
[
"Create connection.\n\n Has same keyword arguments as BaseEventLoop.create_connection.\n "
] |
Please provide a description of the function:def _get_ssl_context(self, req: 'ClientRequest') -> Optional[SSLContext]:
if req.is_ssl():
if ssl is None: # pragma: no cover
raise RuntimeError('SSL is not supported.')
sslcontext = req.ssl
if isinstance(sslcontext, ssl.SSLContext):
return sslcontext
if sslcontext is not None:
# not verified or fingerprinted
return self._make_ssl_context(False)
sslcontext = self._ssl
if isinstance(sslcontext, ssl.SSLContext):
return sslcontext
if sslcontext is not None:
# not verified or fingerprinted
return self._make_ssl_context(False)
return self._make_ssl_context(True)
else:
return None
|
[
"Logic to get the correct SSL context\n\n 0. if req.ssl is false, return None\n\n 1. if ssl_context is specified in req, use it\n 2. if _ssl_context is specified in self, use it\n 3. otherwise:\n 1. if verify_ssl is not specified in req, use self.ssl_context\n (will generate a default context according to self.verify_ssl)\n 2. if verify_ssl is True in req, generate a default SSL context\n 3. if verify_ssl is False in req, generate a SSL context that\n won't verify\n "
] |
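In practice the per-request ssl value arrives through the client API. A sketch, assuming aiohttp 3.x (the CA file path is illustrative):
import ssl
import aiohttp

ssl_ctx = ssl.create_default_context(cafile="/path/to/ca.pem")

async def fetch(url):
    async with aiohttp.ClientSession() as session:
        # An explicit SSLContext on the request takes precedence
        # (step 1 in the docstring above).
        async with session.get(url, ssl=ssl_ctx) as resp:
            return await resp.text()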
Please provide a description of the function:def _websocket_mask_python(mask: bytes, data: bytearray) -> None:
assert isinstance(data, bytearray), data
assert len(mask) == 4, mask
if data:
a, b, c, d = (_XOR_TABLE[n] for n in mask)
data[::4] = data[::4].translate(a)
data[1::4] = data[1::4].translate(b)
data[2::4] = data[2::4].translate(c)
data[3::4] = data[3::4].translate(d)
|
[
"Websocket masking function.\n\n `mask` is a `bytes` object of length 4; `data` is a `bytearray`\n object of any length. The contents of `data` are masked with `mask`,\n as specified in section 5.3 of RFC 6455.\n\n Note that this function mutates the `data` argument.\n\n This pure-python implementation may be replaced by an optimized\n version when available.\n\n "
] |
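The translate-table trick is equivalent to XORing each byte with the mask byte at its index modulo 4; applying the mask twice restores the original data. A plain-Python equivalence check:
mask = b"\x01\x02\x03\x04"
data = bytearray(b"hello")
masked = bytearray(b ^ mask[i % 4] for i, b in enumerate(data))
# Masking is its own inverse.
unmasked = bytearray(b ^ mask[i % 4] for i, b in enumerate(masked))
assert unmasked == bytearray(b"hello")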
Please provide a description of the function:def json(self, *, # type: ignore
             loads: Callable[[Any], Any]=json.loads) -> Any:
return loads(self.data)
|
[
"Return parsed JSON data.\n\n .. versionadded:: 0.22\n "
] |
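Typical usage on a received websocket message; a custom decoder can be supplied via loads:
import aiohttp

async def handle(ws):
    msg = await ws.receive()
    if msg.type == aiohttp.WSMsgType.TEXT:
        payload = msg.json()  # equivalent to json.loads(msg.data)
        print(payload)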
Please provide a description of the function:def parse_frame(self, buf: bytes) -> List[Tuple[bool, Optional[int],
bytearray,
Optional[bool]]]:
frames = []
if self._tail:
buf, self._tail = self._tail + buf, b''
start_pos = 0
buf_length = len(buf)
while True:
# read header
if self._state == WSParserState.READ_HEADER:
if buf_length - start_pos >= 2:
data = buf[start_pos:start_pos+2]
start_pos += 2
first_byte, second_byte = data
fin = (first_byte >> 7) & 1
rsv1 = (first_byte >> 6) & 1
rsv2 = (first_byte >> 5) & 1
rsv3 = (first_byte >> 4) & 1
opcode = first_byte & 0xf
# frame-fin = %x0 ; more frames of this message follow
# / %x1 ; final frame of this message
# frame-rsv1 = %x0 ;
# 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv2 = %x0 ;
# 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv3 = %x0 ;
# 1 bit, MUST be 0 unless negotiated otherwise
#
# Remove rsv1 from this test for deflate development
if rsv2 or rsv3 or (rsv1 and not self._compress):
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR,
'Received frame with non-zero reserved bits')
if opcode > 0x7 and fin == 0:
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR,
'Received fragmented control frame')
has_mask = (second_byte >> 7) & 1
length = second_byte & 0x7f
# Control frames MUST have a payload
# length of 125 bytes or less
if opcode > 0x7 and length > 125:
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR,
'Control frame payload cannot be '
'larger than 125 bytes')
# Set compress status if last package is FIN
# OR set compress status if this is first fragment
# Raise error if not first fragment with rsv1 = 0x1
if self._frame_fin or self._compressed is None:
self._compressed = True if rsv1 else False
elif rsv1:
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR,
'Received frame with non-zero reserved bits')
self._frame_fin = bool(fin)
self._frame_opcode = opcode
self._has_mask = bool(has_mask)
self._payload_length_flag = length
self._state = WSParserState.READ_PAYLOAD_LENGTH
else:
break
# read payload length
if self._state == WSParserState.READ_PAYLOAD_LENGTH:
length = self._payload_length_flag
if length == 126:
if buf_length - start_pos >= 2:
data = buf[start_pos:start_pos+2]
start_pos += 2
length = UNPACK_LEN2(data)[0]
self._payload_length = length
self._state = (
WSParserState.READ_PAYLOAD_MASK
if self._has_mask
else WSParserState.READ_PAYLOAD)
else:
break
elif length > 126:
if buf_length - start_pos >= 8:
data = buf[start_pos:start_pos+8]
start_pos += 8
length = UNPACK_LEN3(data)[0]
self._payload_length = length
self._state = (
WSParserState.READ_PAYLOAD_MASK
if self._has_mask
else WSParserState.READ_PAYLOAD)
else:
break
else:
self._payload_length = length
self._state = (
WSParserState.READ_PAYLOAD_MASK
if self._has_mask
else WSParserState.READ_PAYLOAD)
# read payload mask
if self._state == WSParserState.READ_PAYLOAD_MASK:
if buf_length - start_pos >= 4:
self._frame_mask = buf[start_pos:start_pos+4]
start_pos += 4
self._state = WSParserState.READ_PAYLOAD
else:
break
if self._state == WSParserState.READ_PAYLOAD:
length = self._payload_length
payload = self._frame_payload
chunk_len = buf_length - start_pos
if length >= chunk_len:
self._payload_length = length - chunk_len
payload.extend(buf[start_pos:])
start_pos = buf_length
else:
self._payload_length = 0
payload.extend(buf[start_pos:start_pos+length])
start_pos = start_pos + length
if self._payload_length == 0:
if self._has_mask:
assert self._frame_mask is not None
_websocket_mask(self._frame_mask, payload)
frames.append((
self._frame_fin,
self._frame_opcode,
payload,
self._compressed))
self._frame_payload = bytearray()
self._state = WSParserState.READ_HEADER
else:
break
self._tail = buf[start_pos:]
return frames
|
[
"Return the next frame from the socket."
] |
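To make the header layout concrete: an unmasked final text frame carrying b"hello" consists of 0x81 (FIN=1, opcode 0x1) followed by 0x05 (mask bit 0, 7-bit length 5) and the payload:
frame = bytes([0x81, 0x05]) + b"hello"
first_byte, second_byte = frame[0], frame[1]
assert (first_byte >> 7) & 1 == 1   # FIN: final frame of the message
assert first_byte & 0xF == 0x1      # opcode 0x1: text frame
assert (second_byte >> 7) & 1 == 0  # no mask (server-to-client direction)
assert second_byte & 0x7F == 5      # payload length fits in 7 bits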
Please provide a description of the function:async def _send_frame(self, message: bytes, opcode: int,
compress: Optional[int]=None) -> None:
if self._closing:
ws_logger.warning('websocket connection is closing.')
rsv = 0
# Only compress larger packets (disabled)
        # Should small packets be compressed as well?
# if self.compress and opcode < 8 and len(message) > 124:
if (compress or self.compress) and opcode < 8:
if compress:
# Do not set self._compress if compressing is for this frame
compressobj = zlib.compressobj(wbits=-compress)
else: # self.compress
if not self._compressobj:
self._compressobj = zlib.compressobj(wbits=-self.compress)
compressobj = self._compressobj
message = compressobj.compress(message)
message = message + compressobj.flush(
zlib.Z_FULL_FLUSH if self.notakeover else zlib.Z_SYNC_FLUSH)
if message.endswith(_WS_DEFLATE_TRAILING):
message = message[:-4]
rsv = rsv | 0x40
msg_length = len(message)
use_mask = self.use_mask
if use_mask:
mask_bit = 0x80
else:
mask_bit = 0
if msg_length < 126:
header = PACK_LEN1(0x80 | rsv | opcode, msg_length | mask_bit)
elif msg_length < (1 << 16):
header = PACK_LEN2(0x80 | rsv | opcode, 126 | mask_bit, msg_length)
else:
header = PACK_LEN3(0x80 | rsv | opcode, 127 | mask_bit, msg_length)
if use_mask:
mask = self.randrange(0, 0xffffffff)
mask = mask.to_bytes(4, 'big')
message = bytearray(message)
_websocket_mask(mask, message)
self.transport.write(header + mask + message)
self._output_size += len(header) + len(mask) + len(message)
else:
if len(message) > MSG_SIZE:
self.transport.write(header)
self.transport.write(message)
else:
self.transport.write(header + message)
self._output_size += len(header) + len(message)
if self._output_size > self._limit:
self._output_size = 0
await self.protocol._drain_helper()
|
[
"Send a frame over the websocket with message as its payload."
] |
Please provide a description of the function:async def pong(self, message: bytes=b'') -> None:
if isinstance(message, str):
message = message.encode('utf-8')
await self._send_frame(message, WSMsgType.PONG)
|
[
"Send pong message."
] |
Please provide a description of the function:async def ping(self, message: bytes=b'') -> None:
if isinstance(message, str):
message = message.encode('utf-8')
await self._send_frame(message, WSMsgType.PING)
|
[
"Send ping message."
] |
Please provide a description of the function:async def send(self, message: Union[str, bytes],
binary: bool=False,
compress: Optional[int]=None) -> None:
if isinstance(message, str):
message = message.encode('utf-8')
if binary:
await self._send_frame(message, WSMsgType.BINARY, compress)
else:
await self._send_frame(message, WSMsgType.TEXT, compress)
|
[
"Send a frame over the websocket with message as its payload."
] |
Please provide a description of the function:async def close(self, code: int=1000, message: bytes=b'') -> None:
if isinstance(message, str):
message = message.encode('utf-8')
try:
await self._send_frame(
PACK_CLOSE_CODE(code) + message, opcode=WSMsgType.CLOSE)
finally:
self._closing = True
|
[
"Close the websocket, sending the specified code and message."
] |
Please provide a description of the function:def update_cookies(self,
cookies: LooseCookies,
response_url: URL=URL()) -> None:
hostname = response_url.raw_host
if not self._unsafe and is_ip_address(hostname):
# Don't accept cookies from IPs
return
if isinstance(cookies, Mapping):
cookies = cookies.items() # type: ignore
for name, cookie in cookies:
if not isinstance(cookie, Morsel):
tmp = SimpleCookie()
tmp[name] = cookie # type: ignore
cookie = tmp[name]
domain = cookie["domain"]
# ignore domains with trailing dots
if domain.endswith('.'):
domain = ""
del cookie["domain"]
if not domain and hostname is not None:
# Set the cookie's domain to the response hostname
# and set its host-only-flag
self._host_only_cookies.add((hostname, name))
domain = cookie["domain"] = hostname
if domain.startswith("."):
# Remove leading dot
domain = domain[1:]
cookie["domain"] = domain
if hostname and not self._is_domain_match(domain, hostname):
# Setting cookies for different domains is not allowed
continue
path = cookie["path"]
if not path or not path.startswith("/"):
# Set the cookie's path to the response path
path = response_url.path
if not path.startswith("/"):
path = "/"
else:
# Cut everything from the last slash to the end
path = "/" + path[1:path.rfind("/")]
cookie["path"] = path
max_age = cookie["max-age"]
if max_age:
try:
delta_seconds = int(max_age)
self._expire_cookie(self._loop.time() + delta_seconds,
domain, name)
except ValueError:
cookie["max-age"] = ""
else:
expires = cookie["expires"]
if expires:
expire_time = self._parse_date(expires)
if expire_time:
self._expire_cookie(expire_time.timestamp(),
domain, name)
else:
cookie["expires"] = ""
self._cookies[domain][name] = cookie
self._do_expiration()
|
[
"Update cookies."
] |
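A brief usage sketch (URL illustrative); run inside an event loop, since the jar schedules cookie expiration with loop.time():
import asyncio
from aiohttp import CookieJar
from yarl import URL

async def main():
    jar = CookieJar()
    # With no Domain attribute on the cookie, it becomes a host-only
    # cookie bound to example.com.
    jar.update_cookies({"session": "abc123"},
                       response_url=URL("http://example.com/login"))

asyncio.run(main())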
Please provide a description of the function:def filter_cookies(self, request_url: URL=URL()) -> 'BaseCookie[str]':
self._do_expiration()
request_url = URL(request_url)
filtered = SimpleCookie()
hostname = request_url.raw_host or ""
is_not_secure = request_url.scheme not in ("https", "wss")
for cookie in self:
name = cookie.key
domain = cookie["domain"]
# Send shared cookies
if not domain:
filtered[name] = cookie.value
continue
if not self._unsafe and is_ip_address(hostname):
continue
if (domain, name) in self._host_only_cookies:
if domain != hostname:
continue
elif not self._is_domain_match(domain, hostname):
continue
if not self._is_path_match(request_url.path, cookie["path"]):
continue
if is_not_secure and cookie["secure"]:
continue
# It's critical we use the Morsel so the coded_value
# (based on cookie version) is preserved
mrsl_val = cast('Morsel[str]', cookie.get(cookie.key, Morsel()))
mrsl_val.set(cookie.key, cookie.value, cookie.coded_value)
filtered[name] = mrsl_val
return filtered
|
[
"Returns this jar's cookies filtered by their attributes."
] |
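Continuing the sketch above, inside the same coroutine the jar can be queried for a request URL; only cookies whose domain, path and secure attributes match are returned:
    cookies = jar.filter_cookies(URL("http://example.com/dashboard"))
    for name, morsel in cookies.items():
        print(name, "=", morsel.value)  # session = abc123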