content | path | filename | language | size_bytes | quality_score | complexity | documentation_ratio | repository | stars | created_date | license | is_test | file_hash |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
from __future__ import annotations\n\nimport enum\nimport logging\nimport ssl\nimport time\nimport types\nimport typing\n\nimport h11\n\nfrom .._backends.base import NetworkStream\nfrom .._exceptions import (\n ConnectionNotAvailable,\n LocalProtocolError,\n RemoteProtocolError,\n WriteError,\n map_exceptions,\n)\nfrom .._models import Origin, Request, Response\nfrom .._synchronization import Lock, ShieldCancellation\nfrom .._trace import Trace\nfrom .interfaces import ConnectionInterface\n\nlogger = logging.getLogger("httpcore.http11")\n\n\n# A subset of `h11.Event` types supported by `_send_event`\nH11SendEvent = typing.Union[\n h11.Request,\n h11.Data,\n h11.EndOfMessage,\n]\n\n\nclass HTTPConnectionState(enum.IntEnum):\n NEW = 0\n ACTIVE = 1\n IDLE = 2\n CLOSED = 3\n\n\nclass HTTP11Connection(ConnectionInterface):\n READ_NUM_BYTES = 64 * 1024\n MAX_INCOMPLETE_EVENT_SIZE = 100 * 1024\n\n def __init__(\n self,\n origin: Origin,\n stream: NetworkStream,\n keepalive_expiry: float | None = None,\n ) -> None:\n self._origin = origin\n self._network_stream = stream\n self._keepalive_expiry: float | None = keepalive_expiry\n self._expire_at: float | None = None\n self._state = HTTPConnectionState.NEW\n self._state_lock = Lock()\n self._request_count = 0\n self._h11_state = h11.Connection(\n our_role=h11.CLIENT,\n max_incomplete_event_size=self.MAX_INCOMPLETE_EVENT_SIZE,\n )\n\n def handle_request(self, request: Request) -> Response:\n if not self.can_handle_request(request.url.origin):\n raise RuntimeError(\n f"Attempted to send request to {request.url.origin} on connection "\n f"to {self._origin}"\n )\n\n with self._state_lock:\n if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE):\n self._request_count += 1\n self._state = HTTPConnectionState.ACTIVE\n self._expire_at = None\n else:\n raise ConnectionNotAvailable()\n\n try:\n kwargs = {"request": request}\n try:\n with Trace(\n "send_request_headers", logger, request, kwargs\n ) as trace:\n self._send_request_headers(**kwargs)\n with Trace("send_request_body", logger, request, kwargs) as trace:\n self._send_request_body(**kwargs)\n except WriteError:\n # If we get a write error while we're writing the request,\n # then we suppress this error and move on to attempting to\n # read the response. 
Servers can sometimes close the request\n # pre-emptively and then respond with a well formed HTTP\n # error response.\n pass\n\n with Trace(\n "receive_response_headers", logger, request, kwargs\n ) as trace:\n (\n http_version,\n status,\n reason_phrase,\n headers,\n trailing_data,\n ) = self._receive_response_headers(**kwargs)\n trace.return_value = (\n http_version,\n status,\n reason_phrase,\n headers,\n )\n\n network_stream = self._network_stream\n\n # CONNECT or Upgrade request\n if (status == 101) or (\n (request.method == b"CONNECT") and (200 <= status < 300)\n ):\n network_stream = HTTP11UpgradeStream(network_stream, trailing_data)\n\n return Response(\n status=status,\n headers=headers,\n content=HTTP11ConnectionByteStream(self, request),\n extensions={\n "http_version": http_version,\n "reason_phrase": reason_phrase,\n "network_stream": network_stream,\n },\n )\n except BaseException as exc:\n with ShieldCancellation():\n with Trace("response_closed", logger, request) as trace:\n self._response_closed()\n raise exc\n\n # Sending the request...\n\n def _send_request_headers(self, request: Request) -> None:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("write", None)\n\n with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):\n event = h11.Request(\n method=request.method,\n target=request.url.target,\n headers=request.headers,\n )\n self._send_event(event, timeout=timeout)\n\n def _send_request_body(self, request: Request) -> None:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("write", None)\n\n assert isinstance(request.stream, typing.Iterable)\n for chunk in request.stream:\n event = h11.Data(data=chunk)\n self._send_event(event, timeout=timeout)\n\n self._send_event(h11.EndOfMessage(), timeout=timeout)\n\n def _send_event(self, event: h11.Event, timeout: float | None = None) -> None:\n bytes_to_send = self._h11_state.send(event)\n if bytes_to_send is not None:\n self._network_stream.write(bytes_to_send, timeout=timeout)\n\n # Receiving the response...\n\n def _receive_response_headers(\n self, request: Request\n ) -> tuple[bytes, int, bytes, list[tuple[bytes, bytes]], bytes]:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("read", None)\n\n while True:\n event = self._receive_event(timeout=timeout)\n if isinstance(event, h11.Response):\n break\n if (\n isinstance(event, h11.InformationalResponse)\n and event.status_code == 101\n ):\n break\n\n http_version = b"HTTP/" + event.http_version\n\n # h11 version 0.11+ supports a `raw_items` interface to get the\n # raw header casing, rather than the enforced lowercase headers.\n headers = event.headers.raw_items()\n\n trailing_data, _ = self._h11_state.trailing_data\n\n return http_version, event.status_code, event.reason, headers, trailing_data\n\n def _receive_response_body(\n self, request: Request\n ) -> typing.Iterator[bytes]:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("read", None)\n\n while True:\n event = self._receive_event(timeout=timeout)\n if isinstance(event, h11.Data):\n yield bytes(event.data)\n elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)):\n break\n\n def _receive_event(\n self, timeout: float | None = None\n ) -> h11.Event | type[h11.PAUSED]:\n while True:\n with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}):\n event = self._h11_state.next_event()\n\n if event is h11.NEED_DATA:\n data = self._network_stream.read(\n self.READ_NUM_BYTES, timeout=timeout\n )\n\n 
# If we feed this case through h11 we'll raise an exception like:\n #\n # httpcore.RemoteProtocolError: can't handle event type\n # ConnectionClosed when role=SERVER and state=SEND_RESPONSE\n #\n # Which is accurate, but not very informative from an end-user\n # perspective. Instead we handle this case distinctly and treat\n # it as a ConnectError.\n if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE:\n msg = "Server disconnected without sending a response."\n raise RemoteProtocolError(msg)\n\n self._h11_state.receive_data(data)\n else:\n # mypy fails to narrow the type in the above if statement\n return event # type: ignore[return-value]\n\n def _response_closed(self) -> None:\n with self._state_lock:\n if (\n self._h11_state.our_state is h11.DONE\n and self._h11_state.their_state is h11.DONE\n ):\n self._state = HTTPConnectionState.IDLE\n self._h11_state.start_next_cycle()\n if self._keepalive_expiry is not None:\n now = time.monotonic()\n self._expire_at = now + self._keepalive_expiry\n else:\n self.close()\n\n # Once the connection is no longer required...\n\n def close(self) -> None:\n # Note that this method unilaterally closes the connection, and does\n # not have any kind of locking in place around it.\n self._state = HTTPConnectionState.CLOSED\n self._network_stream.close()\n\n # The ConnectionInterface methods provide information about the state of\n # the connection, allowing for a connection pooling implementation to\n # determine when to reuse and when to close the connection...\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._origin\n\n def is_available(self) -> bool:\n # Note that HTTP/1.1 connections in the "NEW" state are not treated as\n # being "available". The control flow which created the connection will\n # be able to send an outgoing request, but the connection will not be\n # acquired from the connection pool for any other request.\n return self._state == HTTPConnectionState.IDLE\n\n def has_expired(self) -> bool:\n now = time.monotonic()\n keepalive_expired = self._expire_at is not None and now > self._expire_at\n\n # If the HTTP connection is idle but the socket is readable, then the\n # only valid state is that the socket is about to return b"", indicating\n # a server-initiated disconnect.\n server_disconnected = (\n self._state == HTTPConnectionState.IDLE\n and self._network_stream.get_extra_info("is_readable")\n )\n\n return keepalive_expired or server_disconnected\n\n def is_idle(self) -> bool:\n return self._state == HTTPConnectionState.IDLE\n\n def is_closed(self) -> bool:\n return self._state == HTTPConnectionState.CLOSED\n\n def info(self) -> str:\n origin = str(self._origin)\n return (\n f"{origin!r}, HTTP/1.1, {self._state.name}, "\n f"Request Count: {self._request_count}"\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n origin = str(self._origin)\n return (\n f"<{class_name} [{origin!r}, {self._state.name}, "\n f"Request Count: {self._request_count}]>"\n )\n\n # These context managers are not used in the standard flow, but are\n # useful for testing or working with connection instances directly.\n\n def __enter__(self) -> HTTP11Connection:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: types.TracebackType | None = None,\n ) -> None:\n self.close()\n\n\nclass HTTP11ConnectionByteStream:\n def __init__(self, connection: HTTP11Connection, request: Request) -> None:\n 
self._connection = connection\n self._request = request\n self._closed = False\n\n def __iter__(self) -> typing.Iterator[bytes]:\n kwargs = {"request": self._request}\n try:\n with Trace("receive_response_body", logger, self._request, kwargs):\n for chunk in self._connection._receive_response_body(**kwargs):\n yield chunk\n except BaseException as exc:\n # If we get an exception while streaming the response,\n # we want to close the response (and possibly the connection)\n # before raising that exception.\n with ShieldCancellation():\n self.close()\n raise exc\n\n def close(self) -> None:\n if not self._closed:\n self._closed = True\n with Trace("response_closed", logger, self._request):\n self._connection._response_closed()\n\n\nclass HTTP11UpgradeStream(NetworkStream):\n def __init__(self, stream: NetworkStream, leading_data: bytes) -> None:\n self._stream = stream\n self._leading_data = leading_data\n\n def read(self, max_bytes: int, timeout: float | None = None) -> bytes:\n if self._leading_data:\n buffer = self._leading_data[:max_bytes]\n self._leading_data = self._leading_data[max_bytes:]\n return buffer\n else:\n return self._stream.read(max_bytes, timeout)\n\n def write(self, buffer: bytes, timeout: float | None = None) -> None:\n self._stream.write(buffer, timeout)\n\n def close(self) -> None:\n self._stream.close()\n\n def start_tls(\n self,\n ssl_context: ssl.SSLContext,\n server_hostname: str | None = None,\n timeout: float | None = None,\n ) -> NetworkStream:\n return self._stream.start_tls(ssl_context, server_hostname, timeout)\n\n def get_extra_info(self, info: str) -> typing.Any:\n return self._stream.get_extra_info(info)\n
|
.venv\Lib\site-packages\httpcore\_sync\http11.py
|
http11.py
|
Python
| 13,476 | 0.95 | 0.155673 | 0.121019 |
react-lib
| 391 |
2025-05-26T21:40:11.870539
|
BSD-3-Clause
| false |
c21f0b1e2a542fa39fff7a3184ac648e
|
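Below is a hedged usage sketch for the `HTTP11Connection` class in the file above. The host, port, and URL are placeholder assumptions, and the `SyncBackend` import path mirrors the private backend imports this package's own modules use; treat it as an illustration, not part of the source file.

```python
# Minimal sketch (assumptions: example.com is a placeholder target;
# SyncBackend is imported from its private path, as in this package).
from httpcore._backends.sync import SyncBackend
from httpcore._models import Origin
from httpcore._sync.http11 import HTTP11Connection

origin = Origin(scheme=b"http", host=b"example.com", port=80)
stream = SyncBackend().connect_tcp("example.com", 80)

# The connection serves one request at a time (NEW/IDLE -> ACTIVE).
with HTTP11Connection(origin=origin, stream=stream) as connection:
    response = connection.request("GET", "http://example.com/")
    print(response.status, connection.info())
```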
from __future__ import annotations\n\nimport enum\nimport logging\nimport time\nimport types\nimport typing\n\nimport h2.config\nimport h2.connection\nimport h2.events\nimport h2.exceptions\nimport h2.settings\n\nfrom .._backends.base import NetworkStream\nfrom .._exceptions import (\n ConnectionNotAvailable,\n LocalProtocolError,\n RemoteProtocolError,\n)\nfrom .._models import Origin, Request, Response\nfrom .._synchronization import Lock, Semaphore, ShieldCancellation\nfrom .._trace import Trace\nfrom .interfaces import ConnectionInterface\n\nlogger = logging.getLogger("httpcore.http2")\n\n\ndef has_body_headers(request: Request) -> bool:\n return any(\n k.lower() == b"content-length" or k.lower() == b"transfer-encoding"\n for k, v in request.headers\n )\n\n\nclass HTTPConnectionState(enum.IntEnum):\n ACTIVE = 1\n IDLE = 2\n CLOSED = 3\n\n\nclass HTTP2Connection(ConnectionInterface):\n READ_NUM_BYTES = 64 * 1024\n CONFIG = h2.config.H2Configuration(validate_inbound_headers=False)\n\n def __init__(\n self,\n origin: Origin,\n stream: NetworkStream,\n keepalive_expiry: float | None = None,\n ):\n self._origin = origin\n self._network_stream = stream\n self._keepalive_expiry: float | None = keepalive_expiry\n self._h2_state = h2.connection.H2Connection(config=self.CONFIG)\n self._state = HTTPConnectionState.IDLE\n self._expire_at: float | None = None\n self._request_count = 0\n self._init_lock = Lock()\n self._state_lock = Lock()\n self._read_lock = Lock()\n self._write_lock = Lock()\n self._sent_connection_init = False\n self._used_all_stream_ids = False\n self._connection_error = False\n\n # Mapping from stream ID to response stream events.\n self._events: dict[\n int,\n list[\n h2.events.ResponseReceived\n | h2.events.DataReceived\n | h2.events.StreamEnded\n | h2.events.StreamReset,\n ],\n ] = {}\n\n # Connection terminated events are stored as state since\n # we need to handle them for all streams.\n self._connection_terminated: h2.events.ConnectionTerminated | None = None\n\n self._read_exception: Exception | None = None\n self._write_exception: Exception | None = None\n\n def handle_request(self, request: Request) -> Response:\n if not self.can_handle_request(request.url.origin):\n # This cannot occur in normal operation, since the connection pool\n # will only send requests on connections that handle them.\n # It's in place simply for resilience as a guard against incorrect\n # usage, for anyone working directly with httpcore connections.\n raise RuntimeError(\n f"Attempted to send request to {request.url.origin} on connection "\n f"to {self._origin}"\n )\n\n with self._state_lock:\n if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE):\n self._request_count += 1\n self._expire_at = None\n self._state = HTTPConnectionState.ACTIVE\n else:\n raise ConnectionNotAvailable()\n\n with self._init_lock:\n if not self._sent_connection_init:\n try:\n sci_kwargs = {"request": request}\n with Trace(\n "send_connection_init", logger, request, sci_kwargs\n ):\n self._send_connection_init(**sci_kwargs)\n except BaseException as exc:\n with ShieldCancellation():\n self.close()\n raise exc\n\n self._sent_connection_init = True\n\n # Initially start with just 1 until the remote server provides\n # its max_concurrent_streams value\n self._max_streams = 1\n\n local_settings_max_streams = (\n self._h2_state.local_settings.max_concurrent_streams\n )\n self._max_streams_semaphore = Semaphore(local_settings_max_streams)\n\n for _ in range(local_settings_max_streams - 
self._max_streams):\n self._max_streams_semaphore.acquire()\n\n self._max_streams_semaphore.acquire()\n\n try:\n stream_id = self._h2_state.get_next_available_stream_id()\n self._events[stream_id] = []\n except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover\n self._used_all_stream_ids = True\n self._request_count -= 1\n raise ConnectionNotAvailable()\n\n try:\n kwargs = {"request": request, "stream_id": stream_id}\n with Trace("send_request_headers", logger, request, kwargs):\n self._send_request_headers(request=request, stream_id=stream_id)\n with Trace("send_request_body", logger, request, kwargs):\n self._send_request_body(request=request, stream_id=stream_id)\n with Trace(\n "receive_response_headers", logger, request, kwargs\n ) as trace:\n status, headers = self._receive_response(\n request=request, stream_id=stream_id\n )\n trace.return_value = (status, headers)\n\n return Response(\n status=status,\n headers=headers,\n content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id),\n extensions={\n "http_version": b"HTTP/2",\n "network_stream": self._network_stream,\n "stream_id": stream_id,\n },\n )\n except BaseException as exc: # noqa: PIE786\n with ShieldCancellation():\n kwargs = {"stream_id": stream_id}\n with Trace("response_closed", logger, request, kwargs):\n self._response_closed(stream_id=stream_id)\n\n if isinstance(exc, h2.exceptions.ProtocolError):\n # One case where h2 can raise a protocol error is when a\n # closed frame has been seen by the state machine.\n #\n # This happens when one stream is reading, and encounters\n # a GOAWAY event. Other flows of control may then raise\n # a protocol error at any point they interact with the 'h2_state'.\n #\n # In this case we'll have stored the event, and should raise\n # it as a RemoteProtocolError.\n if self._connection_terminated: # pragma: nocover\n raise RemoteProtocolError(self._connection_terminated)\n # If h2 raises a protocol error in some other state then we\n # must somehow have made a protocol violation.\n raise LocalProtocolError(exc) # pragma: nocover\n\n raise exc\n\n def _send_connection_init(self, request: Request) -> None:\n """\n The HTTP/2 connection requires some initial setup before we can start\n using individual request/response streams on it.\n """\n # Need to set these manually here instead of manipulating via\n # __setitem__() otherwise the H2Connection will emit SettingsUpdate\n # frames in addition to sending the undesired defaults.\n self._h2_state.local_settings = h2.settings.Settings(\n client=True,\n initial_values={\n # Disable PUSH_PROMISE frames from the server since we don't do anything\n # with them for now. 
Maybe when we support caching?\n h2.settings.SettingCodes.ENABLE_PUSH: 0,\n # These two are taken from h2 for safe defaults\n h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,\n h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536,\n },\n )\n\n # Some websites (*cough* Yahoo *cough*) balk at this setting being\n # present in the initial handshake since it's not defined in the original\n # RFC despite the RFC mandating ignoring settings you don't know about.\n del self._h2_state.local_settings[\n h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL\n ]\n\n self._h2_state.initiate_connection()\n self._h2_state.increment_flow_control_window(2**24)\n self._write_outgoing_data(request)\n\n # Sending the request...\n\n def _send_request_headers(self, request: Request, stream_id: int) -> None:\n """\n Send the request headers to a given stream ID.\n """\n end_stream = not has_body_headers(request)\n\n # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'.\n # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require\n # HTTP/1.1 style headers, and map them appropriately if we end up on\n # an HTTP/2 connection.\n authority = [v for k, v in request.headers if k.lower() == b"host"][0]\n\n headers = [\n (b":method", request.method),\n (b":authority", authority),\n (b":scheme", request.url.scheme),\n (b":path", request.url.target),\n ] + [\n (k.lower(), v)\n for k, v in request.headers\n if k.lower()\n not in (\n b"host",\n b"transfer-encoding",\n )\n ]\n\n self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)\n self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id)\n self._write_outgoing_data(request)\n\n def _send_request_body(self, request: Request, stream_id: int) -> None:\n """\n Iterate over the request body sending it to a given stream ID.\n """\n if not has_body_headers(request):\n return\n\n assert isinstance(request.stream, typing.Iterable)\n for data in request.stream:\n self._send_stream_data(request, stream_id, data)\n self._send_end_stream(request, stream_id)\n\n def _send_stream_data(\n self, request: Request, stream_id: int, data: bytes\n ) -> None:\n """\n Send a single chunk of data in one or more data frames.\n """\n while data:\n max_flow = self._wait_for_outgoing_flow(request, stream_id)\n chunk_size = min(len(data), max_flow)\n chunk, data = data[:chunk_size], data[chunk_size:]\n self._h2_state.send_data(stream_id, chunk)\n self._write_outgoing_data(request)\n\n def _send_end_stream(self, request: Request, stream_id: int) -> None:\n """\n Send an empty data frame on a given stream ID with the END_STREAM flag set.\n """\n self._h2_state.end_stream(stream_id)\n self._write_outgoing_data(request)\n\n # Receiving the response...\n\n def _receive_response(\n self, request: Request, stream_id: int\n ) -> tuple[int, list[tuple[bytes, bytes]]]:\n """\n Return the response status code and headers for a given stream ID.\n """\n while True:\n event = self._receive_stream_event(request, stream_id)\n if isinstance(event, h2.events.ResponseReceived):\n break\n\n status_code = 200\n headers = []\n assert event.headers is not None\n for k, v in event.headers:\n if k == b":status":\n status_code = int(v.decode("ascii", errors="ignore"))\n elif not k.startswith(b":"):\n headers.append((k, v))\n\n return (status_code, headers)\n\n def _receive_response_body(\n self, request: Request, stream_id: int\n ) -> typing.Iterator[bytes]:\n """\n Iterator that returns the bytes of the response body for a given stream ID.\n """\n while True:\n 
event = self._receive_stream_event(request, stream_id)\n if isinstance(event, h2.events.DataReceived):\n assert event.flow_controlled_length is not None\n assert event.data is not None\n amount = event.flow_controlled_length\n self._h2_state.acknowledge_received_data(amount, stream_id)\n self._write_outgoing_data(request)\n yield event.data\n elif isinstance(event, h2.events.StreamEnded):\n break\n\n def _receive_stream_event(\n self, request: Request, stream_id: int\n ) -> h2.events.ResponseReceived | h2.events.DataReceived | h2.events.StreamEnded:\n """\n Return the next available event for a given stream ID.\n\n Will read more data from the network if required.\n """\n while not self._events.get(stream_id):\n self._receive_events(request, stream_id)\n event = self._events[stream_id].pop(0)\n if isinstance(event, h2.events.StreamReset):\n raise RemoteProtocolError(event)\n return event\n\n def _receive_events(\n self, request: Request, stream_id: int | None = None\n ) -> None:\n """\n Read some data from the network until we see one or more events\n for a given stream ID.\n """\n with self._read_lock:\n if self._connection_terminated is not None:\n last_stream_id = self._connection_terminated.last_stream_id\n if stream_id and last_stream_id and stream_id > last_stream_id:\n self._request_count -= 1\n raise ConnectionNotAvailable()\n raise RemoteProtocolError(self._connection_terminated)\n\n # This conditional is a bit icky. We don't want to block reading if we've\n # actually got an event to return for a given stream. We need to do that\n # check *within* the atomic read lock. Though it also needs to be optional,\n # because when we call it from `_wait_for_outgoing_flow` we *do* want to\n # block until we have available flow control, even when we have events\n # pending for the stream ID we're attempting to send on.\n if stream_id is None or not self._events.get(stream_id):\n events = self._read_incoming_data(request)\n for event in events:\n if isinstance(event, h2.events.RemoteSettingsChanged):\n with Trace(\n "receive_remote_settings", logger, request\n ) as trace:\n self._receive_remote_settings_change(event)\n trace.return_value = event\n\n elif isinstance(\n event,\n (\n h2.events.ResponseReceived,\n h2.events.DataReceived,\n h2.events.StreamEnded,\n h2.events.StreamReset,\n ),\n ):\n if event.stream_id in self._events:\n self._events[event.stream_id].append(event)\n\n elif isinstance(event, h2.events.ConnectionTerminated):\n self._connection_terminated = event\n\n self._write_outgoing_data(request)\n\n def _receive_remote_settings_change(\n self, event: h2.events.RemoteSettingsChanged\n ) -> None:\n max_concurrent_streams = event.changed_settings.get(\n h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS\n )\n if max_concurrent_streams:\n new_max_streams = min(\n max_concurrent_streams.new_value,\n self._h2_state.local_settings.max_concurrent_streams,\n )\n if new_max_streams and new_max_streams != self._max_streams:\n while new_max_streams > self._max_streams:\n self._max_streams_semaphore.release()\n self._max_streams += 1\n while new_max_streams < self._max_streams:\n self._max_streams_semaphore.acquire()\n self._max_streams -= 1\n\n def _response_closed(self, stream_id: int) -> None:\n self._max_streams_semaphore.release()\n del self._events[stream_id]\n with self._state_lock:\n if self._connection_terminated and not self._events:\n self.close()\n\n elif self._state == HTTPConnectionState.ACTIVE and not self._events:\n self._state = HTTPConnectionState.IDLE\n if 
self._keepalive_expiry is not None:\n now = time.monotonic()\n self._expire_at = now + self._keepalive_expiry\n if self._used_all_stream_ids: # pragma: nocover\n self.close()\n\n def close(self) -> None:\n # Note that this method unilaterally closes the connection, and does\n # not have any kind of locking in place around it.\n self._h2_state.close_connection()\n self._state = HTTPConnectionState.CLOSED\n self._network_stream.close()\n\n # Wrappers around network read/write operations...\n\n def _read_incoming_data(self, request: Request) -> list[h2.events.Event]:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("read", None)\n\n if self._read_exception is not None:\n raise self._read_exception # pragma: nocover\n\n try:\n data = self._network_stream.read(self.READ_NUM_BYTES, timeout)\n if data == b"":\n raise RemoteProtocolError("Server disconnected")\n except Exception as exc:\n # If we get a network error we should:\n #\n # 1. Save the exception and just raise it immediately on any future reads.\n # (For example, this means that a single read timeout or disconnect will\n # immediately close all pending streams. Without requiring multiple\n # sequential timeouts.)\n # 2. Mark the connection as errored, so that we don't accept any other\n # incoming requests.\n self._read_exception = exc\n self._connection_error = True\n raise exc\n\n events: list[h2.events.Event] = self._h2_state.receive_data(data)\n\n return events\n\n def _write_outgoing_data(self, request: Request) -> None:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("write", None)\n\n with self._write_lock:\n data_to_send = self._h2_state.data_to_send()\n\n if self._write_exception is not None:\n raise self._write_exception # pragma: nocover\n\n try:\n self._network_stream.write(data_to_send, timeout)\n except Exception as exc: # pragma: nocover\n # If we get a network error we should:\n #\n # 1. Save the exception and just raise it immediately on any future write.\n # (For example, this means that a single write timeout or disconnect will\n # immediately close all pending streams. Without requiring multiple\n # sequential timeouts.)\n # 2. 
Mark the connection as errored, so that we don't accept any other\n # incoming requests.\n self._write_exception = exc\n self._connection_error = True\n raise exc\n\n # Flow control...\n\n def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int:\n """\n Returns the maximum allowable outgoing flow for a given stream.\n\n If the allowable flow is zero, then waits on the network until\n WindowUpdated frames have increased the flow rate.\n https://tools.ietf.org/html/rfc7540#section-6.9\n """\n local_flow: int = self._h2_state.local_flow_control_window(stream_id)\n max_frame_size: int = self._h2_state.max_outbound_frame_size\n flow = min(local_flow, max_frame_size)\n while flow == 0:\n self._receive_events(request)\n local_flow = self._h2_state.local_flow_control_window(stream_id)\n max_frame_size = self._h2_state.max_outbound_frame_size\n flow = min(local_flow, max_frame_size)\n return flow\n\n # Interface for connection pooling...\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._origin\n\n def is_available(self) -> bool:\n return (\n self._state != HTTPConnectionState.CLOSED\n and not self._connection_error\n and not self._used_all_stream_ids\n and not (\n self._h2_state.state_machine.state\n == h2.connection.ConnectionState.CLOSED\n )\n )\n\n def has_expired(self) -> bool:\n now = time.monotonic()\n return self._expire_at is not None and now > self._expire_at\n\n def is_idle(self) -> bool:\n return self._state == HTTPConnectionState.IDLE\n\n def is_closed(self) -> bool:\n return self._state == HTTPConnectionState.CLOSED\n\n def info(self) -> str:\n origin = str(self._origin)\n return (\n f"{origin!r}, HTTP/2, {self._state.name}, "\n f"Request Count: {self._request_count}"\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n origin = str(self._origin)\n return (\n f"<{class_name} [{origin!r}, {self._state.name}, "\n f"Request Count: {self._request_count}]>"\n )\n\n # These context managers are not used in the standard flow, but are\n # useful for testing or working with connection instances directly.\n\n def __enter__(self) -> HTTP2Connection:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: types.TracebackType | None = None,\n ) -> None:\n self.close()\n\n\nclass HTTP2ConnectionByteStream:\n def __init__(\n self, connection: HTTP2Connection, request: Request, stream_id: int\n ) -> None:\n self._connection = connection\n self._request = request\n self._stream_id = stream_id\n self._closed = False\n\n def __iter__(self) -> typing.Iterator[bytes]:\n kwargs = {"request": self._request, "stream_id": self._stream_id}\n try:\n with Trace("receive_response_body", logger, self._request, kwargs):\n for chunk in self._connection._receive_response_body(\n request=self._request, stream_id=self._stream_id\n ):\n yield chunk\n except BaseException as exc:\n # If we get an exception while streaming the response,\n # we want to close the response (and possibly the connection)\n # before raising that exception.\n with ShieldCancellation():\n self.close()\n raise exc\n\n def close(self) -> None:\n if not self._closed:\n self._closed = True\n kwargs = {"stream_id": self._stream_id}\n with Trace("response_closed", logger, self._request, kwargs):\n self._connection._response_closed(stream_id=self._stream_id)\n
|
.venv\Lib\site-packages\httpcore\_sync\http2.py
|
http2.py
|
Python
| 23,400 | 0.95 | 0.165541 | 0.131631 |
react-lib
| 37 |
2024-03-30T21:04:41.088428
|
Apache-2.0
| false |
80ba5c525538c1671a7b84dc46e2b3b0
|
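As with HTTP/1.1 above, `HTTP2Connection` can be driven directly. A minimal sketch, assuming a server that speaks cleartext HTTP/2 with prior knowledge (h2c) on a placeholder address; in normal use the connection is created after ALPN negotiation over TLS instead.

```python
# Minimal sketch (assumptions: localhost:8080 is a placeholder and the
# server speaks HTTP/2 with prior knowledge; no TLS/ALPN shown here).
from httpcore._backends.sync import SyncBackend
from httpcore._models import Origin
from httpcore._sync.http2 import HTTP2Connection

origin = Origin(scheme=b"http", host=b"localhost", port=8080)
stream = SyncBackend().connect_tcp("localhost", 8080)

with HTTP2Connection(origin=origin, stream=stream) as connection:
    # Streams are multiplexed; each response exposes its stream ID.
    response = connection.request("GET", "http://localhost:8080/")
    print(response.status, response.extensions["stream_id"])
```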
from __future__ import annotations\n\nimport base64\nimport logging\nimport ssl\nimport typing\n\nfrom .._backends.base import SOCKET_OPTION, NetworkBackend\nfrom .._exceptions import ProxyError\nfrom .._models import (\n URL,\n Origin,\n Request,\n Response,\n enforce_bytes,\n enforce_headers,\n enforce_url,\n)\nfrom .._ssl import default_ssl_context\nfrom .._synchronization import Lock\nfrom .._trace import Trace\nfrom .connection import HTTPConnection\nfrom .connection_pool import ConnectionPool\nfrom .http11 import HTTP11Connection\nfrom .interfaces import ConnectionInterface\n\nByteOrStr = typing.Union[bytes, str]\nHeadersAsSequence = typing.Sequence[typing.Tuple[ByteOrStr, ByteOrStr]]\nHeadersAsMapping = typing.Mapping[ByteOrStr, ByteOrStr]\n\n\nlogger = logging.getLogger("httpcore.proxy")\n\n\ndef merge_headers(\n default_headers: typing.Sequence[tuple[bytes, bytes]] | None = None,\n override_headers: typing.Sequence[tuple[bytes, bytes]] | None = None,\n) -> list[tuple[bytes, bytes]]:\n """\n Append default_headers and override_headers, de-duplicating if a key exists\n in both cases.\n """\n default_headers = [] if default_headers is None else list(default_headers)\n override_headers = [] if override_headers is None else list(override_headers)\n has_override = set(key.lower() for key, value in override_headers)\n default_headers = [\n (key, value)\n for key, value in default_headers\n if key.lower() not in has_override\n ]\n return default_headers + override_headers\n\n\nclass HTTPProxy(ConnectionPool): # pragma: nocover\n """\n A connection pool that sends requests via an HTTP proxy.\n """\n\n def __init__(\n self,\n proxy_url: URL | bytes | str,\n proxy_auth: tuple[bytes | str, bytes | str] | None = None,\n proxy_headers: HeadersAsMapping | HeadersAsSequence | None = None,\n ssl_context: ssl.SSLContext | None = None,\n proxy_ssl_context: ssl.SSLContext | None = None,\n max_connections: int | None = 10,\n max_keepalive_connections: int | None = None,\n keepalive_expiry: float | None = None,\n http1: bool = True,\n http2: bool = False,\n retries: int = 0,\n local_address: str | None = None,\n uds: str | None = None,\n network_backend: NetworkBackend | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> None:\n """\n A connection pool for making HTTP requests.\n\n Parameters:\n proxy_url: The URL to use when connecting to the proxy server.\n For example `"http://127.0.0.1:8080/"`.\n proxy_auth: Any proxy authentication as a two-tuple of\n (username, password). May be either bytes or ascii-only str.\n proxy_headers: Any HTTP headers to use for the proxy requests.\n For example `{"Proxy-Authorization": "Basic <username>:<password>"}`.\n ssl_context: An SSL context to use for verifying connections.\n If not specified, the default `httpcore.default_ssl_context()`\n will be used.\n proxy_ssl_context: The same as `ssl_context`, but for a proxy server rather than a remote origin.\n max_connections: The maximum number of concurrent HTTP connections that\n the pool should allow. Any attempt to send a request on a pool that\n would exceed this amount will block until a connection is available.\n max_keepalive_connections: The maximum number of idle HTTP connections\n that will be maintained in the pool.\n keepalive_expiry: The duration in seconds that an idle HTTP connection\n may be maintained for before being expired from the pool.\n http1: A boolean indicating if HTTP/1.1 requests should be supported\n by the connection pool. 
Defaults to True.\n http2: A boolean indicating if HTTP/2 requests should be supported by\n the connection pool. Defaults to False.\n retries: The maximum number of retries when trying to establish\n a connection.\n local_address: Local address to connect from. Can also be used to\n connect using a particular address family. Using\n `local_address="0.0.0.0"` will connect using an `AF_INET` address\n (IPv4), while using `local_address="::"` will connect using an\n `AF_INET6` address (IPv6).\n uds: Path to a Unix Domain Socket to use instead of TCP sockets.\n network_backend: A backend instance to use for handling network I/O.\n """\n super().__init__(\n ssl_context=ssl_context,\n max_connections=max_connections,\n max_keepalive_connections=max_keepalive_connections,\n keepalive_expiry=keepalive_expiry,\n http1=http1,\n http2=http2,\n network_backend=network_backend,\n retries=retries,\n local_address=local_address,\n uds=uds,\n socket_options=socket_options,\n )\n\n self._proxy_url = enforce_url(proxy_url, name="proxy_url")\n if (\n self._proxy_url.scheme == b"http" and proxy_ssl_context is not None\n ): # pragma: no cover\n raise RuntimeError(\n "The `proxy_ssl_context` argument is not allowed for the http scheme"\n )\n\n self._ssl_context = ssl_context\n self._proxy_ssl_context = proxy_ssl_context\n self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")\n if proxy_auth is not None:\n username = enforce_bytes(proxy_auth[0], name="proxy_auth")\n password = enforce_bytes(proxy_auth[1], name="proxy_auth")\n userpass = username + b":" + password\n authorization = b"Basic " + base64.b64encode(userpass)\n self._proxy_headers = [\n (b"Proxy-Authorization", authorization)\n ] + self._proxy_headers\n\n def create_connection(self, origin: Origin) -> ConnectionInterface:\n if origin.scheme == b"http":\n return ForwardHTTPConnection(\n proxy_origin=self._proxy_url.origin,\n proxy_headers=self._proxy_headers,\n remote_origin=origin,\n keepalive_expiry=self._keepalive_expiry,\n network_backend=self._network_backend,\n proxy_ssl_context=self._proxy_ssl_context,\n )\n return TunnelHTTPConnection(\n proxy_origin=self._proxy_url.origin,\n proxy_headers=self._proxy_headers,\n remote_origin=origin,\n ssl_context=self._ssl_context,\n proxy_ssl_context=self._proxy_ssl_context,\n keepalive_expiry=self._keepalive_expiry,\n http1=self._http1,\n http2=self._http2,\n network_backend=self._network_backend,\n )\n\n\nclass ForwardHTTPConnection(ConnectionInterface):\n def __init__(\n self,\n proxy_origin: Origin,\n remote_origin: Origin,\n proxy_headers: HeadersAsMapping | HeadersAsSequence | None = None,\n keepalive_expiry: float | None = None,\n network_backend: NetworkBackend | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n proxy_ssl_context: ssl.SSLContext | None = None,\n ) -> None:\n self._connection = HTTPConnection(\n origin=proxy_origin,\n keepalive_expiry=keepalive_expiry,\n network_backend=network_backend,\n socket_options=socket_options,\n ssl_context=proxy_ssl_context,\n )\n self._proxy_origin = proxy_origin\n self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")\n self._remote_origin = remote_origin\n\n def handle_request(self, request: Request) -> Response:\n headers = merge_headers(self._proxy_headers, request.headers)\n url = URL(\n scheme=self._proxy_origin.scheme,\n host=self._proxy_origin.host,\n port=self._proxy_origin.port,\n target=bytes(request.url),\n )\n proxy_request = Request(\n method=request.method,\n url=url,\n 
headers=headers,\n content=request.stream,\n extensions=request.extensions,\n )\n return self._connection.handle_request(proxy_request)\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._remote_origin\n\n def close(self) -> None:\n self._connection.close()\n\n def info(self) -> str:\n return self._connection.info()\n\n def is_available(self) -> bool:\n return self._connection.is_available()\n\n def has_expired(self) -> bool:\n return self._connection.has_expired()\n\n def is_idle(self) -> bool:\n return self._connection.is_idle()\n\n def is_closed(self) -> bool:\n return self._connection.is_closed()\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__} [{self.info()}]>"\n\n\nclass TunnelHTTPConnection(ConnectionInterface):\n def __init__(\n self,\n proxy_origin: Origin,\n remote_origin: Origin,\n ssl_context: ssl.SSLContext | None = None,\n proxy_ssl_context: ssl.SSLContext | None = None,\n proxy_headers: typing.Sequence[tuple[bytes, bytes]] | None = None,\n keepalive_expiry: float | None = None,\n http1: bool = True,\n http2: bool = False,\n network_backend: NetworkBackend | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> None:\n self._connection: ConnectionInterface = HTTPConnection(\n origin=proxy_origin,\n keepalive_expiry=keepalive_expiry,\n network_backend=network_backend,\n socket_options=socket_options,\n ssl_context=proxy_ssl_context,\n )\n self._proxy_origin = proxy_origin\n self._remote_origin = remote_origin\n self._ssl_context = ssl_context\n self._proxy_ssl_context = proxy_ssl_context\n self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")\n self._keepalive_expiry = keepalive_expiry\n self._http1 = http1\n self._http2 = http2\n self._connect_lock = Lock()\n self._connected = False\n\n def handle_request(self, request: Request) -> Response:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("connect", None)\n\n with self._connect_lock:\n if not self._connected:\n target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port)\n\n connect_url = URL(\n scheme=self._proxy_origin.scheme,\n host=self._proxy_origin.host,\n port=self._proxy_origin.port,\n target=target,\n )\n connect_headers = merge_headers(\n [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers\n )\n connect_request = Request(\n method=b"CONNECT",\n url=connect_url,\n headers=connect_headers,\n extensions=request.extensions,\n )\n connect_response = self._connection.handle_request(\n connect_request\n )\n\n if connect_response.status < 200 or connect_response.status > 299:\n reason_bytes = connect_response.extensions.get("reason_phrase", b"")\n reason_str = reason_bytes.decode("ascii", errors="ignore")\n msg = "%d %s" % (connect_response.status, reason_str)\n self._connection.close()\n raise ProxyError(msg)\n\n stream = connect_response.extensions["network_stream"]\n\n # Upgrade the stream to SSL\n ssl_context = (\n default_ssl_context()\n if self._ssl_context is None\n else self._ssl_context\n )\n alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]\n ssl_context.set_alpn_protocols(alpn_protocols)\n\n kwargs = {\n "ssl_context": ssl_context,\n "server_hostname": self._remote_origin.host.decode("ascii"),\n "timeout": timeout,\n }\n with Trace("start_tls", logger, request, kwargs) as trace:\n stream = stream.start_tls(**kwargs)\n trace.return_value = stream\n\n # Determine if we should be using HTTP/1.1 or HTTP/2\n ssl_object = 
stream.get_extra_info("ssl_object")\n http2_negotiated = (\n ssl_object is not None\n and ssl_object.selected_alpn_protocol() == "h2"\n )\n\n # Create the HTTP/1.1 or HTTP/2 connection\n if http2_negotiated or (self._http2 and not self._http1):\n from .http2 import HTTP2Connection\n\n self._connection = HTTP2Connection(\n origin=self._remote_origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n else:\n self._connection = HTTP11Connection(\n origin=self._remote_origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n\n self._connected = True\n return self._connection.handle_request(request)\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._remote_origin\n\n def close(self) -> None:\n self._connection.close()\n\n def info(self) -> str:\n return self._connection.info()\n\n def is_available(self) -> bool:\n return self._connection.is_available()\n\n def has_expired(self) -> bool:\n return self._connection.has_expired()\n\n def is_idle(self) -> bool:\n return self._connection.is_idle()\n\n def is_closed(self) -> bool:\n return self._connection.is_closed()\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__} [{self.info()}]>"\n
|
.venv\Lib\site-packages\httpcore\_sync\http_proxy.py
|
http_proxy.py
|
Python
| 14,463 | 0.95 | 0.138965 | 0.009346 |
python-kit
| 778 |
2025-02-17T20:06:45.037585
|
Apache-2.0
| false |
05c4617ff7607d361ea7d7d7fdd75e6b
|
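A hedged sketch of the `HTTPProxy` pool defined above; the proxy address and credentials are placeholders. Per the `create_connection` logic in the file, `http://` origins are forwarded via `ForwardHTTPConnection`, while `https://` origins are tunnelled with CONNECT via `TunnelHTTPConnection`.

```python
# Minimal sketch (assumptions: 127.0.0.1:8080 placeholder proxy,
# example.com placeholder target).
import httpcore

proxy = httpcore.HTTPProxy(
    proxy_url="http://127.0.0.1:8080/",
    proxy_auth=(b"username", b"password"),  # sent as Proxy-Authorization: Basic ...
)
with proxy:
    # An https:// origin triggers a CONNECT tunnel, then TLS to the remote host.
    response = proxy.request("GET", "https://example.com/")
    print(response.status)
```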
from __future__ import annotations\n\nimport contextlib\nimport typing\n\nfrom .._models import (\n URL,\n Extensions,\n HeaderTypes,\n Origin,\n Request,\n Response,\n enforce_bytes,\n enforce_headers,\n enforce_url,\n include_request_headers,\n)\n\n\nclass RequestInterface:\n def request(\n self,\n method: bytes | str,\n url: URL | bytes | str,\n *,\n headers: HeaderTypes = None,\n content: bytes | typing.Iterator[bytes] | None = None,\n extensions: Extensions | None = None,\n ) -> Response:\n # Strict type checking on our parameters.\n method = enforce_bytes(method, name="method")\n url = enforce_url(url, name="url")\n headers = enforce_headers(headers, name="headers")\n\n # Include Host header, and optionally Content-Length or Transfer-Encoding.\n headers = include_request_headers(headers, url=url, content=content)\n\n request = Request(\n method=method,\n url=url,\n headers=headers,\n content=content,\n extensions=extensions,\n )\n response = self.handle_request(request)\n try:\n response.read()\n finally:\n response.close()\n return response\n\n @contextlib.contextmanager\n def stream(\n self,\n method: bytes | str,\n url: URL | bytes | str,\n *,\n headers: HeaderTypes = None,\n content: bytes | typing.Iterator[bytes] | None = None,\n extensions: Extensions | None = None,\n ) -> typing.Iterator[Response]:\n # Strict type checking on our parameters.\n method = enforce_bytes(method, name="method")\n url = enforce_url(url, name="url")\n headers = enforce_headers(headers, name="headers")\n\n # Include Host header, and optionally Content-Length or Transfer-Encoding.\n headers = include_request_headers(headers, url=url, content=content)\n\n request = Request(\n method=method,\n url=url,\n headers=headers,\n content=content,\n extensions=extensions,\n )\n response = self.handle_request(request)\n try:\n yield response\n finally:\n response.close()\n\n def handle_request(self, request: Request) -> Response:\n raise NotImplementedError() # pragma: nocover\n\n\nclass ConnectionInterface(RequestInterface):\n def close(self) -> None:\n raise NotImplementedError() # pragma: nocover\n\n def info(self) -> str:\n raise NotImplementedError() # pragma: nocover\n\n def can_handle_request(self, origin: Origin) -> bool:\n raise NotImplementedError() # pragma: nocover\n\n def is_available(self) -> bool:\n """\n Return `True` if the connection is currently able to accept an\n outgoing request.\n\n An HTTP/1.1 connection will only be available if it is currently idle.\n\n An HTTP/2 connection will be available so long as the stream ID space is\n not yet exhausted, and the connection is not in an error state.\n\n While the connection is being established we may not yet know if it is going\n to result in an HTTP/1.1 or HTTP/2 connection. 
The connection should be\n treated as being available, but might ultimately raise `NewConnectionRequired`\n exceptions if multiple requests are attempted over a connection\n that ends up being established as HTTP/1.1.\n """\n raise NotImplementedError() # pragma: nocover\n\n def has_expired(self) -> bool:\n """\n Return `True` if the connection is in a state where it should be closed.\n\n This either means that the connection is idle and it has passed the\n expiry time on its keep-alive, or that the server has sent an EOF.\n """\n raise NotImplementedError() # pragma: nocover\n\n def is_idle(self) -> bool:\n """\n Return `True` if the connection is currently idle.\n """\n raise NotImplementedError() # pragma: nocover\n\n def is_closed(self) -> bool:\n """\n Return `True` if the connection has been closed.\n\n Used when a response is closed to determine if the connection may be\n returned to the connection pool or not.\n """\n raise NotImplementedError() # pragma: nocover\n
|
.venv\Lib\site-packages\httpcore\_sync\interfaces.py
|
interfaces.py
|
Python
| 4,344 | 0.95 | 0.160584 | 0.052632 |
python-kit
| 173 |
2024-10-14T02:26:43.414093
|
Apache-2.0
| false |
0bec497f2de09e467262ca615d358374
|
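The `RequestInterface.stream()` context manager above is inherited by every connection and pool implementation. A minimal sketch of consuming it through a `ConnectionPool` (the URL is a placeholder):

```python
# Minimal sketch (assumption: example.com is a placeholder URL).
import httpcore

with httpcore.ConnectionPool() as pool:
    # .stream() yields the Response before the body has been read, and
    # closes it on exit; .request() would read and close it eagerly.
    with pool.stream("GET", "https://example.com/") as response:
        for chunk in response.iter_stream():
            print(len(chunk))
```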
from __future__ import annotations\n\nimport logging\nimport ssl\n\nimport socksio\n\nfrom .._backends.sync import SyncBackend\nfrom .._backends.base import NetworkBackend, NetworkStream\nfrom .._exceptions import ConnectionNotAvailable, ProxyError\nfrom .._models import URL, Origin, Request, Response, enforce_bytes, enforce_url\nfrom .._ssl import default_ssl_context\nfrom .._synchronization import Lock\nfrom .._trace import Trace\nfrom .connection_pool import ConnectionPool\nfrom .http11 import HTTP11Connection\nfrom .interfaces import ConnectionInterface\n\nlogger = logging.getLogger("httpcore.socks")\n\n\nAUTH_METHODS = {\n b"\x00": "NO AUTHENTICATION REQUIRED",\n b"\x01": "GSSAPI",\n b"\x02": "USERNAME/PASSWORD",\n b"\xff": "NO ACCEPTABLE METHODS",\n}\n\nREPLY_CODES = {\n b"\x00": "Succeeded",\n b"\x01": "General SOCKS server failure",\n b"\x02": "Connection not allowed by ruleset",\n b"\x03": "Network unreachable",\n b"\x04": "Host unreachable",\n b"\x05": "Connection refused",\n b"\x06": "TTL expired",\n b"\x07": "Command not supported",\n b"\x08": "Address type not supported",\n}\n\n\ndef _init_socks5_connection(\n stream: NetworkStream,\n *,\n host: bytes,\n port: int,\n auth: tuple[bytes, bytes] | None = None,\n) -> None:\n conn = socksio.socks5.SOCKS5Connection()\n\n # Auth method request\n auth_method = (\n socksio.socks5.SOCKS5AuthMethod.NO_AUTH_REQUIRED\n if auth is None\n else socksio.socks5.SOCKS5AuthMethod.USERNAME_PASSWORD\n )\n conn.send(socksio.socks5.SOCKS5AuthMethodsRequest([auth_method]))\n outgoing_bytes = conn.data_to_send()\n stream.write(outgoing_bytes)\n\n # Auth method response\n incoming_bytes = stream.read(max_bytes=4096)\n response = conn.receive_data(incoming_bytes)\n assert isinstance(response, socksio.socks5.SOCKS5AuthReply)\n if response.method != auth_method:\n requested = AUTH_METHODS.get(auth_method, "UNKNOWN")\n responded = AUTH_METHODS.get(response.method, "UNKNOWN")\n raise ProxyError(\n f"Requested {requested} from proxy server, but got {responded}."\n )\n\n if response.method == socksio.socks5.SOCKS5AuthMethod.USERNAME_PASSWORD:\n # Username/password request\n assert auth is not None\n username, password = auth\n conn.send(socksio.socks5.SOCKS5UsernamePasswordRequest(username, password))\n outgoing_bytes = conn.data_to_send()\n stream.write(outgoing_bytes)\n\n # Username/password response\n incoming_bytes = stream.read(max_bytes=4096)\n response = conn.receive_data(incoming_bytes)\n assert isinstance(response, socksio.socks5.SOCKS5UsernamePasswordReply)\n if not response.success:\n raise ProxyError("Invalid username/password")\n\n # Connect request\n conn.send(\n socksio.socks5.SOCKS5CommandRequest.from_address(\n socksio.socks5.SOCKS5Command.CONNECT, (host, port)\n )\n )\n outgoing_bytes = conn.data_to_send()\n stream.write(outgoing_bytes)\n\n # Connect response\n incoming_bytes = stream.read(max_bytes=4096)\n response = conn.receive_data(incoming_bytes)\n assert isinstance(response, socksio.socks5.SOCKS5Reply)\n if response.reply_code != socksio.socks5.SOCKS5ReplyCode.SUCCEEDED:\n reply_code = REPLY_CODES.get(response.reply_code, "UNKNOWN")\n raise ProxyError(f"Proxy Server could not connect: {reply_code}.")\n\n\nclass SOCKSProxy(ConnectionPool): # pragma: nocover\n """\n A connection pool that sends requests via a SOCKS proxy.\n """\n\n def __init__(\n self,\n proxy_url: URL | bytes | str,\n proxy_auth: tuple[bytes | str, bytes | str] | None = None,\n ssl_context: ssl.SSLContext | None = None,\n max_connections: int | None = 10,\n 
max_keepalive_connections: int | None = None,\n keepalive_expiry: float | None = None,\n http1: bool = True,\n http2: bool = False,\n retries: int = 0,\n network_backend: NetworkBackend | None = None,\n ) -> None:\n """\n A connection pool for making HTTP requests.\n\n Parameters:\n proxy_url: The URL to use when connecting to the proxy server.\n For example `"http://127.0.0.1:8080/"`.\n proxy_auth: Any proxy authentication as a two-tuple of\n (username, password). May be either bytes or ascii-only str.\n ssl_context: An SSL context to use for verifying connections.\n If not specified, the default `httpcore.default_ssl_context()`\n will be used.\n max_connections: The maximum number of concurrent HTTP connections that\n the pool should allow. Any attempt to send a request on a pool that\n would exceed this amount will block until a connection is available.\n max_keepalive_connections: The maximum number of idle HTTP connections\n that will be maintained in the pool.\n keepalive_expiry: The duration in seconds that an idle HTTP connection\n may be maintained for before being expired from the pool.\n http1: A boolean indicating if HTTP/1.1 requests should be supported\n by the connection pool. Defaults to True.\n http2: A boolean indicating if HTTP/2 requests should be supported by\n the connection pool. Defaults to False.\n retries: The maximum number of retries when trying to establish\n a connection.\n network_backend: A backend instance to use for handling network I/O.\n """\n super().__init__(\n ssl_context=ssl_context,\n max_connections=max_connections,\n max_keepalive_connections=max_keepalive_connections,\n keepalive_expiry=keepalive_expiry,\n http1=http1,\n http2=http2,\n network_backend=network_backend,\n retries=retries,\n )\n self._ssl_context = ssl_context\n self._proxy_url = enforce_url(proxy_url, name="proxy_url")\n if proxy_auth is not None:\n username, password = proxy_auth\n username_bytes = enforce_bytes(username, name="proxy_auth")\n password_bytes = enforce_bytes(password, name="proxy_auth")\n self._proxy_auth: tuple[bytes, bytes] | None = (\n username_bytes,\n password_bytes,\n )\n else:\n self._proxy_auth = None\n\n def create_connection(self, origin: Origin) -> ConnectionInterface:\n return Socks5Connection(\n proxy_origin=self._proxy_url.origin,\n remote_origin=origin,\n proxy_auth=self._proxy_auth,\n ssl_context=self._ssl_context,\n keepalive_expiry=self._keepalive_expiry,\n http1=self._http1,\n http2=self._http2,\n network_backend=self._network_backend,\n )\n\n\nclass Socks5Connection(ConnectionInterface):\n def __init__(\n self,\n proxy_origin: Origin,\n remote_origin: Origin,\n proxy_auth: tuple[bytes, bytes] | None = None,\n ssl_context: ssl.SSLContext | None = None,\n keepalive_expiry: float | None = None,\n http1: bool = True,\n http2: bool = False,\n network_backend: NetworkBackend | None = None,\n ) -> None:\n self._proxy_origin = proxy_origin\n self._remote_origin = remote_origin\n self._proxy_auth = proxy_auth\n self._ssl_context = ssl_context\n self._keepalive_expiry = keepalive_expiry\n self._http1 = http1\n self._http2 = http2\n\n self._network_backend: NetworkBackend = (\n SyncBackend() if network_backend is None else network_backend\n )\n self._connect_lock = Lock()\n self._connection: ConnectionInterface | None = None\n 
self._connect_failed = False\n\n def handle_request(self, request: Request) -> Response:\n timeouts = request.extensions.get("timeout", {})\n sni_hostname = request.extensions.get("sni_hostname", None)\n timeout = timeouts.get("connect", None)\n\n with self._connect_lock:\n if self._connection is None:\n try:\n # Connect to the proxy\n kwargs = {\n "host": self._proxy_origin.host.decode("ascii"),\n "port": self._proxy_origin.port,\n "timeout": timeout,\n }\n with Trace("connect_tcp", logger, request, kwargs) as trace:\n stream = self._network_backend.connect_tcp(**kwargs)\n trace.return_value = stream\n\n # Connect to the remote host using socks5\n kwargs = {\n "stream": stream,\n "host": self._remote_origin.host.decode("ascii"),\n "port": self._remote_origin.port,\n "auth": self._proxy_auth,\n }\n with Trace(\n "setup_socks5_connection", logger, request, kwargs\n ) as trace:\n _init_socks5_connection(**kwargs)\n trace.return_value = stream\n\n # Upgrade the stream to SSL\n if self._remote_origin.scheme == b"https":\n ssl_context = (\n default_ssl_context()\n if self._ssl_context is None\n else self._ssl_context\n )\n alpn_protocols = (\n ["http/1.1", "h2"] if self._http2 else ["http/1.1"]\n )\n ssl_context.set_alpn_protocols(alpn_protocols)\n\n kwargs = {\n "ssl_context": ssl_context,\n "server_hostname": sni_hostname\n or self._remote_origin.host.decode("ascii"),\n "timeout": timeout,\n }\n with Trace("start_tls", logger, request, kwargs) as trace:\n stream = stream.start_tls(**kwargs)\n trace.return_value = stream\n\n # Determine if we should be using HTTP/1.1 or HTTP/2\n ssl_object = stream.get_extra_info("ssl_object")\n http2_negotiated = (\n ssl_object is not None\n and ssl_object.selected_alpn_protocol() == "h2"\n )\n\n # Create the HTTP/1.1 or HTTP/2 connection\n if http2_negotiated or (\n self._http2 and not self._http1\n ): # pragma: nocover\n from .http2 import HTTP2Connection\n\n self._connection = HTTP2Connection(\n origin=self._remote_origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n else:\n self._connection = HTTP11Connection(\n origin=self._remote_origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n except Exception as exc:\n self._connect_failed = True\n raise exc\n elif not self._connection.is_available(): # pragma: nocover\n raise ConnectionNotAvailable()\n\n return self._connection.handle_request(request)\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._remote_origin\n\n def close(self) -> None:\n if self._connection is not None:\n self._connection.close()\n\n def is_available(self) -> bool:\n if self._connection is None: # pragma: nocover\n # If HTTP/2 support is enabled, and the resulting connection could\n # end up as HTTP/2 then we should indicate the connection as being\n # available to service multiple requests.\n return (\n self._http2\n and (self._remote_origin.scheme == b"https" or not self._http1)\n and not self._connect_failed\n )\n return self._connection.is_available()\n\n def has_expired(self) -> bool:\n if self._connection is None: # pragma: nocover\n return self._connect_failed\n return self._connection.has_expired()\n\n def is_idle(self) -> bool:\n if self._connection is None: # pragma: nocover\n return self._connect_failed\n return self._connection.is_idle()\n\n def is_closed(self) -> bool:\n if self._connection is None: # pragma: nocover\n return self._connect_failed\n return self._connection.is_closed()\n\n def info(self) -> str:\n if self._connection is None: # pragma: 
nocover\n return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"\n return self._connection.info()\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__} [{self.info()}]>"\n
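As a usage sketch of the class above: `SOCKSProxy` exposes the standard connection-pool request interface, so routing a request through a SOCKS5 proxy looks like the following. The proxy address and credentials are illustrative, and the optional `socksio` dependency (`pip install httpcore[socks]`) is assumed to be installed.

```python
# A minimal sketch of driving the SOCKSProxy pool defined above.
# Assumes a SOCKS5 server at 127.0.0.1:1080 (illustrative address).
import httpcore

with httpcore.SOCKSProxy(
    proxy_url="socks5://127.0.0.1:1080/",
    proxy_auth=("username", "password"),  # optional; omit if no auth is needed
) as proxy:
    # Under the hood this opens a TCP connection to the proxy, performs the
    # SOCKS5 handshake via Socks5Connection, and upgrades to TLS for https.
    response = proxy.request("GET", "https://www.example.com/")
    print(response.status)
```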
|
.venv\Lib\site-packages\httpcore\_sync\socks_proxy.py
|
socks_proxy.py
|
Python
| 13,614 | 0.95 | 0.1261 | 0.049834 |
vue-tools
| 113 |
2024-09-16T18:38:53.925499
|
BSD-3-Clause
| false |
31236ddd5ae1611837d8403a28743ac5
|
from .connection import HTTPConnection\nfrom .connection_pool import ConnectionPool\nfrom .http11 import HTTP11Connection\nfrom .http_proxy import HTTPProxy\nfrom .interfaces import ConnectionInterface\n\ntry:\n from .http2 import HTTP2Connection\nexcept ImportError: # pragma: nocover\n\n class HTTP2Connection: # type: ignore\n def __init__(self, *args, **kwargs) -> None: # type: ignore\n raise RuntimeError(\n "Attempted to use http2 support, but the `h2` package is not "\n "installed. Use 'pip install httpcore[http2]'."\n )\n\n\ntry:\n from .socks_proxy import SOCKSProxy\nexcept ImportError: # pragma: nocover\n\n class SOCKSProxy: # type: ignore\n def __init__(self, *args, **kwargs) -> None: # type: ignore\n raise RuntimeError(\n "Attempted to use SOCKS support, but the `socksio` package is not "\n "installed. Use 'pip install httpcore[socks]'."\n )\n\n\n__all__ = [\n "HTTPConnection",\n "ConnectionPool",\n "HTTPProxy",\n "HTTP11Connection",\n "HTTP2Connection",\n "ConnectionInterface",\n "SOCKSProxy",\n]\n
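The try/except blocks above implement an optional-dependency guard: importing the package always succeeds, but the placeholder class raises a descriptive error only when the unavailable feature is actually used. A minimal sketch of the same pattern, applied to a hypothetical `fastcodec` package:

```python
# The optional-dependency guard pattern from the module above, applied to
# a hypothetical `fastcodec` package. Importing this module never fails;
# instantiating FastCodec without the dependency raises a clear error.
try:
    from fastcodec import FastCodec  # hypothetical optional dependency
except ImportError:

    class FastCodec:  # type: ignore
        def __init__(self, *args, **kwargs) -> None:  # type: ignore
            raise RuntimeError(
                "Attempted to use codec support, but the `fastcodec` "
                "package is not installed. Use 'pip install fastcodec'."
            )
```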
|
.venv\Lib\site-packages\httpcore\_sync\__init__.py
|
__init__.py
|
Python
| 1,141 | 0.95 | 0.153846 | 0 |
python-kit
| 519 |
2024-09-15T02:28:00.393637
|
BSD-3-Clause
| false |
523f60d79625fc83c75633d867fe180d
|
\n\n
|
.venv\Lib\site-packages\httpcore\_sync\__pycache__\connection.cpython-313.pyc
|
connection.cpython-313.pyc
|
Other
| 10,652 | 0.8 | 0 | 0 |
react-lib
| 598 |
2024-09-02T13:06:24.815107
|
MIT
| false |
fe94883697f326b7aab2d8924e0da4e5
|
\n\n
|
.venv\Lib\site-packages\httpcore\_sync\__pycache__\connection_pool.cpython-313.pyc
|
connection_pool.cpython-313.pyc
|
Other
| 18,766 | 0.8 | 0.040724 | 0 |
react-lib
| 863 |
2024-02-19T07:44:31.946545
|
BSD-3-Clause
| false |
7ae2c44b45478111e870750d1c0be434
|
\n\n
|
.venv\Lib\site-packages\httpcore\_sync\__pycache__\http11.cpython-313.pyc
|
http11.cpython-313.pyc
|
Other
| 18,127 | 0.8 | 0 | 0 |
node-utils
| 703 |
2025-02-15T06:48:34.146241
|
BSD-3-Clause
| false |
5ee48ba2d800dbd64ec79cb0ab698e94
|
\n\n
|
.venv\Lib\site-packages\httpcore\_sync\__pycache__\http2.cpython-313.pyc
|
http2.cpython-313.pyc
|
Other
| 27,828 | 0.95 | 0.023438 | 0 |
vue-tools
| 774 |
2023-10-24T15:40:13.251664
|
MIT
| false |
b02e5a70d34fdf332903b6448c1b0515
|
\n\n
|
.venv\Lib\site-packages\httpcore\_sync\__pycache__\http_proxy.cpython-313.pyc
|
http_proxy.cpython-313.pyc
|
Other
| 17,088 | 0.8 | 0.067073 | 0 |
python-kit
| 506 |
2025-01-02T08:45:16.826519
|
Apache-2.0
| false |
059b05696e895c2e89ea26cb485feb2e
|
\n\n
|
.venv\Lib\site-packages\httpcore\_sync\__pycache__\interfaces.cpython-313.pyc
|
interfaces.cpython-313.pyc
|
Other
| 5,168 | 0.95 | 0.119403 | 0.016393 |
awesome-app
| 840 |
2024-11-06T08:54:35.981910
|
BSD-3-Clause
| false |
df919ee8ee99785ebd31089a28c40298
|
\n\n
|
.venv\Lib\site-packages\httpcore\_sync\__pycache__\socks_proxy.cpython-313.pyc
|
socks_proxy.cpython-313.pyc
|
Other
| 15,700 | 0.8 | 0.040698 | 0 |
python-kit
| 30 |
2023-11-21T15:22:34.784347
|
BSD-3-Clause
| false |
1f2dd62aa74234cd833cc9b53879193a
|
\n\n
|
.venv\Lib\site-packages\httpcore\_sync\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 1,655 | 0.7 | 0 | 0 |
python-kit
| 820 |
2024-06-03T12:18:05.224180
|
BSD-3-Clause
| false |
83f6edc865f2800144c3ee3d6acf5733
|
\n\n
|
.venv\Lib\site-packages\httpcore\__pycache__\_api.cpython-313.pyc
|
_api.cpython-313.pyc
|
Other
| 3,612 | 0.95 | 0.043478 | 0 |
awesome-app
| 202 |
2024-03-27T20:19:22.390488
|
GPL-3.0
| false |
1e9fb60de1f7a05069d499bf747a380c
|
\n\n
|
.venv\Lib\site-packages\httpcore\__pycache__\_exceptions.cpython-313.pyc
|
_exceptions.cpython-313.pyc
|
Other
| 3,501 | 0.8 | 0 | 0 |
python-kit
| 875 |
2023-08-03T07:15:21.472295
|
Apache-2.0
| false |
49e35a7751d6931ea5a93c1ece327c0e
|
\n\n
|
.venv\Lib\site-packages\httpcore\__pycache__\_models.cpython-313.pyc
|
_models.cpython-313.pyc
|
Other
| 23,286 | 0.95 | 0.033333 | 0.014286 |
vue-tools
| 77 |
2023-12-20T13:06:23.532783
|
Apache-2.0
| false |
067618d7ff83f2a2fbccb5bd7fb94e40
|
\n\n
|
.venv\Lib\site-packages\httpcore\__pycache__\_ssl.cpython-313.pyc
|
_ssl.cpython-313.pyc
|
Other
| 610 | 0.7 | 0 | 0 |
awesome-app
| 479 |
2025-05-19T05:28:10.074724
|
BSD-3-Clause
| false |
27ddfdddeb44f3c5598880ad115a64ea
|
\n\n
|
.venv\Lib\site-packages\httpcore\__pycache__\_synchronization.cpython-313.pyc
|
_synchronization.cpython-313.pyc
|
Other
| 14,649 | 0.95 | 0.025316 | 0 |
node-utils
| 483 |
2024-10-04T09:13:30.288529
|
GPL-3.0
| false |
1beabb046a401bb0165a4f6438c04a06
|
\n\n
|
.venv\Lib\site-packages\httpcore\__pycache__\_trace.cpython-313.pyc
|
_trace.cpython-313.pyc
|
Other
| 5,869 | 0.95 | 0.065574 | 0 |
python-kit
| 437 |
2024-12-12T05:38:00.761702
|
BSD-3-Clause
| false |
e83ed325922339bd7e1007de7a65b82d
|
\n\n
|
.venv\Lib\site-packages\httpcore\__pycache__\_utils.cpython-313.pyc
|
_utils.cpython-313.pyc
|
Other
| 1,284 | 0.8 | 0 | 0 |
react-lib
| 83 |
2024-01-06T14:34:11.429635
|
MIT
| false |
e36b3cebd74e1318d1d4b313ba4bdfe4
|
\n\n
|
.venv\Lib\site-packages\httpcore\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 3,259 | 0.8 | 0 | 0 |
vue-tools
| 963 |
2025-03-18T03:44:00.612711
|
Apache-2.0
| false |
014163200a9e8de772dba84db7f0101b
|
pip\n
|
.venv\Lib\site-packages\httpcore-1.0.9.dist-info\INSTALLER
|
INSTALLER
|
Other
| 4 | 0.5 | 0 | 0 |
vue-tools
| 873 |
2023-08-09T12:45:33.639996
|
GPL-3.0
| false |
365c9bfeb7d89244f2ce01c1de44cb85
|
Metadata-Version: 2.4\nName: httpcore\nVersion: 1.0.9\nSummary: A minimal low-level HTTP client.\nProject-URL: Documentation, https://www.encode.io/httpcore\nProject-URL: Homepage, https://www.encode.io/httpcore/\nProject-URL: Source, https://github.com/encode/httpcore\nAuthor-email: Tom Christie <tom@tomchristie.com>\nLicense-Expression: BSD-3-Clause\nLicense-File: LICENSE.md\nClassifier: Development Status :: 3 - Alpha\nClassifier: Environment :: Web Environment\nClassifier: Framework :: AsyncIO\nClassifier: Framework :: Trio\nClassifier: Intended Audience :: Developers\nClassifier: License :: OSI Approved :: BSD License\nClassifier: Operating System :: OS Independent\nClassifier: Programming Language :: Python :: 3\nClassifier: Programming Language :: Python :: 3 :: Only\nClassifier: Programming Language :: Python :: 3.8\nClassifier: Programming Language :: Python :: 3.9\nClassifier: Programming Language :: Python :: 3.10\nClassifier: Programming Language :: Python :: 3.11\nClassifier: Programming Language :: Python :: 3.12\nClassifier: Topic :: Internet :: WWW/HTTP\nRequires-Python: >=3.8\nRequires-Dist: certifi\nRequires-Dist: h11>=0.16\nProvides-Extra: asyncio\nRequires-Dist: anyio<5.0,>=4.0; extra == 'asyncio'\nProvides-Extra: http2\nRequires-Dist: h2<5,>=3; extra == 'http2'\nProvides-Extra: socks\nRequires-Dist: socksio==1.*; extra == 'socks'\nProvides-Extra: trio\nRequires-Dist: trio<1.0,>=0.22.0; extra == 'trio'\nDescription-Content-Type: text/markdown\n\n# HTTP Core\n\n[](https://github.com/encode/httpcore/actions)\n[](https://pypi.org/project/httpcore/)\n\n> *Do one thing, and do it well.*\n\nThe HTTP Core package provides a minimal low-level HTTP client, which does\none thing only. Sending HTTP requests.\n\nIt does not provide any high level model abstractions over the API,\ndoes not handle redirects, multipart uploads, building authentication headers,\ntransparent HTTP caching, URL parsing, session cookie handling,\ncontent or charset decoding, handling JSON, environment based configuration\ndefaults, or any of that Jazz.\n\nSome things HTTP Core does do:\n\n* Sending HTTP requests.\n* Thread-safe / task-safe connection pooling.\n* HTTP(S) proxy & SOCKS proxy support.\n* Supports HTTP/1.1 and HTTP/2.\n* Provides both sync and async interfaces.\n* Async backend support for `asyncio` and `trio`.\n\n## Requirements\n\nPython 3.8+\n\n## Installation\n\nFor HTTP/1.1 only support, install with:\n\n```shell\n$ pip install httpcore\n```\n\nThere are also a number of optional extras available...\n\n```shell\n$ pip install httpcore['asyncio,trio,http2,socks']\n```\n\n## Sending requests\n\nSend an HTTP request:\n\n```python\nimport httpcore\n\nresponse = httpcore.request("GET", "https://www.example.com/")\n\nprint(response)\n# <Response [200]>\nprint(response.status)\n# 200\nprint(response.headers)\n# [(b'Accept-Ranges', b'bytes'), (b'Age', b'557328'), (b'Cache-Control', b'max-age=604800'), ...]\nprint(response.content)\n# b'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title>\n\n<meta charset="utf-8"/>\n ...'\n```\n\nThe top-level `httpcore.request()` function is provided for convenience. 
In practice whenever you're working with `httpcore` you'll want to use the connection pooling functionality that it provides.\n\n```python\nimport httpcore\n\nhttp = httpcore.ConnectionPool()\nresponse = http.request("GET", "https://www.example.com/")\n```\n\nOnce you're ready to get going, [head over to the documentation](https://www.encode.io/httpcore/).\n\n## Motivation\n\nYou *probably* don't want to be using HTTP Core directly. It might make sense if\nyou're writing something like a proxy service in Python, and you just want\nsomething at the lowest possible level, but more typically you'll want to use\na higher-level client library, such as `httpx`.\n\nThe motivation for `httpcore` is:\n\n* To provide a reusable low-level client library that other packages can then build on top of.\n* To provide a *really clear interface split* between the networking code and client logic,\n so that each is easier to understand and reason about in isolation.\n\n## Dependencies\n\nThe `httpcore` package has the following dependencies...\n\n* `h11`\n* `certifi`\n\nAnd the following optional extras...\n\n* `anyio` - Required by `pip install httpcore['asyncio']`.\n* `trio` - Required by `pip install httpcore['trio']`.\n* `h2` - Required by `pip install httpcore['http2']`.\n* `socksio` - Required by `pip install httpcore['socks']`.\n\n## Versioning\n\nWe use [SEMVER for our versioning policy](https://semver.org/).\n\nFor changes between package versions please see our [project changelog](CHANGELOG.md).\n\nWe recommend pinning your requirements to either the most current major version, or a more specific version range:\n\n```shell\npip install 'httpcore==1.*'\n```\n\n# Changelog\n\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).\n\n## Version 1.0.9 (April 24th, 2025)\n\n- Resolve https://github.com/advisories/GHSA-vqfr-h8mv-ghfj with h11 dependency update. (#1008)\n\n## Version 1.0.8 (April 11th, 2025)\n\n- Fix `AttributeError` when importing on Python 3.14. (#1005)\n\n## Version 1.0.7 (November 15th, 2024)\n\n- Support `proxy=…` configuration on `ConnectionPool()`. (#974)\n\n## Version 1.0.6 (October 1st, 2024)\n\n- Relax `trio` dependency pinning. (#956)\n- Handle `trio` raising `NotImplementedError` on unsupported platforms. (#955)\n- Handle mapping `ssl.SSLError` to `httpcore.ConnectError`. (#918)\n\n## 1.0.5 (March 27th, 2024)\n\n- Handle `EndOfStream` exception for anyio backend. (#899)\n- Allow trio `0.25.*` series in package dependencies. (#903)\n\n## 1.0.4 (February 21st, 2024)\n\n- Add `target` request extension. (#888)\n- Fix support for connection `Upgrade` and `CONNECT` when some data in the stream has been read. (#882)\n\n## 1.0.3 (February 13th, 2024)\n\n- Fix support for async cancellations. (#880)\n- Fix trace extension when used with socks proxy. (#849)\n- Fix SSL context for connections using the "wss" scheme. (#869)\n\n## 1.0.2 (November 10th, 2023)\n\n- Fix `float("inf")` timeouts in `Event.wait` function. (#846)\n\n## 1.0.1 (November 3rd, 2023)\n\n- Fix pool timeout to account for the total time spent retrying. (#823)\n- Raise a neater RuntimeError when the correct async deps are not installed. (#826)\n- Add support for synchronous TLS-in-TLS streams. 
(#840)\n\n## 1.0.0 (October 6th, 2023)\n\nFrom version 1.0 our async support is now optional, as the package has minimal dependencies by default.\n\nFor async support use either `pip install 'httpcore[asyncio]'` or `pip install 'httpcore[trio]'`.\n\nThe project versioning policy is now explicitly governed by SEMVER. See https://semver.org/.\n\n- Async support becomes fully optional. (#809)\n- Add support for Python 3.12. (#807)\n\n## 0.18.0 (September 8th, 2023)\n\n- Add support for HTTPS proxies. (#745, #786)\n- Drop Python 3.7 support. (#727)\n- Handle `sni_hostname` extension with SOCKS proxy. (#774)\n- Handle HTTP/1.1 half-closed connections gracefully. (#641)\n- Change the type of `Extensions` from `Mapping[Str, Any]` to `MutableMapping[Str, Any]`. (#762)\n\n## 0.17.3 (July 5th, 2023)\n\n- Support async cancellations, ensuring that the connection pool is left in a clean state when cancellations occur. (#726)\n- The networking backend interface has [been added to the public API](https://www.encode.io/httpcore/network-backends). Some classes which were previously a private implementation detail are now part of the top-level public API. (#699)\n- Graceful handling of HTTP/2 GoAway frames, with requests being transparently retried on a new connection. (#730)\n- Add exceptions when a synchronous `trace callback` is passed to an asynchronous request or an asynchronous `trace callback` is passed to a synchronous request. (#717)\n- Drop Python 3.7 support. (#727)\n\n## 0.17.2 (May 23rd, 2023)\n\n- Add `socket_options` argument to `ConnectionPool` and `HTTPProxy` classes. (#668)\n- Improve logging with per-module logger names. (#690)\n- Add `sni_hostname` request extension. (#696)\n- Resolve race condition during import of `anyio` package. (#692)\n- Enable TCP_NODELAY for all synchronous sockets. (#651)\n\n## 0.17.1 (May 17th, 2023)\n\n- If 'retries' is set, then allow retries if an SSL handshake error occurs. (#669)\n- Improve correctness of tracebacks on network exceptions, by raising properly chained exceptions. (#678)\n- Prevent connection-hanging behaviour when HTTP/2 connections are closed by a server-sent 'GoAway' frame. (#679)\n- Fix edge-case exception when removing requests from the connection pool. (#680)\n- Fix pool timeout edge-case. (#688)\n\n## 0.17.0 (March 16th, 2023)\n\n- Add DEBUG level logging. (#648)\n- Respect HTTP/2 max concurrent streams when settings updates are sent by server. (#652)\n- Increase the allowable HTTP header size to 100kB. (#647)\n- Add `retries` option to SOCKS proxy classes. (#643)\n\n## 0.16.3 (December 20th, 2022)\n\n- Allow `ws` and `wss` schemes. Allows us to properly support websocket upgrade connections. (#625)\n- Forwarding HTTP proxies use a connection-per-remote-host. Required by some proxy implementations. (#637)\n- Don't raise `RuntimeError` when closing a connection pool with active connections. Removes some error cases when cancellations are used. (#631)\n- Lazy import `anyio`, so that it's no longer a hard dependency, and isn't imported if unused. (#639)\n\n## 0.16.2 (November 25th, 2022)\n\n- Revert 'Fix async cancellation behaviour', which introduced race conditions. (#627)\n- Raise `RuntimeError` if attempting to use UNIX domain sockets on Windows. (#619)\n\n## 0.16.1 (November 17th, 2022)\n\n- Fix HTTP/1.1 interim informational responses, such as "100 Continue". (#605)\n\n## 0.16.0 (October 11th, 2022)\n\n- Support HTTP/1.1 informational responses. (#581)\n- Fix async cancellation behaviour. (#580)\n- Support `h11` 0.14. 
(#579)\n\n## 0.15.0 (May 17th, 2022)\n\n- Drop Python 3.6 support. (#535)\n- Ensure HTTP proxy CONNECT requests include `timeout` configuration. (#506)\n- Switch to explicit `typing.Optional` for type hints. (#513)\n- For `trio` map OSError exceptions to `ConnectError`. (#543)\n\n## 0.14.7 (February 4th, 2022)\n\n- Requests which raise a PoolTimeout need to be removed from the pool queue. (#502)\n- Fix `AttributeError` that happened when a `Socks5Connection` was terminated. (#501)\n\n## 0.14.6 (February 1st, 2022)\n\n- Fix SOCKS support for `http://` URLs. (#492)\n- Resolve race condition around exceptions during streaming a response. (#491)\n\n## 0.14.5 (January 18th, 2022)\n\n- SOCKS proxy support. (#478)\n- Add proxy_auth argument to HTTPProxy. (#481)\n- Improve error message on 'RemoteProtocolError' exception when server disconnects without sending a response. (#479)\n\n## 0.14.4 (January 5th, 2022)\n\n- Support HTTP/2 on HTTPS tunnelling proxies. (#468)\n- Fix proxy headers missing on HTTP forwarding. (#456)\n- Only instantiate SSL context if required. (#457)\n- More robust HTTP/2 handling. (#253, #439, #440, #441)\n\n## 0.14.3 (November 17th, 2021)\n\n- Fix race condition when removing closed connections from the pool. (#437)\n\n## 0.14.2 (November 16th, 2021)\n\n- Failed connections no longer remain in the pool. (Pull #433)\n\n## 0.14.1 (November 12th, 2021)\n\n- `max_connections` becomes optional. (Pull #429)\n- `certifi` is now included in the install dependencies. (Pull #428)\n- `h2` is now strictly optional. (Pull #428)\n\n## 0.14.0 (November 11th, 2021)\n\nThe 0.14 release is a complete reworking of `httpcore`, comprehensively addressing some underlying issues in the connection pooling, as well as substantially redesigning the API to be more user friendly.\n\nSome of the lower-level API design also makes the components more easily testable in isolation, and the package now has 100% test coverage.\n\nSee [discussion #419](https://github.com/encode/httpcore/discussions/419) for a little more background.\n\nThere are some other neat bits in there too, such as the "trace" extension, which gives a hook into inspecting the internal events that occur during the request/response cycle. This extension is needed for the HTTPX cli, in order to...\n\n* Log the point at which the connection is established, and the IP/port on which it is made.\n* Determine if the outgoing request should log as HTTP/1.1 or HTTP/2, rather than having to assume it's HTTP/2 if the --http2 flag was passed. (Which may not actually be true.)\n* Log SSL version info / certificate info.\n\nNote that `curio` support is not currently available in 0.14.0. If you're using `httpcore` with `curio` please get in touch, so we can assess if we ought to prioritize it as a feature or not.\n\n## 0.13.7 (September 13th, 2021)\n\n- Fix broken error messaging when URL scheme is missing, or a non HTTP(S) scheme is used. (Pull #403)\n\n## 0.13.6 (June 15th, 2021)\n\n### Fixed\n\n- Close sockets when read or write timeouts occur. (Pull #365)\n\n## 0.13.5 (June 14th, 2021)\n\n### Fixed\n\n- Resolved niggles with AnyIO EOF behaviours. (Pull #358, #362)\n\n## 0.13.4 (June 9th, 2021)\n\n### Added\n\n- Improved error messaging when URL scheme is missing, or a non HTTP(S) scheme is used. (Pull #354)\n\n### Fixed\n\n- Switched to `anyio` as the default backend implementation when running with `asyncio`. 
Resolves some awkward [TLS timeout issues](https://github.com/encode/httpx/discussions/1511).\n\n## 0.13.3 (May 6th, 2021)\n\n### Added\n\n- Support HTTP/2 prior knowledge, using `httpcore.SyncConnectionPool(http1=False)`. (Pull #333)\n\n### Fixed\n\n- Handle cases where environment does not provide `select.poll` support. (Pull #331)\n\n## 0.13.2 (April 29th, 2021)\n\n### Added\n\n- Improve error message for specific case of `RemoteProtocolError` where server disconnects without sending a response. (Pull #313)\n\n## 0.13.1 (April 28th, 2021)\n\n### Fixed\n\n- More resilient testing for closed connections. (Pull #311)\n- Don't raise exceptions on ungraceful connection closes. (Pull #310)\n\n## 0.13.0 (April 21st, 2021)\n\nThe 0.13 release updates the core API in order to match the HTTPX Transport API,\nintroduced in HTTPX 0.18 onwards.\n\nAn example of making requests with the new interface is:\n\n```python\nwith httpcore.SyncConnectionPool() as http:\n    status_code, headers, stream, extensions = http.handle_request(\n        method=b'GET',\n        url=(b'https', b'example.org', 443, b'/'),\n        headers=[(b'host', b'example.org'), (b'user-agent', b'httpcore')],\n        stream=httpcore.ByteStream(b''),\n        extensions={}\n    )\n    body = stream.read()\n    print(status_code, body)\n```\n\n### Changed\n\n- The `.request()` method is now `handle_request()`. (Pull #296)\n- The `.arequest()` method is now `.handle_async_request()`. (Pull #296)\n- The `headers` argument is no longer optional. (Pull #296)\n- The `stream` argument is no longer optional. (Pull #296)\n- The `ext` argument is now named `extensions`, and is no longer optional. (Pull #296)\n- The `"reason"` extension keyword is now named `"reason_phrase"`. (Pull #296)\n- The `"reason_phrase"` and `"http_version"` extensions now use byte strings for their values. (Pull #296)\n- The `httpcore.PlainByteStream()` class becomes `httpcore.ByteStream()`. (Pull #296)\n\n### Added\n\n- Streams now support a `.read()` interface. (Pull #296)\n\n### Fixed\n\n- Task cancellation no longer leaks connections from the connection pool. (Pull #305)\n\n## 0.12.3 (December 7th, 2020)\n\n### Fixed\n\n- Abort SSL connections on close rather than waiting for remote EOF when using `asyncio`. (Pull #167)\n- Fix exception raised in case of connect timeouts when using the `anyio` backend. (Pull #236)\n- Fix `Host` header precedence for `:authority` in HTTP/2. (Pull #241, #243)\n- Handle extra edge case when detecting for socket readability when using `asyncio`. (Pull #242, #244)\n- Fix `asyncio` SSL warning when using proxy tunneling. (Pull #249)\n\n## 0.12.2 (November 20th, 2020)\n\n### Fixed\n\n- Properly wrap connect errors on the asyncio backend. (Pull #235)\n- Fix `ImportError` occurring on Python 3.9 when using the HTTP/1.1 sync client in a multithreaded context. (Pull #237)\n\n## 0.12.1 (November 7th, 2020)\n\n### Added\n\n- Add connect retries. (Pull #221)\n\n### Fixed\n\n- Tweak detection of dropped connections, resolving an issue with open files limits on Linux. (Pull #185)\n- Avoid leaking connections when establishing an HTTP tunnel to a proxy has failed. (Pull #223)\n- Properly wrap OS errors when using `trio`. (Pull #225)\n\n## 0.12.0 (October 6th, 2020)\n\n### Changed\n\n- HTTP header casing is now preserved, rather than always sent in lowercase. (#216 and python-hyper/h11#104)\n\n### Added\n\n- Add Python 3.9 to officially supported versions.\n\n### Fixed\n\n- Gracefully handle a stdlib asyncio bug when a connection is closed while it is in a paused-for-reading state. 
(#201)\n\n## 0.11.1 (September 28th, 2020)\n\n### Fixed\n\n- Add await to async semaphore release() coroutine. (#197)\n- Drop incorrect curio classifier. (#192)\n\n## 0.11.0 (September 22nd, 2020)\n\nThe Transport API with 0.11.0 has a couple of significant changes.\n\nFirstly, we've changed the request interface in order to allow extensions, which will later enable us to support features\nsuch as trailing headers, HTTP/2 server push, and CONNECT/Upgrade connections.\n\nThe interface changes from:\n\n```python\ndef request(method, url, headers, stream, timeout):\n    return (http_version, status_code, reason, headers, stream)\n```\n\nTo instead include an optional dictionary of extensions on the request and response:\n\n```python\ndef request(method, url, headers, stream, ext):\n    return (status_code, headers, stream, ext)\n```\n\nHaving an open-ended extensions point will allow us to add later support for various optional features that wouldn't otherwise be supported without these API changes.\n\nIn particular:\n\n* Trailing headers support.\n* HTTP/2 server push.\n* sendfile.\n* Exposing raw connection on CONNECT, Upgrade, HTTP/2 bi-di streaming.\n* Exposing debug information out of the API, including template name, template context.\n\nCurrently extensions are limited to:\n\n* request: `timeout` - Optional. Timeout dictionary.\n* response: `http_version` - Optional. Include the HTTP version used on the response.\n* response: `reason` - Optional. Include the reason phrase used on the response. Only valid with HTTP/1.*.\n\nSee https://github.com/encode/httpx/issues/1274#issuecomment-694884553 for the history behind this.\n\nSecondly, the async version of `request` is now namespaced as `arequest`.\n\nThis allows concrete transports to support both sync and async implementations on the same class.\n\n### Added\n\n- Add curio support. (Pull #168)\n- Add anyio support, with `backend="anyio"`. (Pull #169)\n\n### Changed\n\n- Update the Transport API to use 'ext' for optional extensions. (Pull #190)\n- Update the Transport API to use `.request` and `.arequest` so implementations can support both sync and async. (Pull #189)\n\n## 0.10.2 (August 20th, 2020)\n\n### Added\n\n- Added Unix Domain Socket support. (Pull #139)\n\n### Fixed\n\n- Always include the port on proxy CONNECT requests. (Pull #154)\n- Fix `max_keepalive_connections` configuration. (Pull #153)\n- Fixes behaviour in HTTP/1.1 where server disconnects can be used to signal the end of the response body. (Pull #164)\n\n## 0.10.1 (August 7th, 2020)\n\n- Include `max_keepalive_connections` on `AsyncHTTPProxy`/`SyncHTTPProxy` classes.\n\n## 0.10.0 (August 7th, 2020)\n\nThe most notable change in the 0.10.0 release is that HTTP/2 support is now fully optional.\n\nUse either `pip install httpcore` for HTTP/1.1 support only, or `pip install httpcore[http2]` for HTTP/1.1 and HTTP/2 support.\n\n### Added\n\n- HTTP/2 support becomes optional. (Pull #121, #130)\n- Add `local_address=...` support. (Pull #100, #134)\n- Add `PlainByteStream`, `IteratorByteStream`, `AsyncIteratorByteStream`. The `AsyncByteStream` and `SyncByteStream` classes are now pure interface classes. (#133)\n- Add `LocalProtocolError`, `RemoteProtocolError` exceptions. (Pull #129)\n- Add `UnsupportedProtocol` exception. (Pull #128)\n- Add `.get_connection_info()` method. (Pull #102, #137)\n- Add better TRACE logs. (Pull #101)\n\n### Changed\n\n- `max_keepalive` is deprecated in favour of `max_keepalive_connections`. 
(Pull #140)\n\n### Fixed\n\n- Improve handling of server disconnects. (Pull #112)\n\n## 0.9.1 (May 27th, 2020)\n\n### Fixed\n\n- Proper host resolution for sync case, including IPv6 support. (Pull #97)\n- Close outstanding connections when connection pool is closed. (Pull #98)\n\n## 0.9.0 (May 21st, 2020)\n\n### Changed\n\n- URL port becomes an `Optional[int]` instead of `int`. (Pull #92)\n\n### Fixed\n\n- Honor HTTP/2 max concurrent streams settings. (Pull #89, #90)\n- Remove incorrect debug log. (Pull #83)\n\n## 0.8.4 (May 11th, 2020)\n\n### Added\n\n- Logging via HTTPCORE_LOG_LEVEL and HTTPX_LOG_LEVEL environment variables\nand TRACE level logging. (Pull #79)\n\n### Fixed\n\n- Reuse of connections on HTTP/2 in close concurrency situations. (Pull #81)\n\n## 0.8.3 (May 6th, 2020)\n\n### Fixed\n\n- Include `Host` and `Accept` headers on proxy "CONNECT" requests.\n- De-duplicate any headers also contained in proxy_headers.\n- HTTP/2 flag not being passed down to proxy connections.\n\n## 0.8.2 (May 3rd, 2020)\n\n### Fixed\n\n- Fix connections using proxy forwarding requests not being added to the\nconnection pool properly. (Pull #70)\n\n## 0.8.1 (April 30th, 2020)\n\n### Changed\n\n- Allow inheritance of both `httpcore.AsyncByteStream` and `httpcore.SyncByteStream` without type conflicts.\n\n## 0.8.0 (April 30th, 2020)\n\n### Fixed\n\n- Fixed tunnel proxy support.\n\n### Added\n\n- New `TimeoutException` base class.\n\n## 0.7.0 (March 5th, 2020)\n\n- First integration with HTTPX.\n
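The "trace" extension described in the 0.14.0 notes can be exercised as below. The callback receives an event name plus an info dict for each internal event (connection setup, header and body phases); exactly which events fire depends on the request, so treat the output as illustrative.

```python
# A minimal sketch of the "trace" extension from the 0.14.0 notes above.
import httpcore

def log_event(event_name: str, info: dict) -> None:
    # e.g. "connection.connect_tcp.started", "http11.send_request_headers.complete"
    print(event_name, info)

response = httpcore.request(
    "GET", "https://www.example.com/", extensions={"trace": log_event}
)
print(response.status)
```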
|
.venv\Lib\site-packages\httpcore-1.0.9.dist-info\METADATA
|
METADATA
|
Other
| 21,529 | 0.95 | 0.0736 | 0.312655 |
awesome-app
| 45 |
2024-05-09T02:44:42.692252
|
GPL-3.0
| false |
93e5194aa8681a9afad54f0403bc147a
|
httpcore-1.0.9.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4\nhttpcore-1.0.9.dist-info/METADATA,sha256=_i1P2mGZEol4d54M8n88BFxTGGP83Zh-rMdPOhjUHCE,21529\nhttpcore-1.0.9.dist-info/RECORD,,\nhttpcore-1.0.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87\nhttpcore-1.0.9.dist-info/licenses/LICENSE.md,sha256=_ctZFUx0y6uhahEkL3dAvqnyPW_rVUeRfYxflKgDkqU,1518\nhttpcore/__init__.py,sha256=9kT_kqChCCJUTHww24ZmR_ezcdbpRYWksD-gYNzkZP8,3445\nhttpcore/__pycache__/__init__.cpython-313.pyc,,\nhttpcore/__pycache__/_api.cpython-313.pyc,,\nhttpcore/__pycache__/_exceptions.cpython-313.pyc,,\nhttpcore/__pycache__/_models.cpython-313.pyc,,\nhttpcore/__pycache__/_ssl.cpython-313.pyc,,\nhttpcore/__pycache__/_synchronization.cpython-313.pyc,,\nhttpcore/__pycache__/_trace.cpython-313.pyc,,\nhttpcore/__pycache__/_utils.cpython-313.pyc,,\nhttpcore/_api.py,sha256=unZmeDschBWCGCPCwkS3Wot9euK6bg_kKxLtGTxw214,3146\nhttpcore/_async/__init__.py,sha256=EWdl2v4thnAHzJpqjU4h2a8DUiGAvNiWrkii9pfhTf0,1221\nhttpcore/_async/__pycache__/__init__.cpython-313.pyc,,\nhttpcore/_async/__pycache__/connection.cpython-313.pyc,,\nhttpcore/_async/__pycache__/connection_pool.cpython-313.pyc,,\nhttpcore/_async/__pycache__/http11.cpython-313.pyc,,\nhttpcore/_async/__pycache__/http2.cpython-313.pyc,,\nhttpcore/_async/__pycache__/http_proxy.cpython-313.pyc,,\nhttpcore/_async/__pycache__/interfaces.cpython-313.pyc,,\nhttpcore/_async/__pycache__/socks_proxy.cpython-313.pyc,,\nhttpcore/_async/connection.py,sha256=6OcPXqMEfc0BU38_-iHUNDd1vKSTc2UVT09XqNb_BOk,8449\nhttpcore/_async/connection_pool.py,sha256=DOIQ2s2ZCf9qfwxhzMprTPLqCL8OxGXiKF6qRHxvVyY,17307\nhttpcore/_async/http11.py,sha256=-qM9bV7PjSQF5vxs37-eUXOIFwbIjPcZbNliuX9TtBw,13880\nhttpcore/_async/http2.py,sha256=azX1fcmtXaIwjputFlZ4vd92J8xwjGOa9ax9QIv4394,23936\nhttpcore/_async/http_proxy.py,sha256=2zVkrlv-Ds-rWGaqaXlrhEJiAQFPo23BT3Gq_sWoBXU,14701\nhttpcore/_async/interfaces.py,sha256=jTiaWL83pgpGC9ziv90ZfwaKNMmHwmOalzaKiuTxATo,4455\nhttpcore/_async/socks_proxy.py,sha256=lLKgLlggPfhFlqi0ODeBkOWvt9CghBBUyqsnsU1tx6Q,13841\nhttpcore/_backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\nhttpcore/_backends/__pycache__/__init__.cpython-313.pyc,,\nhttpcore/_backends/__pycache__/anyio.cpython-313.pyc,,\nhttpcore/_backends/__pycache__/auto.cpython-313.pyc,,\nhttpcore/_backends/__pycache__/base.cpython-313.pyc,,\nhttpcore/_backends/__pycache__/mock.cpython-313.pyc,,\nhttpcore/_backends/__pycache__/sync.cpython-313.pyc,,\nhttpcore/_backends/__pycache__/trio.cpython-313.pyc,,\nhttpcore/_backends/anyio.py,sha256=x8PgEhXRC8bVqsdzk_YJx8Y6d9Tub06CuUSwnbmtqoY,5252\nhttpcore/_backends/auto.py,sha256=zO136PKZmsaTDK-HRk84eA-MUg8_2wJf4NvmK432Aio,1662\nhttpcore/_backends/base.py,sha256=aShgRdZnMmRhFWHetjumlM73f8Kz1YOAyCUP_4kHslA,3042\nhttpcore/_backends/mock.py,sha256=er9T436uSe7NLrfiLa4x6Nuqg5ivQ693CxWYCWsgbH4,4077\nhttpcore/_backends/sync.py,sha256=bhE4d9iK9Umxdsdsgm2EfKnXaBms2WggGYU-7jmUujU,7977\nhttpcore/_backends/trio.py,sha256=LHu4_Mr5MswQmmT3yE4oLgf9b_JJfeVS4BjDxeJc7Ro,5996\nhttpcore/_exceptions.py,sha256=looCKga3_YVYu3s-d3L9RMPRJyhsY7fiuuGxvkOD0c0,1184\nhttpcore/_models.py,sha256=IO2CcXcdpovRcLTdGFGB6RyBZdEm2h_TOmoCc4rEKho,17623\nhttpcore/_ssl.py,sha256=srqmSNU4iOUvWF-SrJvb8G_YEbHFELOXQOwdDIBTS9c,187\nhttpcore/_sync/__init__.py,sha256=JBDIgXt5la1LCJ1sLQeKhjKFpLnpNr8Svs6z2ni3fgg,1141\nhttpcore/_sync/__pycache__/__init__.cpython-313.pyc,,\nhttpcore/_sync/__pycache__/connection.cpython-313.pyc,,\nhttpcore/_sync/__pycache__/connection_pool.cpy
thon-313.pyc,,\nhttpcore/_sync/__pycache__/http11.cpython-313.pyc,,\nhttpcore/_sync/__pycache__/http2.cpython-313.pyc,,\nhttpcore/_sync/__pycache__/http_proxy.cpython-313.pyc,,\nhttpcore/_sync/__pycache__/interfaces.cpython-313.pyc,,\nhttpcore/_sync/__pycache__/socks_proxy.cpython-313.pyc,,\nhttpcore/_sync/connection.py,sha256=9exGOb3PB-Mp2T1-sckSeL2t-tJ_9-NXomV8ihmWCgU,8238\nhttpcore/_sync/connection_pool.py,sha256=a-T8LTsUxc7r0Ww1atfHSDoWPjQ0fA8Ul7S3-F0Mj70,16955\nhttpcore/_sync/http11.py,sha256=IFobD1Md5JFlJGKWnh1_Q3epikUryI8qo09v8MiJIEA,13476\nhttpcore/_sync/http2.py,sha256=AxU4yhcq68Bn5vqdJYtiXKYUj7nvhYbxz3v4rT4xnvA,23400\nhttpcore/_sync/http_proxy.py,sha256=_al_6crKuEZu2wyvu493RZImJdBJnj5oGKNjLOJL2Zo,14463\nhttpcore/_sync/interfaces.py,sha256=snXON42vUDHO5JBJvo8D4VWk2Wat44z2OXXHDrjbl94,4344\nhttpcore/_sync/socks_proxy.py,sha256=zegZW9Snqj2_992DFJa8_CppOVBkVL4AgwduRkStakQ,13614\nhttpcore/_synchronization.py,sha256=zSi13mAColBnknjZBknUC6hKNDQT4C6ijnezZ-r0T2s,9434\nhttpcore/_trace.py,sha256=ck6ZoIzYTkdNAIfq5MGeKqBXDtqjOX-qfYwmZFbrGco,3952\nhttpcore/_utils.py,sha256=_RLgXYOAYC350ikALV59GZ68IJrdocRZxPs9PjmzdFY,1537\nhttpcore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\n
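For reference, the RECORD file above is plain CSV: one `path,hash,size` row per installed file, where the hash field is a `sha256=<urlsafe-base64 digest>` token and is left empty for entries (such as RECORD itself) that are not hashed. A sketch of reading it, with an illustrative on-disk path:

```python
# A minimal sketch of parsing a wheel RECORD file like the one above.
import csv

with open("httpcore-1.0.9.dist-info/RECORD", newline="") as f:  # illustrative path
    for row in csv.reader(f):
        if not row:
            continue  # tolerate blank lines
        path, digest, size = row
        print(f"{path}  {digest or '<no hash>'}  {size or '?'} bytes")
```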
|
.venv\Lib\site-packages\httpcore-1.0.9.dist-info\RECORD
|
RECORD
|
Other
| 4,762 | 0.7 | 0 | 0 |
vue-tools
| 66 |
2025-05-30T23:01:14.706444
|
BSD-3-Clause
| false |
6d76c9e31fdecc43e94a9852e457b9ed
|
Wheel-Version: 1.0\nGenerator: hatchling 1.27.0\nRoot-Is-Purelib: true\nTag: py3-none-any\n
|
.venv\Lib\site-packages\httpcore-1.0.9.dist-info\WHEEL
|
WHEEL
|
Other
| 87 | 0.5 | 0 | 0 |
node-utils
| 46 |
2024-02-23T04:23:48.672859
|
Apache-2.0
| false |
e2fcb0ad9ea59332c808928b4b439e7a
|
Copyright © 2020, [Encode OSS Ltd](https://www.encode.io/).\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n
|
.venv\Lib\site-packages\httpcore-1.0.9.dist-info\licenses\LICENSE.md
|
LICENSE.md
|
Markdown
| 1,518 | 0.8 | 0 | 0.136364 |
react-lib
| 181 |
2024-08-21T01:09:16.969775
|
Apache-2.0
| false |
1c1f23b073da202e1f4f9e426490210c
|
from __future__ import annotations\n\nimport typing\nfrom contextlib import contextmanager\n\nfrom ._client import Client\nfrom ._config import DEFAULT_TIMEOUT_CONFIG\nfrom ._models import Response\nfrom ._types import (\n AuthTypes,\n CookieTypes,\n HeaderTypes,\n ProxyTypes,\n QueryParamTypes,\n RequestContent,\n RequestData,\n RequestFiles,\n TimeoutTypes,\n)\nfrom ._urls import URL\n\nif typing.TYPE_CHECKING:\n import ssl # pragma: no cover\n\n\n__all__ = [\n "delete",\n "get",\n "head",\n "options",\n "patch",\n "post",\n "put",\n "request",\n "stream",\n]\n\n\ndef request(\n method: str,\n url: URL | str,\n *,\n params: QueryParamTypes | None = None,\n content: RequestContent | None = None,\n data: RequestData | None = None,\n files: RequestFiles | None = None,\n json: typing.Any | None = None,\n headers: HeaderTypes | None = None,\n cookies: CookieTypes | None = None,\n auth: AuthTypes | None = None,\n proxy: ProxyTypes | None = None,\n timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,\n follow_redirects: bool = False,\n verify: ssl.SSLContext | str | bool = True,\n trust_env: bool = True,\n) -> Response:\n """\n Sends an HTTP request.\n\n **Parameters:**\n\n * **method** - HTTP method for the new `Request` object: `GET`, `OPTIONS`,\n `HEAD`, `POST`, `PUT`, `PATCH`, or `DELETE`.\n * **url** - URL for the new `Request` object.\n * **params** - *(optional)* Query parameters to include in the URL, as a\n string, dictionary, or sequence of two-tuples.\n * **content** - *(optional)* Binary content to include in the body of the\n request, as bytes or a byte iterator.\n * **data** - *(optional)* Form data to include in the body of the request,\n as a dictionary.\n * **files** - *(optional)* A dictionary of upload files to include in the\n body of the request.\n * **json** - *(optional)* A JSON serializable object to include in the body\n of the request.\n * **headers** - *(optional)* Dictionary of HTTP headers to include in the\n request.\n * **cookies** - *(optional)* Dictionary of Cookie items to include in the\n request.\n * **auth** - *(optional)* An authentication class to use when sending the\n request.\n * **proxy** - *(optional)* A proxy URL where all the traffic should be routed.\n * **timeout** - *(optional)* The timeout configuration to use when sending\n the request.\n * **follow_redirects** - *(optional)* Enables or disables HTTP redirects.\n * **verify** - *(optional)* Either `True` to use an SSL context with the\n default CA bundle, `False` to disable verification, or an instance of\n `ssl.SSLContext` to use a custom context.\n * **trust_env** - *(optional)* Enables or disables usage of environment\n variables for configuration.\n\n **Returns:** `Response`\n\n Usage:\n\n ```\n >>> import httpx\n >>> response = httpx.request('GET', 'https://httpbin.org/get')\n >>> response\n <Response [200 OK]>\n ```\n """\n with Client(\n cookies=cookies,\n proxy=proxy,\n verify=verify,\n timeout=timeout,\n trust_env=trust_env,\n ) as client:\n return client.request(\n method=method,\n url=url,\n content=content,\n data=data,\n files=files,\n json=json,\n params=params,\n headers=headers,\n auth=auth,\n follow_redirects=follow_redirects,\n )\n\n\n@contextmanager\ndef stream(\n method: str,\n url: URL | str,\n *,\n params: QueryParamTypes | None = None,\n content: RequestContent | None = None,\n data: RequestData | None = None,\n files: RequestFiles | None = None,\n json: typing.Any | None = None,\n headers: HeaderTypes | None = None,\n cookies: CookieTypes | None = None,\n auth: AuthTypes | 
None = None,\n proxy: ProxyTypes | None = None,\n timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,\n follow_redirects: bool = False,\n verify: ssl.SSLContext | str | bool = True,\n trust_env: bool = True,\n) -> typing.Iterator[Response]:\n """\n Alternative to `httpx.request()` that streams the response body\n instead of loading it into memory at once.\n\n **Parameters**: See `httpx.request`.\n\n See also: [Streaming Responses][0]\n\n [0]: /quickstart#streaming-responses\n """\n with Client(\n cookies=cookies,\n proxy=proxy,\n verify=verify,\n timeout=timeout,\n trust_env=trust_env,\n ) as client:\n with client.stream(\n method=method,\n url=url,\n content=content,\n data=data,\n files=files,\n json=json,\n params=params,\n headers=headers,\n auth=auth,\n follow_redirects=follow_redirects,\n ) as response:\n yield response\n\n\ndef get(\n url: URL | str,\n *,\n params: QueryParamTypes | None = None,\n headers: HeaderTypes | None = None,\n cookies: CookieTypes | None = None,\n auth: AuthTypes | None = None,\n proxy: ProxyTypes | None = None,\n follow_redirects: bool = False,\n verify: ssl.SSLContext | str | bool = True,\n timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,\n trust_env: bool = True,\n) -> Response:\n """\n Sends a `GET` request.\n\n **Parameters**: See `httpx.request`.\n\n Note that the `data`, `files`, `json` and `content` parameters are not available\n on this function, as `GET` requests should not include a request body.\n """\n return request(\n "GET",\n url,\n params=params,\n headers=headers,\n cookies=cookies,\n auth=auth,\n proxy=proxy,\n follow_redirects=follow_redirects,\n verify=verify,\n timeout=timeout,\n trust_env=trust_env,\n )\n\n\ndef options(\n url: URL | str,\n *,\n params: QueryParamTypes | None = None,\n headers: HeaderTypes | None = None,\n cookies: CookieTypes | None = None,\n auth: AuthTypes | None = None,\n proxy: ProxyTypes | None = None,\n follow_redirects: bool = False,\n verify: ssl.SSLContext | str | bool = True,\n timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,\n trust_env: bool = True,\n) -> Response:\n """\n Sends an `OPTIONS` request.\n\n **Parameters**: See `httpx.request`.\n\n Note that the `data`, `files`, `json` and `content` parameters are not available\n on this function, as `OPTIONS` requests should not include a request body.\n """\n return request(\n "OPTIONS",\n url,\n params=params,\n headers=headers,\n cookies=cookies,\n auth=auth,\n proxy=proxy,\n follow_redirects=follow_redirects,\n verify=verify,\n timeout=timeout,\n trust_env=trust_env,\n )\n\n\ndef head(\n url: URL | str,\n *,\n params: QueryParamTypes | None = None,\n headers: HeaderTypes | None = None,\n cookies: CookieTypes | None = None,\n auth: AuthTypes | None = None,\n proxy: ProxyTypes | None = None,\n follow_redirects: bool = False,\n verify: ssl.SSLContext | str | bool = True,\n timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,\n trust_env: bool = True,\n) -> Response:\n """\n Sends a `HEAD` request.\n\n **Parameters**: See `httpx.request`.\n\n Note that the `data`, `files`, `json` and `content` parameters are not available\n on this function, as `HEAD` requests should not include a request body.\n """\n return request(\n "HEAD",\n url,\n params=params,\n headers=headers,\n cookies=cookies,\n auth=auth,\n proxy=proxy,\n follow_redirects=follow_redirects,\n verify=verify,\n timeout=timeout,\n trust_env=trust_env,\n )\n\n\ndef post(\n url: URL | str,\n *,\n content: RequestContent | None = None,\n data: RequestData | None = None,\n files: RequestFiles | None = None,\n json: 
typing.Any | None = None,\n params: QueryParamTypes | None = None,\n headers: HeaderTypes | None = None,\n cookies: CookieTypes | None = None,\n auth: AuthTypes | None = None,\n proxy: ProxyTypes | None = None,\n follow_redirects: bool = False,\n verify: ssl.SSLContext | str | bool = True,\n timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,\n trust_env: bool = True,\n) -> Response:\n """\n Sends a `POST` request.\n\n **Parameters**: See `httpx.request`.\n """\n return request(\n "POST",\n url,\n content=content,\n data=data,\n files=files,\n json=json,\n params=params,\n headers=headers,\n cookies=cookies,\n auth=auth,\n proxy=proxy,\n follow_redirects=follow_redirects,\n verify=verify,\n timeout=timeout,\n trust_env=trust_env,\n )\n\n\ndef put(\n url: URL | str,\n *,\n content: RequestContent | None = None,\n data: RequestData | None = None,\n files: RequestFiles | None = None,\n json: typing.Any | None = None,\n params: QueryParamTypes | None = None,\n headers: HeaderTypes | None = None,\n cookies: CookieTypes | None = None,\n auth: AuthTypes | None = None,\n proxy: ProxyTypes | None = None,\n follow_redirects: bool = False,\n verify: ssl.SSLContext | str | bool = True,\n timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,\n trust_env: bool = True,\n) -> Response:\n """\n Sends a `PUT` request.\n\n **Parameters**: See `httpx.request`.\n """\n return request(\n "PUT",\n url,\n content=content,\n data=data,\n files=files,\n json=json,\n params=params,\n headers=headers,\n cookies=cookies,\n auth=auth,\n proxy=proxy,\n follow_redirects=follow_redirects,\n verify=verify,\n timeout=timeout,\n trust_env=trust_env,\n )\n\n\ndef patch(\n url: URL | str,\n *,\n content: RequestContent | None = None,\n data: RequestData | None = None,\n files: RequestFiles | None = None,\n json: typing.Any | None = None,\n params: QueryParamTypes | None = None,\n headers: HeaderTypes | None = None,\n cookies: CookieTypes | None = None,\n auth: AuthTypes | None = None,\n proxy: ProxyTypes | None = None,\n follow_redirects: bool = False,\n verify: ssl.SSLContext | str | bool = True,\n timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,\n trust_env: bool = True,\n) -> Response:\n """\n Sends a `PATCH` request.\n\n **Parameters**: See `httpx.request`.\n """\n return request(\n "PATCH",\n url,\n content=content,\n data=data,\n files=files,\n json=json,\n params=params,\n headers=headers,\n cookies=cookies,\n auth=auth,\n proxy=proxy,\n follow_redirects=follow_redirects,\n verify=verify,\n timeout=timeout,\n trust_env=trust_env,\n )\n\n\ndef delete(\n url: URL | str,\n *,\n params: QueryParamTypes | None = None,\n headers: HeaderTypes | None = None,\n cookies: CookieTypes | None = None,\n auth: AuthTypes | None = None,\n proxy: ProxyTypes | None = None,\n follow_redirects: bool = False,\n timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,\n verify: ssl.SSLContext | str | bool = True,\n trust_env: bool = True,\n) -> Response:\n """\n Sends a `DELETE` request.\n\n **Parameters**: See `httpx.request`.\n\n Note that the `data`, `files`, `json` and `content` parameters are not available\n on this function, as `DELETE` requests should not include a request body.\n """\n return request(\n "DELETE",\n url,\n params=params,\n headers=headers,\n cookies=cookies,\n auth=auth,\n proxy=proxy,\n follow_redirects=follow_redirects,\n verify=verify,\n timeout=timeout,\n trust_env=trust_env,\n )\n
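A short usage sketch of the `stream()` helper defined above, consuming the response body incrementally instead of loading it into memory:

```python
# A minimal usage sketch of the httpx.stream() helper defined above.
import httpx

with httpx.stream("GET", "https://www.example.com/") as response:
    # iter_bytes() yields the body in chunks as it arrives on the wire.
    for chunk in response.iter_bytes():
        print(f"received {len(chunk)} bytes")
```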
|
.venv\Lib\site-packages\httpx\_api.py
|
_api.py
|
Python
| 11,743 | 0.95 | 0.041096 | 0.085859 |
python-kit
| 181 |
2024-03-11T19:46:13.297095
|
GPL-3.0
| false |
fdc9efc17007ea1fde15414de8c07c59
|
from __future__ import annotations\n\nimport hashlib\nimport os\nimport re\nimport time\nimport typing\nfrom base64 import b64encode\nfrom urllib.request import parse_http_list\n\nfrom ._exceptions import ProtocolError\nfrom ._models import Cookies, Request, Response\nfrom ._utils import to_bytes, to_str, unquote\n\nif typing.TYPE_CHECKING: # pragma: no cover\n from hashlib import _Hash\n\n\n__all__ = ["Auth", "BasicAuth", "DigestAuth", "NetRCAuth"]\n\n\nclass Auth:\n """\n Base class for all authentication schemes.\n\n To implement a custom authentication scheme, subclass `Auth` and override\n the `.auth_flow()` method.\n\n If the authentication scheme does I/O such as disk access or network calls, or uses\n synchronization primitives such as locks, you should override `.sync_auth_flow()`\n and/or `.async_auth_flow()` instead of `.auth_flow()` to provide specialized\n implementations that will be used by `Client` and `AsyncClient` respectively.\n """\n\n requires_request_body = False\n requires_response_body = False\n\n def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:\n """\n Execute the authentication flow.\n\n To dispatch a request, `yield` it:\n\n ```\n yield request\n ```\n\n The client will `.send()` the response back into the flow generator. You can\n access it like so:\n\n ```\n response = yield request\n ```\n\n A `return` (or reaching the end of the generator) will result in the\n client returning the last response obtained from the server.\n\n You can dispatch as many requests as is necessary.\n """\n yield request\n\n def sync_auth_flow(\n self, request: Request\n ) -> typing.Generator[Request, Response, None]:\n """\n Execute the authentication flow synchronously.\n\n By default, this defers to `.auth_flow()`. You should override this method\n when the authentication scheme does I/O and/or uses concurrency primitives.\n """\n if self.requires_request_body:\n request.read()\n\n flow = self.auth_flow(request)\n request = next(flow)\n\n while True:\n response = yield request\n if self.requires_response_body:\n response.read()\n\n try:\n request = flow.send(response)\n except StopIteration:\n break\n\n async def async_auth_flow(\n self, request: Request\n ) -> typing.AsyncGenerator[Request, Response]:\n """\n Execute the authentication flow asynchronously.\n\n By default, this defers to `.auth_flow()`. 
You should override this method\n when the authentication scheme does I/O and/or uses concurrency primitives.\n """\n if self.requires_request_body:\n await request.aread()\n\n flow = self.auth_flow(request)\n request = next(flow)\n\n while True:\n response = yield request\n if self.requires_response_body:\n await response.aread()\n\n try:\n request = flow.send(response)\n except StopIteration:\n break\n\n\nclass FunctionAuth(Auth):\n """\n Allows the 'auth' argument to be passed as a simple callable function,\n that takes the request, and returns a new, modified request.\n """\n\n def __init__(self, func: typing.Callable[[Request], Request]) -> None:\n self._func = func\n\n def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:\n yield self._func(request)\n\n\nclass BasicAuth(Auth):\n """\n Allows the 'auth' argument to be passed as a (username, password) pair,\n and uses HTTP Basic authentication.\n """\n\n def __init__(self, username: str | bytes, password: str | bytes) -> None:\n self._auth_header = self._build_auth_header(username, password)\n\n def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:\n request.headers["Authorization"] = self._auth_header\n yield request\n\n def _build_auth_header(self, username: str | bytes, password: str | bytes) -> str:\n userpass = b":".join((to_bytes(username), to_bytes(password)))\n token = b64encode(userpass).decode()\n return f"Basic {token}"\n\n\nclass NetRCAuth(Auth):\n """\n Use a 'netrc' file to lookup basic auth credentials based on the url host.\n """\n\n def __init__(self, file: str | None = None) -> None:\n # Lazily import 'netrc'.\n # There's no need for us to load this module unless 'NetRCAuth' is being used.\n import netrc\n\n self._netrc_info = netrc.netrc(file)\n\n def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:\n auth_info = self._netrc_info.authenticators(request.url.host)\n if auth_info is None or not auth_info[2]:\n # The netrc file did not have authentication credentials for this host.\n yield request\n else:\n # Build a basic auth header with credentials from the netrc file.\n request.headers["Authorization"] = self._build_auth_header(\n username=auth_info[0], password=auth_info[2]\n )\n yield request\n\n def _build_auth_header(self, username: str | bytes, password: str | bytes) -> str:\n userpass = b":".join((to_bytes(username), to_bytes(password)))\n token = b64encode(userpass).decode()\n return f"Basic {token}"\n\n\nclass DigestAuth(Auth):\n _ALGORITHM_TO_HASH_FUNCTION: dict[str, typing.Callable[[bytes], _Hash]] = {\n "MD5": hashlib.md5,\n "MD5-SESS": hashlib.md5,\n "SHA": hashlib.sha1,\n "SHA-SESS": hashlib.sha1,\n "SHA-256": hashlib.sha256,\n "SHA-256-SESS": hashlib.sha256,\n "SHA-512": hashlib.sha512,\n "SHA-512-SESS": hashlib.sha512,\n }\n\n def __init__(self, username: str | bytes, password: str | bytes) -> None:\n self._username = to_bytes(username)\n self._password = to_bytes(password)\n self._last_challenge: _DigestAuthChallenge | None = None\n self._nonce_count = 1\n\n def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:\n if self._last_challenge:\n request.headers["Authorization"] = self._build_auth_header(\n request, self._last_challenge\n )\n\n response = yield request\n\n if response.status_code != 401 or "www-authenticate" not in response.headers:\n # If the response is not a 401 then we don't\n # need to build an authenticated request.\n return\n\n for auth_header in 
response.headers.get_list("www-authenticate"):\n if auth_header.lower().startswith("digest "):\n break\n else:\n # If the response does not include a 'WWW-Authenticate: Digest ...'\n # header, then we don't need to build an authenticated request.\n return\n\n self._last_challenge = self._parse_challenge(request, response, auth_header)\n self._nonce_count = 1\n\n request.headers["Authorization"] = self._build_auth_header(\n request, self._last_challenge\n )\n if response.cookies:\n Cookies(response.cookies).set_cookie_header(request=request)\n yield request\n\n def _parse_challenge(\n self, request: Request, response: Response, auth_header: str\n ) -> _DigestAuthChallenge:\n """\n Returns a challenge from a Digest WWW-Authenticate header.\n These take the form of:\n `Digest realm="realm@host.com",qop="auth,auth-int",nonce="abc",opaque="xyz"`\n """\n scheme, _, fields = auth_header.partition(" ")\n\n # This method should only ever have been called with a Digest auth header.\n assert scheme.lower() == "digest"\n\n header_dict: dict[str, str] = {}\n for field in parse_http_list(fields):\n key, value = field.strip().split("=", 1)\n header_dict[key] = unquote(value)\n\n try:\n realm = header_dict["realm"].encode()\n nonce = header_dict["nonce"].encode()\n algorithm = header_dict.get("algorithm", "MD5")\n opaque = header_dict["opaque"].encode() if "opaque" in header_dict else None\n qop = header_dict["qop"].encode() if "qop" in header_dict else None\n return _DigestAuthChallenge(\n realm=realm, nonce=nonce, algorithm=algorithm, opaque=opaque, qop=qop\n )\n except KeyError as exc:\n message = "Malformed Digest WWW-Authenticate header"\n raise ProtocolError(message, request=request) from exc\n\n def _build_auth_header(\n self, request: Request, challenge: _DigestAuthChallenge\n ) -> str:\n hash_func = self._ALGORITHM_TO_HASH_FUNCTION[challenge.algorithm.upper()]\n\n def digest(data: bytes) -> bytes:\n return hash_func(data).hexdigest().encode()\n\n A1 = b":".join((self._username, challenge.realm, self._password))\n\n path = request.url.raw_path\n A2 = b":".join((request.method.encode(), path))\n # TODO: implement auth-int\n HA2 = digest(A2)\n\n nc_value = b"%08x" % self._nonce_count\n cnonce = self._get_client_nonce(self._nonce_count, challenge.nonce)\n self._nonce_count += 1\n\n HA1 = digest(A1)\n if challenge.algorithm.lower().endswith("-sess"):\n HA1 = digest(b":".join((HA1, challenge.nonce, cnonce)))\n\n qop = self._resolve_qop(challenge.qop, request=request)\n if qop is None:\n # Following RFC 2069\n digest_data = [HA1, challenge.nonce, HA2]\n else:\n # Following RFC 2617/7616\n digest_data = [HA1, challenge.nonce, nc_value, cnonce, qop, HA2]\n\n format_args = {\n "username": self._username,\n "realm": challenge.realm,\n "nonce": challenge.nonce,\n "uri": path,\n "response": digest(b":".join(digest_data)),\n "algorithm": challenge.algorithm.encode(),\n }\n if challenge.opaque:\n format_args["opaque"] = challenge.opaque\n if qop:\n format_args["qop"] = b"auth"\n format_args["nc"] = nc_value\n format_args["cnonce"] = cnonce\n\n return "Digest " + self._get_header_value(format_args)\n\n def _get_client_nonce(self, nonce_count: int, nonce: bytes) -> bytes:\n s = str(nonce_count).encode()\n s += nonce\n s += time.ctime().encode()\n s += os.urandom(8)\n\n return hashlib.sha1(s).hexdigest()[:16].encode()\n\n def _get_header_value(self, header_fields: dict[str, bytes]) -> str:\n NON_QUOTED_FIELDS = ("algorithm", "qop", "nc")\n QUOTED_TEMPLATE = '{}="{}"'\n NON_QUOTED_TEMPLATE = "{}={}"\n\n 
header_value = ""\n for i, (field, value) in enumerate(header_fields.items()):\n if i > 0:\n header_value += ", "\n template = (\n QUOTED_TEMPLATE\n if field not in NON_QUOTED_FIELDS\n else NON_QUOTED_TEMPLATE\n )\n header_value += template.format(field, to_str(value))\n\n return header_value\n\n def _resolve_qop(self, qop: bytes | None, request: Request) -> bytes | None:\n if qop is None:\n return None\n qops = re.split(b", ?", qop)\n if b"auth" in qops:\n return b"auth"\n\n if qops == [b"auth-int"]:\n raise NotImplementedError("Digest auth-int support is not yet implemented")\n\n message = f'Unexpected qop value "{qop!r}" in digest auth'\n raise ProtocolError(message, request=request)\n\n\nclass _DigestAuthChallenge(typing.NamedTuple):\n realm: bytes\n nonce: bytes\n algorithm: str\n opaque: bytes | None\n qop: bytes | None\n
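To make the `Auth.auth_flow()` contract concrete, here is a sketch of a custom single-step scheme. The `X-Api-Key` header name is illustrative, not part of the library:

```python
# A minimal custom scheme built on the Auth base class above.
import typing
import httpx

class ApiKeyAuth(httpx.Auth):
    """Attach a static API key header (illustrative) to every request."""

    def __init__(self, api_key: str) -> None:
        self._api_key = api_key

    def auth_flow(
        self, request: httpx.Request
    ) -> typing.Generator[httpx.Request, httpx.Response, None]:
        # Single-step flow: set the header, then dispatch the request.
        request.headers["X-Api-Key"] = self._api_key
        yield request

# Usage: httpx.get("https://www.example.com/", auth=ApiKeyAuth("secret"))
```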
|
.venv\Lib\site-packages\httpx\_auth.py
|
_auth.py
|
Python
| 11,891 | 0.95 | 0.16954 | 0.043796 |
vue-tools
| 318 |
2025-01-03T20:51:22.787959
|
Apache-2.0
| false |
9d85ddaa40ef44c126e2bafd1162b4f3
|
from __future__ import annotations\n\nimport os\nimport typing\n\nfrom ._models import Headers\nfrom ._types import CertTypes, HeaderTypes, TimeoutTypes\nfrom ._urls import URL\n\nif typing.TYPE_CHECKING:\n import ssl # pragma: no cover\n\n__all__ = ["Limits", "Proxy", "Timeout", "create_ssl_context"]\n\n\nclass UnsetType:\n pass # pragma: no cover\n\n\nUNSET = UnsetType()\n\n\ndef create_ssl_context(\n verify: ssl.SSLContext | str | bool = True,\n cert: CertTypes | None = None,\n trust_env: bool = True,\n) -> ssl.SSLContext:\n import ssl\n import warnings\n\n import certifi\n\n if verify is True:\n if trust_env and os.environ.get("SSL_CERT_FILE"): # pragma: nocover\n ctx = ssl.create_default_context(cafile=os.environ["SSL_CERT_FILE"])\n elif trust_env and os.environ.get("SSL_CERT_DIR"): # pragma: nocover\n ctx = ssl.create_default_context(capath=os.environ["SSL_CERT_DIR"])\n else:\n # Default case...\n ctx = ssl.create_default_context(cafile=certifi.where())\n elif verify is False:\n ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n elif isinstance(verify, str): # pragma: nocover\n message = (\n "`verify=<str>` is deprecated. "\n "Use `verify=ssl.create_default_context(cafile=...)` "\n "or `verify=ssl.create_default_context(capath=...)` instead."\n )\n warnings.warn(message, DeprecationWarning)\n if os.path.isdir(verify):\n return ssl.create_default_context(capath=verify)\n return ssl.create_default_context(cafile=verify)\n else:\n ctx = verify\n\n if cert: # pragma: nocover\n message = (\n "`cert=...` is deprecated. Use `verify=<ssl_context>` instead,"\n "with `.load_cert_chain()` to configure the certificate chain."\n )\n warnings.warn(message, DeprecationWarning)\n if isinstance(cert, str):\n ctx.load_cert_chain(cert)\n else:\n ctx.load_cert_chain(*cert)\n\n return ctx\n\n\nclass Timeout:\n """\n Timeout configuration.\n\n **Usage**:\n\n Timeout(None) # No timeouts.\n Timeout(5.0) # 5s timeout on all operations.\n Timeout(None, connect=5.0) # 5s timeout on connect, no other timeouts.\n Timeout(5.0, connect=10.0) # 10s timeout on connect. 
5s timeout elsewhere.\n Timeout(5.0, pool=None) # No timeout on acquiring connection from pool.\n # 5s timeout elsewhere.\n """\n\n def __init__(\n self,\n timeout: TimeoutTypes | UnsetType = UNSET,\n *,\n connect: None | float | UnsetType = UNSET,\n read: None | float | UnsetType = UNSET,\n write: None | float | UnsetType = UNSET,\n pool: None | float | UnsetType = UNSET,\n ) -> None:\n if isinstance(timeout, Timeout):\n # Passed as a single explicit Timeout.\n assert connect is UNSET\n assert read is UNSET\n assert write is UNSET\n assert pool is UNSET\n self.connect = timeout.connect # type: typing.Optional[float]\n self.read = timeout.read # type: typing.Optional[float]\n self.write = timeout.write # type: typing.Optional[float]\n self.pool = timeout.pool # type: typing.Optional[float]\n elif isinstance(timeout, tuple):\n # Passed as a tuple.\n self.connect = timeout[0]\n self.read = timeout[1]\n self.write = None if len(timeout) < 3 else timeout[2]\n self.pool = None if len(timeout) < 4 else timeout[3]\n elif not (\n isinstance(connect, UnsetType)\n or isinstance(read, UnsetType)\n or isinstance(write, UnsetType)\n or isinstance(pool, UnsetType)\n ):\n self.connect = connect\n self.read = read\n self.write = write\n self.pool = pool\n else:\n if isinstance(timeout, UnsetType):\n raise ValueError(\n "httpx.Timeout must either include a default, or set all "\n "four parameters explicitly."\n )\n self.connect = timeout if isinstance(connect, UnsetType) else connect\n self.read = timeout if isinstance(read, UnsetType) else read\n self.write = timeout if isinstance(write, UnsetType) else write\n self.pool = timeout if isinstance(pool, UnsetType) else pool\n\n def as_dict(self) -> dict[str, float | None]:\n return {\n "connect": self.connect,\n "read": self.read,\n "write": self.write,\n "pool": self.pool,\n }\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, self.__class__)\n and self.connect == other.connect\n and self.read == other.read\n and self.write == other.write\n and self.pool == other.pool\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n if len({self.connect, self.read, self.write, self.pool}) == 1:\n return f"{class_name}(timeout={self.connect})"\n return (\n f"{class_name}(connect={self.connect}, "\n f"read={self.read}, write={self.write}, pool={self.pool})"\n )\n\n\nclass Limits:\n """\n Configuration for limits to various client behaviors.\n\n **Parameters:**\n\n * **max_connections** - The maximum number of concurrent connections that may be\n established.\n * **max_keepalive_connections** - Allow the connection pool to maintain\n keep-alive connections below this point. 
Should be less than or equal\n to `max_connections`.\n * **keepalive_expiry** - Time limit on idle keep-alive connections in seconds.\n """\n\n def __init__(\n self,\n *,\n max_connections: int | None = None,\n max_keepalive_connections: int | None = None,\n keepalive_expiry: float | None = 5.0,\n ) -> None:\n self.max_connections = max_connections\n self.max_keepalive_connections = max_keepalive_connections\n self.keepalive_expiry = keepalive_expiry\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, self.__class__)\n and self.max_connections == other.max_connections\n and self.max_keepalive_connections == other.max_keepalive_connections\n and self.keepalive_expiry == other.keepalive_expiry\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return (\n f"{class_name}(max_connections={self.max_connections}, "\n f"max_keepalive_connections={self.max_keepalive_connections}, "\n f"keepalive_expiry={self.keepalive_expiry})"\n )\n\n\nclass Proxy:\n def __init__(\n self,\n url: URL | str,\n *,\n ssl_context: ssl.SSLContext | None = None,\n auth: tuple[str, str] | None = None,\n headers: HeaderTypes | None = None,\n ) -> None:\n url = URL(url)\n headers = Headers(headers)\n\n if url.scheme not in ("http", "https", "socks5", "socks5h"):\n raise ValueError(f"Unknown scheme for proxy URL {url!r}")\n\n if url.username or url.password:\n # Remove any auth credentials from the URL.\n auth = (url.username, url.password)\n url = url.copy_with(username=None, password=None)\n\n self.url = url\n self.auth = auth\n self.headers = headers\n self.ssl_context = ssl_context\n\n @property\n def raw_auth(self) -> tuple[bytes, bytes] | None:\n # The proxy authentication as raw bytes.\n return (\n None\n if self.auth is None\n else (self.auth[0].encode("utf-8"), self.auth[1].encode("utf-8"))\n )\n\n def __repr__(self) -> str:\n # The authentication is represented with the password component masked.\n auth = (self.auth[0], "********") if self.auth else None\n\n # Build a nice concise representation.\n url_str = f"{str(self.url)!r}"\n auth_str = f", auth={auth!r}" if auth else ""\n headers_str = f", headers={dict(self.headers)!r}" if self.headers else ""\n return f"Proxy({url_str}{auth_str}{headers_str})"\n\n\nDEFAULT_TIMEOUT_CONFIG = Timeout(timeout=5.0)\nDEFAULT_LIMITS = Limits(max_connections=100, max_keepalive_connections=20)\nDEFAULT_MAX_REDIRECTS = 20\n
|
.venv\Lib\site-packages\httpx\_config.py
|
_config.py
|
Python
| 8,547 | 0.95 | 0.153226 | 0.076555 |
awesome-app
| 400 |
2023-08-27T08:58:48.305721
|
BSD-3-Clause
| false |
b40ab3f057d28de0d386cb2ef0e6e281
|
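A sketch of the configuration objects above wired together; all values are illustrative, and the proxy URL is hypothetical.

```
import httpx

timeout = httpx.Timeout(5.0, connect=10.0)  # 10s connect, 5s read/write/pool
limits = httpx.Limits(max_connections=100, max_keepalive_connections=20)
proxy = httpx.Proxy("http://user:pass@localhost:8030")  # hypothetical proxy

# Credentials embedded in the proxy URL are stripped and surfaced via `.auth`.
assert proxy.auth == ("user", "pass")

client = httpx.Client(timeout=timeout, limits=limits)
print(timeout)  # Timeout(connect=10.0, read=5.0, write=5.0, pool=5.0)
```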
"""\nOur exception hierarchy:\n\n* HTTPError\n x RequestError\n + TransportError\n - TimeoutException\n · ConnectTimeout\n · ReadTimeout\n · WriteTimeout\n · PoolTimeout\n - NetworkError\n · ConnectError\n · ReadError\n · WriteError\n · CloseError\n - ProtocolError\n · LocalProtocolError\n · RemoteProtocolError\n - ProxyError\n - UnsupportedProtocol\n + DecodingError\n + TooManyRedirects\n x HTTPStatusError\n* InvalidURL\n* CookieConflict\n* StreamError\n x StreamConsumed\n x StreamClosed\n x ResponseNotRead\n x RequestNotRead\n"""\n\nfrom __future__ import annotations\n\nimport contextlib\nimport typing\n\nif typing.TYPE_CHECKING:\n from ._models import Request, Response # pragma: no cover\n\n__all__ = [\n "CloseError",\n "ConnectError",\n "ConnectTimeout",\n "CookieConflict",\n "DecodingError",\n "HTTPError",\n "HTTPStatusError",\n "InvalidURL",\n "LocalProtocolError",\n "NetworkError",\n "PoolTimeout",\n "ProtocolError",\n "ProxyError",\n "ReadError",\n "ReadTimeout",\n "RemoteProtocolError",\n "RequestError",\n "RequestNotRead",\n "ResponseNotRead",\n "StreamClosed",\n "StreamConsumed",\n "StreamError",\n "TimeoutException",\n "TooManyRedirects",\n "TransportError",\n "UnsupportedProtocol",\n "WriteError",\n "WriteTimeout",\n]\n\n\nclass HTTPError(Exception):\n """\n Base class for `RequestError` and `HTTPStatusError`.\n\n Useful for `try...except` blocks when issuing a request,\n and then calling `.raise_for_status()`.\n\n For example:\n\n ```\n try:\n response = httpx.get("https://www.example.com")\n response.raise_for_status()\n except httpx.HTTPError as exc:\n print(f"HTTP Exception for {exc.request.url} - {exc}")\n ```\n """\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n self._request: Request | None = None\n\n @property\n def request(self) -> Request:\n if self._request is None:\n raise RuntimeError("The .request property has not been set.")\n return self._request\n\n @request.setter\n def request(self, request: Request) -> None:\n self._request = request\n\n\nclass RequestError(HTTPError):\n """\n Base class for all exceptions that may occur when issuing a `.request()`.\n """\n\n def __init__(self, message: str, *, request: Request | None = None) -> None:\n super().__init__(message)\n # At the point an exception is raised we won't typically have a request\n # instance to associate it with.\n #\n # The 'request_context' context manager is used within the Client and\n # Response methods in order to ensure that any raised exceptions\n # have a `.request` property set on them.\n self._request = request\n\n\nclass TransportError(RequestError):\n """\n Base class for all exceptions that occur at the level of the Transport API.\n """\n\n\n# Timeout exceptions...\n\n\nclass TimeoutException(TransportError):\n """\n The base class for timeout errors.\n\n An operation has timed out.\n """\n\n\nclass ConnectTimeout(TimeoutException):\n """\n Timed out while connecting to the host.\n """\n\n\nclass ReadTimeout(TimeoutException):\n """\n Timed out while receiving data from the host.\n """\n\n\nclass WriteTimeout(TimeoutException):\n """\n Timed out while sending data to the host.\n """\n\n\nclass PoolTimeout(TimeoutException):\n """\n Timed out waiting to acquire a connection from the pool.\n """\n\n\n# Core networking exceptions...\n\n\nclass NetworkError(TransportError):\n """\n The base class for network-related errors.\n\n An error occurred while interacting with the network.\n """\n\n\nclass ReadError(NetworkError):\n """\n Failed to receive data from the 
network.\n """\n\n\nclass WriteError(NetworkError):\n """\n Failed to send data through the network.\n """\n\n\nclass ConnectError(NetworkError):\n """\n Failed to establish a connection.\n """\n\n\nclass CloseError(NetworkError):\n """\n Failed to close a connection.\n """\n\n\n# Other transport exceptions...\n\n\nclass ProxyError(TransportError):\n """\n An error occurred while establishing a proxy connection.\n """\n\n\nclass UnsupportedProtocol(TransportError):\n """\n Attempted to make a request to an unsupported protocol.\n\n For example issuing a request to `ftp://www.example.com`.\n """\n\n\nclass ProtocolError(TransportError):\n """\n The protocol was violated.\n """\n\n\nclass LocalProtocolError(ProtocolError):\n """\n A protocol was violated by the client.\n\n For example if the user instantiated a `Request` instance explicitly,\n failed to include the mandatory `Host:` header, and then issued it directly\n using `client.send()`.\n """\n\n\nclass RemoteProtocolError(ProtocolError):\n """\n The protocol was violated by the server.\n\n For example, returning malformed HTTP.\n """\n\n\n# Other request exceptions...\n\n\nclass DecodingError(RequestError):\n """\n Decoding of the response failed, due to a malformed encoding.\n """\n\n\nclass TooManyRedirects(RequestError):\n """\n Too many redirects.\n """\n\n\n# Client errors\n\n\nclass HTTPStatusError(HTTPError):\n """\n The response had an error HTTP status of 4xx or 5xx.\n\n May be raised when calling `response.raise_for_status()`\n """\n\n def __init__(self, message: str, *, request: Request, response: Response) -> None:\n super().__init__(message)\n self.request = request\n self.response = response\n\n\nclass InvalidURL(Exception):\n """\n URL is improperly formed or cannot be parsed.\n """\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass CookieConflict(Exception):\n """\n Attempted to lookup a cookie by name, but multiple cookies existed.\n\n Can occur when calling `response.cookies.get(...)`.\n """\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\n# Stream exceptions...\n\n# These may occur as the result of a programming error, by accessing\n# the request/response stream in an invalid manner.\n\n\nclass StreamError(RuntimeError):\n """\n The base class for stream exceptions.\n\n The developer made an error in accessing the request stream in\n an invalid way.\n """\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass StreamConsumed(StreamError):\n """\n Attempted to read or stream content, but the content has already\n been streamed.\n """\n\n def __init__(self) -> None:\n message = (\n "Attempted to read or stream some content, but the content has "\n "already been streamed. 
For requests, this could be due to passing "\n            "a generator as request content, and then receiving a redirect "\n            "response or a secondary request as part of an authentication flow. "\n            "For responses, this could be due to attempting to stream the response "\n            "content more than once."\n        )\n        super().__init__(message)\n\n\nclass StreamClosed(StreamError):\n    """\n    Attempted to read or stream response content, but the request has been\n    closed.\n    """\n\n    def __init__(self) -> None:\n        message = (\n            "Attempted to read or stream content, but the stream has been closed."\n        )\n        super().__init__(message)\n\n\nclass ResponseNotRead(StreamError):\n    """\n    Attempted to access streaming response content, without having called `read()`.\n    """\n\n    def __init__(self) -> None:\n        message = (\n            "Attempted to access streaming response content,"\n            " without having called `read()`."\n        )\n        super().__init__(message)\n\n\nclass RequestNotRead(StreamError):\n    """\n    Attempted to access streaming request content, without having called `read()`.\n    """\n\n    def __init__(self) -> None:\n        message = (\n            "Attempted to access streaming request content,"\n            " without having called `read()`."\n        )\n        super().__init__(message)\n\n\n@contextlib.contextmanager\ndef request_context(\n    request: Request | None = None,\n) -> typing.Iterator[None]:\n    """\n    A context manager that can be used to attach the given request context\n    to any `RequestError` exceptions that are raised within the block.\n    """\n    try:\n        yield\n    except RequestError as exc:\n        if request is not None:\n            exc.request = request\n        raise exc\n
|
.venv\Lib\site-packages\httpx\_exceptions.py
|
_exceptions.py
|
Python
| 8,527 | 0.95 | 0.176781 | 0.064286 |
python-kit
| 813 |
2023-10-07T05:43:27.397950
|
Apache-2.0
| false |
7147f1c651f34f079756a05f11f63ed2
|
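A sketch of catching branches of the hierarchy above at different granularities; the URL is illustrative. Note that `HTTPStatusError` is a sibling of `RequestError`, not a subclass, so `except httpx.RequestError` alone will not catch 4xx/5xx results.

```
import httpx

try:
    response = httpx.get("https://www.example.com/")
    response.raise_for_status()
except httpx.TimeoutException as exc:
    # TimeoutException < TransportError < RequestError, so order matters here.
    print(f"Timed out talking to {exc.request.url}")
except httpx.RequestError as exc:
    print(f"Transport failure for {exc.request.url}: {exc}")
except httpx.HTTPStatusError as exc:
    print(f"Error response {exc.response.status_code} from {exc.request.url}")
```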
from __future__ import annotations\n\nimport functools\nimport json\nimport sys\nimport typing\n\nimport click\nimport pygments.lexers\nimport pygments.util\nimport rich.console\nimport rich.markup\nimport rich.progress\nimport rich.syntax\nimport rich.table\n\nfrom ._client import Client\nfrom ._exceptions import RequestError\nfrom ._models import Response\nfrom ._status_codes import codes\n\nif typing.TYPE_CHECKING:\n import httpcore # pragma: no cover\n\n\ndef print_help() -> None:\n console = rich.console.Console()\n\n console.print("[bold]HTTPX :butterfly:", justify="center")\n console.print()\n console.print("A next generation HTTP client.", justify="center")\n console.print()\n console.print(\n "Usage: [bold]httpx[/bold] [cyan]<URL> [OPTIONS][/cyan] ", justify="left"\n )\n console.print()\n\n table = rich.table.Table.grid(padding=1, pad_edge=True)\n table.add_column("Parameter", no_wrap=True, justify="left", style="bold")\n table.add_column("Description")\n table.add_row(\n "-m, --method [cyan]METHOD",\n "Request method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD.\n"\n "[Default: GET, or POST if a request body is included]",\n )\n table.add_row(\n "-p, --params [cyan]<NAME VALUE> ...",\n "Query parameters to include in the request URL.",\n )\n table.add_row(\n "-c, --content [cyan]TEXT", "Byte content to include in the request body."\n )\n table.add_row(\n "-d, --data [cyan]<NAME VALUE> ...", "Form data to include in the request body."\n )\n table.add_row(\n "-f, --files [cyan]<NAME FILENAME> ...",\n "Form files to include in the request body.",\n )\n table.add_row("-j, --json [cyan]TEXT", "JSON data to include in the request body.")\n table.add_row(\n "-h, --headers [cyan]<NAME VALUE> ...",\n "Include additional HTTP headers in the request.",\n )\n table.add_row(\n "--cookies [cyan]<NAME VALUE> ...", "Cookies to include in the request."\n )\n table.add_row(\n "--auth [cyan]<USER PASS>",\n "Username and password to include in the request. Specify '-' for the password"\n " to use a password prompt. Note that using --verbose/-v will expose"\n " the Authorization header, including the password encoding"\n " in a trivially reversible format.",\n )\n\n table.add_row(\n "--proxy [cyan]URL",\n "Send the request via a proxy. Should be the URL giving the proxy address.",\n )\n\n table.add_row(\n "--timeout [cyan]FLOAT",\n "Timeout value to use for network operations, such as establishing the"\n " connection, reading some data, etc... [Default: 5.0]",\n )\n\n table.add_row("--follow-redirects", "Automatically follow redirects.")\n table.add_row("--no-verify", "Disable SSL verification.")\n table.add_row(\n "--http2", "Send the request using HTTP/2, if the remote server supports it."\n )\n\n table.add_row(\n "--download [cyan]FILE",\n "Save the response content as a file, rather than displaying it.",\n )\n\n table.add_row("-v, --verbose", "Verbose output. 
Show request as well as response.")\n table.add_row("--help", "Show this message and exit.")\n console.print(table)\n\n\ndef get_lexer_for_response(response: Response) -> str:\n content_type = response.headers.get("Content-Type")\n if content_type is not None:\n mime_type, _, _ = content_type.partition(";")\n try:\n return typing.cast(\n str, pygments.lexers.get_lexer_for_mimetype(mime_type.strip()).name\n )\n except pygments.util.ClassNotFound: # pragma: no cover\n pass\n return "" # pragma: no cover\n\n\ndef format_request_headers(request: httpcore.Request, http2: bool = False) -> str:\n version = "HTTP/2" if http2 else "HTTP/1.1"\n headers = [\n (name.lower() if http2 else name, value) for name, value in request.headers\n ]\n method = request.method.decode("ascii")\n target = request.url.target.decode("ascii")\n lines = [f"{method} {target} {version}"] + [\n f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers\n ]\n return "\n".join(lines)\n\n\ndef format_response_headers(\n http_version: bytes,\n status: int,\n reason_phrase: bytes | None,\n headers: list[tuple[bytes, bytes]],\n) -> str:\n version = http_version.decode("ascii")\n reason = (\n codes.get_reason_phrase(status)\n if reason_phrase is None\n else reason_phrase.decode("ascii")\n )\n lines = [f"{version} {status} {reason}"] + [\n f"{name.decode('ascii')}: {value.decode('ascii')}" for name, value in headers\n ]\n return "\n".join(lines)\n\n\ndef print_request_headers(request: httpcore.Request, http2: bool = False) -> None:\n console = rich.console.Console()\n http_text = format_request_headers(request, http2=http2)\n syntax = rich.syntax.Syntax(http_text, "http", theme="ansi_dark", word_wrap=True)\n console.print(syntax)\n syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True)\n console.print(syntax)\n\n\ndef print_response_headers(\n http_version: bytes,\n status: int,\n reason_phrase: bytes | None,\n headers: list[tuple[bytes, bytes]],\n) -> None:\n console = rich.console.Console()\n http_text = format_response_headers(http_version, status, reason_phrase, headers)\n syntax = rich.syntax.Syntax(http_text, "http", theme="ansi_dark", word_wrap=True)\n console.print(syntax)\n syntax = rich.syntax.Syntax("", "http", theme="ansi_dark", word_wrap=True)\n console.print(syntax)\n\n\ndef print_response(response: Response) -> None:\n console = rich.console.Console()\n lexer_name = get_lexer_for_response(response)\n if lexer_name:\n if lexer_name.lower() == "json":\n try:\n data = response.json()\n text = json.dumps(data, indent=4)\n except ValueError: # pragma: no cover\n text = response.text\n else:\n text = response.text\n\n syntax = rich.syntax.Syntax(text, lexer_name, theme="ansi_dark", word_wrap=True)\n console.print(syntax)\n else:\n console.print(f"<{len(response.content)} bytes of binary data>")\n\n\n_PCTRTT = typing.Tuple[typing.Tuple[str, str], ...]\n_PCTRTTT = typing.Tuple[_PCTRTT, ...]\n_PeerCertRetDictType = typing.Dict[str, typing.Union[str, _PCTRTTT, _PCTRTT]]\n\n\ndef format_certificate(cert: _PeerCertRetDictType) -> str: # pragma: no cover\n lines = []\n for key, value in cert.items():\n if isinstance(value, (list, tuple)):\n lines.append(f"* {key}:")\n for item in value:\n if key in ("subject", "issuer"):\n for sub_item in item:\n lines.append(f"* {sub_item[0]}: {sub_item[1]!r}")\n elif isinstance(item, tuple) and len(item) == 2:\n lines.append(f"* {item[0]}: {item[1]!r}")\n else:\n lines.append(f"* {item!r}")\n else:\n lines.append(f"* {key}: {value!r}")\n return 
"\n".join(lines)\n\n\ndef trace(\n name: str, info: typing.Mapping[str, typing.Any], verbose: bool = False\n) -> None:\n console = rich.console.Console()\n if name == "connection.connect_tcp.started" and verbose:\n host = info["host"]\n console.print(f"* Connecting to {host!r}")\n elif name == "connection.connect_tcp.complete" and verbose:\n stream = info["return_value"]\n server_addr = stream.get_extra_info("server_addr")\n console.print(f"* Connected to {server_addr[0]!r} on port {server_addr[1]}")\n elif name == "connection.start_tls.complete" and verbose: # pragma: no cover\n stream = info["return_value"]\n ssl_object = stream.get_extra_info("ssl_object")\n version = ssl_object.version()\n cipher = ssl_object.cipher()\n server_cert = ssl_object.getpeercert()\n alpn = ssl_object.selected_alpn_protocol()\n console.print(f"* SSL established using {version!r} / {cipher[0]!r}")\n console.print(f"* Selected ALPN protocol: {alpn!r}")\n if server_cert:\n console.print("* Server certificate:")\n console.print(format_certificate(server_cert))\n elif name == "http11.send_request_headers.started" and verbose:\n request = info["request"]\n print_request_headers(request, http2=False)\n elif name == "http2.send_request_headers.started" and verbose: # pragma: no cover\n request = info["request"]\n print_request_headers(request, http2=True)\n elif name == "http11.receive_response_headers.complete":\n http_version, status, reason_phrase, headers = info["return_value"]\n print_response_headers(http_version, status, reason_phrase, headers)\n elif name == "http2.receive_response_headers.complete": # pragma: no cover\n status, headers = info["return_value"]\n http_version = b"HTTP/2"\n reason_phrase = None\n print_response_headers(http_version, status, reason_phrase, headers)\n\n\ndef download_response(response: Response, download: typing.BinaryIO) -> None:\n console = rich.console.Console()\n console.print()\n content_length = response.headers.get("Content-Length")\n with rich.progress.Progress(\n "[progress.description]{task.description}",\n "[progress.percentage]{task.percentage:>3.0f}%",\n rich.progress.BarColumn(bar_width=None),\n rich.progress.DownloadColumn(),\n rich.progress.TransferSpeedColumn(),\n ) as progress:\n description = f"Downloading [bold]{rich.markup.escape(download.name)}"\n download_task = progress.add_task(\n description,\n total=int(content_length or 0),\n start=content_length is not None,\n )\n for chunk in response.iter_bytes():\n download.write(chunk)\n progress.update(download_task, completed=response.num_bytes_downloaded)\n\n\ndef validate_json(\n ctx: click.Context,\n param: click.Option | click.Parameter,\n value: typing.Any,\n) -> typing.Any:\n if value is None:\n return None\n\n try:\n return json.loads(value)\n except json.JSONDecodeError: # pragma: no cover\n raise click.BadParameter("Not valid JSON")\n\n\ndef validate_auth(\n ctx: click.Context,\n param: click.Option | click.Parameter,\n value: typing.Any,\n) -> typing.Any:\n if value == (None, None):\n return None\n\n username, password = value\n if password == "-": # pragma: no cover\n password = click.prompt("Password", hide_input=True)\n return (username, password)\n\n\ndef handle_help(\n ctx: click.Context,\n param: click.Option | click.Parameter,\n value: typing.Any,\n) -> None:\n if not value or ctx.resilient_parsing:\n return\n\n print_help()\n ctx.exit()\n\n\n@click.command(add_help_option=False)\n@click.argument("url", type=str)\n@click.option(\n "--method",\n "-m",\n "method",\n type=str,\n help=(\n "Request 
method, such as GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD. "\n "[Default: GET, or POST if a request body is included]"\n ),\n)\n@click.option(\n "--params",\n "-p",\n "params",\n type=(str, str),\n multiple=True,\n help="Query parameters to include in the request URL.",\n)\n@click.option(\n "--content",\n "-c",\n "content",\n type=str,\n help="Byte content to include in the request body.",\n)\n@click.option(\n "--data",\n "-d",\n "data",\n type=(str, str),\n multiple=True,\n help="Form data to include in the request body.",\n)\n@click.option(\n "--files",\n "-f",\n "files",\n type=(str, click.File(mode="rb")),\n multiple=True,\n help="Form files to include in the request body.",\n)\n@click.option(\n "--json",\n "-j",\n "json",\n type=str,\n callback=validate_json,\n help="JSON data to include in the request body.",\n)\n@click.option(\n "--headers",\n "-h",\n "headers",\n type=(str, str),\n multiple=True,\n help="Include additional HTTP headers in the request.",\n)\n@click.option(\n "--cookies",\n "cookies",\n type=(str, str),\n multiple=True,\n help="Cookies to include in the request.",\n)\n@click.option(\n "--auth",\n "auth",\n type=(str, str),\n default=(None, None),\n callback=validate_auth,\n help=(\n "Username and password to include in the request. "\n "Specify '-' for the password to use a password prompt. "\n "Note that using --verbose/-v will expose the Authorization header, "\n "including the password encoding in a trivially reversible format."\n ),\n)\n@click.option(\n "--proxy",\n "proxy",\n type=str,\n default=None,\n help="Send the request via a proxy. Should be the URL giving the proxy address.",\n)\n@click.option(\n "--timeout",\n "timeout",\n type=float,\n default=5.0,\n help=(\n "Timeout value to use for network operations, such as establishing the "\n "connection, reading some data, etc... [Default: 5.0]"\n ),\n)\n@click.option(\n "--follow-redirects",\n "follow_redirects",\n is_flag=True,\n default=False,\n help="Automatically follow redirects.",\n)\n@click.option(\n "--no-verify",\n "verify",\n is_flag=True,\n default=True,\n help="Disable SSL verification.",\n)\n@click.option(\n "--http2",\n "http2",\n type=bool,\n is_flag=True,\n default=False,\n help="Send the request using HTTP/2, if the remote server supports it.",\n)\n@click.option(\n "--download",\n type=click.File("wb"),\n help="Save the response content as a file, rather than displaying it.",\n)\n@click.option(\n "--verbose",\n "-v",\n type=bool,\n is_flag=True,\n default=False,\n help="Verbose. 
Show request as well as response.",\n)\n@click.option(\n "--help",\n is_flag=True,\n is_eager=True,\n expose_value=False,\n callback=handle_help,\n help="Show this message and exit.",\n)\ndef main(\n url: str,\n method: str,\n params: list[tuple[str, str]],\n content: str,\n data: list[tuple[str, str]],\n files: list[tuple[str, click.File]],\n json: str,\n headers: list[tuple[str, str]],\n cookies: list[tuple[str, str]],\n auth: tuple[str, str] | None,\n proxy: str,\n timeout: float,\n follow_redirects: bool,\n verify: bool,\n http2: bool,\n download: typing.BinaryIO | None,\n verbose: bool,\n) -> None:\n """\n An HTTP command line client.\n Sends a request and displays the response.\n """\n if not method:\n method = "POST" if content or data or files or json else "GET"\n\n try:\n with Client(proxy=proxy, timeout=timeout, http2=http2, verify=verify) as client:\n with client.stream(\n method,\n url,\n params=list(params),\n content=content,\n data=dict(data),\n files=files, # type: ignore\n json=json,\n headers=headers,\n cookies=dict(cookies),\n auth=auth,\n follow_redirects=follow_redirects,\n extensions={"trace": functools.partial(trace, verbose=verbose)},\n ) as response:\n if download is not None:\n download_response(response, download)\n else:\n response.read()\n if response.content:\n print_response(response)\n\n except RequestError as exc:\n console = rich.console.Console()\n console.print(f"[red]{type(exc).__name__}[/red]: {exc}")\n sys.exit(1)\n\n sys.exit(0 if response.is_success else 1)\n
|
.venv\Lib\site-packages\httpx\_main.py
|
_main.py
|
Python
| 15,626 | 0.95 | 0.104743 | 0 |
vue-tools
| 681 |
2025-05-08T17:09:47.003058
|
BSD-3-Clause
| false |
29054b29b3551af46e9fc6aa2a3a8135
|
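The click command above can be driven in-process through click's test runner, which is a convenient way to see the CLI wiring without a shell. This assumes the `httpx[cli]` extras (click, rich, pygments) are installed; the URL is illustrative.

```
from click.testing import CliRunner

from httpx._main import main

runner = CliRunner()
result = runner.invoke(main, ["https://www.example.com", "--follow-redirects"])
print(result.exit_code)  # 0 when the final response has a 2xx status
print(result.output)
```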
from __future__ import annotations\n\nimport io\nimport mimetypes\nimport os\nimport re\nimport typing\nfrom pathlib import Path\n\nfrom ._types import (\n AsyncByteStream,\n FileContent,\n FileTypes,\n RequestData,\n RequestFiles,\n SyncByteStream,\n)\nfrom ._utils import (\n peek_filelike_length,\n primitive_value_to_str,\n to_bytes,\n)\n\n_HTML5_FORM_ENCODING_REPLACEMENTS = {'"': "%22", "\\": "\\\\"}\n_HTML5_FORM_ENCODING_REPLACEMENTS.update(\n {chr(c): "%{:02X}".format(c) for c in range(0x1F + 1) if c != 0x1B}\n)\n_HTML5_FORM_ENCODING_RE = re.compile(\n r"|".join([re.escape(c) for c in _HTML5_FORM_ENCODING_REPLACEMENTS.keys()])\n)\n\n\ndef _format_form_param(name: str, value: str) -> bytes:\n """\n Encode a name/value pair within a multipart form.\n """\n\n def replacer(match: typing.Match[str]) -> str:\n return _HTML5_FORM_ENCODING_REPLACEMENTS[match.group(0)]\n\n value = _HTML5_FORM_ENCODING_RE.sub(replacer, value)\n return f'{name}="{value}"'.encode()\n\n\ndef _guess_content_type(filename: str | None) -> str | None:\n """\n Guesses the mimetype based on a filename. Defaults to `application/octet-stream`.\n\n Returns `None` if `filename` is `None` or empty.\n """\n if filename:\n return mimetypes.guess_type(filename)[0] or "application/octet-stream"\n return None\n\n\ndef get_multipart_boundary_from_content_type(\n content_type: bytes | None,\n) -> bytes | None:\n if not content_type or not content_type.startswith(b"multipart/form-data"):\n return None\n # parse boundary according to\n # https://www.rfc-editor.org/rfc/rfc2046#section-5.1.1\n if b";" in content_type:\n for section in content_type.split(b";"):\n if section.strip().lower().startswith(b"boundary="):\n return section.strip()[len(b"boundary=") :].strip(b'"')\n return None\n\n\nclass DataField:\n """\n A single form field item, within a multipart form field.\n """\n\n def __init__(self, name: str, value: str | bytes | int | float | None) -> None:\n if not isinstance(name, str):\n raise TypeError(\n f"Invalid type for name. Expected str, got {type(name)}: {name!r}"\n )\n if value is not None and not isinstance(value, (str, bytes, int, float)):\n raise TypeError(\n "Invalid type for value. 
Expected primitive type,"\n f" got {type(value)}: {value!r}"\n )\n self.name = name\n self.value: str | bytes = (\n value if isinstance(value, bytes) else primitive_value_to_str(value)\n )\n\n def render_headers(self) -> bytes:\n if not hasattr(self, "_headers"):\n name = _format_form_param("name", self.name)\n self._headers = b"".join(\n [b"Content-Disposition: form-data; ", name, b"\r\n\r\n"]\n )\n\n return self._headers\n\n def render_data(self) -> bytes:\n if not hasattr(self, "_data"):\n self._data = to_bytes(self.value)\n\n return self._data\n\n def get_length(self) -> int:\n headers = self.render_headers()\n data = self.render_data()\n return len(headers) + len(data)\n\n def render(self) -> typing.Iterator[bytes]:\n yield self.render_headers()\n yield self.render_data()\n\n\nclass FileField:\n """\n A single file field item, within a multipart form field.\n """\n\n CHUNK_SIZE = 64 * 1024\n\n def __init__(self, name: str, value: FileTypes) -> None:\n self.name = name\n\n fileobj: FileContent\n\n headers: dict[str, str] = {}\n content_type: str | None = None\n\n # This large tuple based API largely mirror's requests' API\n # It would be good to think of better APIs for this that we could\n # include in httpx 2.0 since variable length tuples(especially of 4 elements)\n # are quite unwieldly\n if isinstance(value, tuple):\n if len(value) == 2:\n # neither the 3rd parameter (content_type) nor the 4th (headers)\n # was included\n filename, fileobj = value\n elif len(value) == 3:\n filename, fileobj, content_type = value\n else:\n # all 4 parameters included\n filename, fileobj, content_type, headers = value # type: ignore\n else:\n filename = Path(str(getattr(value, "name", "upload"))).name\n fileobj = value\n\n if content_type is None:\n content_type = _guess_content_type(filename)\n\n has_content_type_header = any("content-type" in key.lower() for key in headers)\n if content_type is not None and not has_content_type_header:\n # note that unlike requests, we ignore the content_type provided in the 3rd\n # tuple element if it is also included in the headers requests does\n # the opposite (it overwrites the headerwith the 3rd tuple element)\n headers["Content-Type"] = content_type\n\n if isinstance(fileobj, io.StringIO):\n raise TypeError(\n "Multipart file uploads require 'io.BytesIO', not 'io.StringIO'."\n )\n if isinstance(fileobj, io.TextIOBase):\n raise TypeError(\n "Multipart file uploads must be opened in binary mode, not text mode."\n )\n\n self.filename = filename\n self.file = fileobj\n self.headers = headers\n\n def get_length(self) -> int | None:\n headers = self.render_headers()\n\n if isinstance(self.file, (str, bytes)):\n return len(headers) + len(to_bytes(self.file))\n\n file_length = peek_filelike_length(self.file)\n\n # If we can't determine the filesize without reading it into memory,\n # then return `None` here, to indicate an unknown file length.\n if file_length is None:\n return None\n\n return len(headers) + file_length\n\n def render_headers(self) -> bytes:\n if not hasattr(self, "_headers"):\n parts = [\n b"Content-Disposition: form-data; ",\n _format_form_param("name", self.name),\n ]\n if self.filename:\n filename = _format_form_param("filename", self.filename)\n parts.extend([b"; ", filename])\n for header_name, header_value in self.headers.items():\n key, val = f"\r\n{header_name}: ".encode(), header_value.encode()\n parts.extend([key, val])\n parts.append(b"\r\n\r\n")\n self._headers = b"".join(parts)\n\n return self._headers\n\n def render_data(self) -> 
typing.Iterator[bytes]:\n if isinstance(self.file, (str, bytes)):\n yield to_bytes(self.file)\n return\n\n if hasattr(self.file, "seek"):\n try:\n self.file.seek(0)\n except io.UnsupportedOperation:\n pass\n\n chunk = self.file.read(self.CHUNK_SIZE)\n while chunk:\n yield to_bytes(chunk)\n chunk = self.file.read(self.CHUNK_SIZE)\n\n def render(self) -> typing.Iterator[bytes]:\n yield self.render_headers()\n yield from self.render_data()\n\n\nclass MultipartStream(SyncByteStream, AsyncByteStream):\n """\n Request content as streaming multipart encoded form data.\n """\n\n def __init__(\n self,\n data: RequestData,\n files: RequestFiles,\n boundary: bytes | None = None,\n ) -> None:\n if boundary is None:\n boundary = os.urandom(16).hex().encode("ascii")\n\n self.boundary = boundary\n self.content_type = "multipart/form-data; boundary=%s" % boundary.decode(\n "ascii"\n )\n self.fields = list(self._iter_fields(data, files))\n\n def _iter_fields(\n self, data: RequestData, files: RequestFiles\n ) -> typing.Iterator[FileField | DataField]:\n for name, value in data.items():\n if isinstance(value, (tuple, list)):\n for item in value:\n yield DataField(name=name, value=item)\n else:\n yield DataField(name=name, value=value)\n\n file_items = files.items() if isinstance(files, typing.Mapping) else files\n for name, value in file_items:\n yield FileField(name=name, value=value)\n\n def iter_chunks(self) -> typing.Iterator[bytes]:\n for field in self.fields:\n yield b"--%s\r\n" % self.boundary\n yield from field.render()\n yield b"\r\n"\n yield b"--%s--\r\n" % self.boundary\n\n def get_content_length(self) -> int | None:\n """\n Return the length of the multipart encoded content, or `None` if\n any of the files have a length that cannot be determined upfront.\n """\n boundary_length = len(self.boundary)\n length = 0\n\n for field in self.fields:\n field_length = field.get_length()\n if field_length is None:\n return None\n\n length += 2 + boundary_length + 2 # b"--{boundary}\r\n"\n length += field_length\n length += 2 # b"\r\n"\n\n length += 2 + boundary_length + 4 # b"--{boundary}--\r\n"\n return length\n\n # Content stream interface.\n\n def get_headers(self) -> dict[str, str]:\n content_length = self.get_content_length()\n content_type = self.content_type\n if content_length is None:\n return {"Transfer-Encoding": "chunked", "Content-Type": content_type}\n return {"Content-Length": str(content_length), "Content-Type": content_type}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n for chunk in self.iter_chunks():\n yield chunk\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n for chunk in self.iter_chunks():\n yield chunk\n
|
.venv\Lib\site-packages\httpx\_multipart.py
|
_multipart.py
|
Python
| 9,843 | 0.95 | 0.236667 | 0.061983 |
react-lib
| 756 |
2025-01-31T07:02:02.211695
|
MIT
| false |
fe7ae564adb097800effdfc433217964
|
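A sketch of the encoder above used directly; a fixed boundary is passed so the output is deterministic (normally it comes from `os.urandom(16).hex()`).

```
from httpx._multipart import MultipartStream

stream = MultipartStream(
    data={"greeting": "hello"},
    files={"upload": ("notes.txt", b"file contents", "text/plain")},
    boundary=b"boundarytoken",
)
print(stream.get_headers())  # Content-Length plus the boundary-bearing Content-Type
body = b"".join(stream)      # iter_chunks() renders each field between boundaries
assert b"--boundarytoken\r\n" in body and body.endswith(b"--boundarytoken--\r\n")
```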
from __future__ import annotations\n\nfrom enum import IntEnum\n\n__all__ = ["codes"]\n\n\nclass codes(IntEnum):\n """HTTP status codes and reason phrases\n\n Status codes from the following RFCs are all observed:\n\n * RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616\n * RFC 6585: Additional HTTP Status Codes\n * RFC 3229: Delta encoding in HTTP\n * RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518\n * RFC 5842: Binding Extensions to WebDAV\n * RFC 7238: Permanent Redirect\n * RFC 2295: Transparent Content Negotiation in HTTP\n * RFC 2774: An HTTP Extension Framework\n * RFC 7540: Hypertext Transfer Protocol Version 2 (HTTP/2)\n * RFC 2324: Hyper Text Coffee Pot Control Protocol (HTCPCP/1.0)\n * RFC 7725: An HTTP Status Code to Report Legal Obstacles\n * RFC 8297: An HTTP Status Code for Indicating Hints\n * RFC 8470: Using Early Data in HTTP\n """\n\n def __new__(cls, value: int, phrase: str = "") -> codes:\n obj = int.__new__(cls, value)\n obj._value_ = value\n\n obj.phrase = phrase # type: ignore[attr-defined]\n return obj\n\n def __str__(self) -> str:\n return str(self.value)\n\n @classmethod\n def get_reason_phrase(cls, value: int) -> str:\n try:\n return codes(value).phrase # type: ignore\n except ValueError:\n return ""\n\n @classmethod\n def is_informational(cls, value: int) -> bool:\n """\n Returns `True` for 1xx status codes, `False` otherwise.\n """\n return 100 <= value <= 199\n\n @classmethod\n def is_success(cls, value: int) -> bool:\n """\n Returns `True` for 2xx status codes, `False` otherwise.\n """\n return 200 <= value <= 299\n\n @classmethod\n def is_redirect(cls, value: int) -> bool:\n """\n Returns `True` for 3xx status codes, `False` otherwise.\n """\n return 300 <= value <= 399\n\n @classmethod\n def is_client_error(cls, value: int) -> bool:\n """\n Returns `True` for 4xx status codes, `False` otherwise.\n """\n return 400 <= value <= 499\n\n @classmethod\n def is_server_error(cls, value: int) -> bool:\n """\n Returns `True` for 5xx status codes, `False` otherwise.\n """\n return 500 <= value <= 599\n\n @classmethod\n def is_error(cls, value: int) -> bool:\n """\n Returns `True` for 4xx or 5xx status codes, `False` otherwise.\n """\n return 400 <= value <= 599\n\n # informational\n CONTINUE = 100, "Continue"\n SWITCHING_PROTOCOLS = 101, "Switching Protocols"\n PROCESSING = 102, "Processing"\n EARLY_HINTS = 103, "Early Hints"\n\n # success\n OK = 200, "OK"\n CREATED = 201, "Created"\n ACCEPTED = 202, "Accepted"\n NON_AUTHORITATIVE_INFORMATION = 203, "Non-Authoritative Information"\n NO_CONTENT = 204, "No Content"\n RESET_CONTENT = 205, "Reset Content"\n PARTIAL_CONTENT = 206, "Partial Content"\n MULTI_STATUS = 207, "Multi-Status"\n ALREADY_REPORTED = 208, "Already Reported"\n IM_USED = 226, "IM Used"\n\n # redirection\n MULTIPLE_CHOICES = 300, "Multiple Choices"\n MOVED_PERMANENTLY = 301, "Moved Permanently"\n FOUND = 302, "Found"\n SEE_OTHER = 303, "See Other"\n NOT_MODIFIED = 304, "Not Modified"\n USE_PROXY = 305, "Use Proxy"\n TEMPORARY_REDIRECT = 307, "Temporary Redirect"\n PERMANENT_REDIRECT = 308, "Permanent Redirect"\n\n # client error\n BAD_REQUEST = 400, "Bad Request"\n UNAUTHORIZED = 401, "Unauthorized"\n PAYMENT_REQUIRED = 402, "Payment Required"\n FORBIDDEN = 403, "Forbidden"\n NOT_FOUND = 404, "Not Found"\n METHOD_NOT_ALLOWED = 405, "Method Not Allowed"\n NOT_ACCEPTABLE = 406, "Not Acceptable"\n PROXY_AUTHENTICATION_REQUIRED = 407, "Proxy Authentication Required"\n REQUEST_TIMEOUT = 408, "Request Timeout"\n CONFLICT = 409, 
"Conflict"\n GONE = 410, "Gone"\n LENGTH_REQUIRED = 411, "Length Required"\n PRECONDITION_FAILED = 412, "Precondition Failed"\n REQUEST_ENTITY_TOO_LARGE = 413, "Request Entity Too Large"\n REQUEST_URI_TOO_LONG = 414, "Request-URI Too Long"\n UNSUPPORTED_MEDIA_TYPE = 415, "Unsupported Media Type"\n REQUESTED_RANGE_NOT_SATISFIABLE = 416, "Requested Range Not Satisfiable"\n EXPECTATION_FAILED = 417, "Expectation Failed"\n IM_A_TEAPOT = 418, "I'm a teapot"\n MISDIRECTED_REQUEST = 421, "Misdirected Request"\n UNPROCESSABLE_ENTITY = 422, "Unprocessable Entity"\n LOCKED = 423, "Locked"\n FAILED_DEPENDENCY = 424, "Failed Dependency"\n TOO_EARLY = 425, "Too Early"\n UPGRADE_REQUIRED = 426, "Upgrade Required"\n PRECONDITION_REQUIRED = 428, "Precondition Required"\n TOO_MANY_REQUESTS = 429, "Too Many Requests"\n REQUEST_HEADER_FIELDS_TOO_LARGE = 431, "Request Header Fields Too Large"\n UNAVAILABLE_FOR_LEGAL_REASONS = 451, "Unavailable For Legal Reasons"\n\n # server errors\n INTERNAL_SERVER_ERROR = 500, "Internal Server Error"\n NOT_IMPLEMENTED = 501, "Not Implemented"\n BAD_GATEWAY = 502, "Bad Gateway"\n SERVICE_UNAVAILABLE = 503, "Service Unavailable"\n GATEWAY_TIMEOUT = 504, "Gateway Timeout"\n HTTP_VERSION_NOT_SUPPORTED = 505, "HTTP Version Not Supported"\n VARIANT_ALSO_NEGOTIATES = 506, "Variant Also Negotiates"\n INSUFFICIENT_STORAGE = 507, "Insufficient Storage"\n LOOP_DETECTED = 508, "Loop Detected"\n NOT_EXTENDED = 510, "Not Extended"\n NETWORK_AUTHENTICATION_REQUIRED = 511, "Network Authentication Required"\n\n\n# Include lower-case styles for `requests` compatibility.\nfor code in codes:\n setattr(codes, code._name_.lower(), int(code))\n
|
.venv\Lib\site-packages\httpx\_status_codes.py
|
_status_codes.py
|
Python
| 5,639 | 0.95 | 0.12963 | 0.136691 |
react-lib
| 769 |
2023-11-12T17:07:23.791401
|
BSD-3-Clause
| false |
5854b8d256f66ebb1cc635624dc4d739
|
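A sketch of the enum above in use: members compare directly as integers, unknown values fall back to an empty reason phrase, and the trailing loop provides requests-style lower-case aliases.

```
import httpx

assert httpx.codes.NOT_FOUND == 404
assert httpx.codes.NOT_FOUND.phrase == "Not Found"
assert httpx.codes.get_reason_phrase(503) == "Service Unavailable"
assert httpx.codes.get_reason_phrase(599) == ""  # unknown codes yield ""
assert httpx.codes.is_client_error(404) and not httpx.codes.is_error(302)
assert httpx.codes.not_found == 404  # lower-case alias from the setattr loop
```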
"""\nType definitions for type checking purposes.\n"""\n\nfrom http.cookiejar import CookieJar\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n AsyncIterable,\n AsyncIterator,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nif TYPE_CHECKING: # pragma: no cover\n from ._auth import Auth # noqa: F401\n from ._config import Proxy, Timeout # noqa: F401\n from ._models import Cookies, Headers, Request # noqa: F401\n from ._urls import URL, QueryParams # noqa: F401\n\n\nPrimitiveData = Optional[Union[str, int, float, bool]]\n\nURLTypes = Union["URL", str]\n\nQueryParamTypes = Union[\n "QueryParams",\n Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],\n List[Tuple[str, PrimitiveData]],\n Tuple[Tuple[str, PrimitiveData], ...],\n str,\n bytes,\n]\n\nHeaderTypes = Union[\n "Headers",\n Mapping[str, str],\n Mapping[bytes, bytes],\n Sequence[Tuple[str, str]],\n Sequence[Tuple[bytes, bytes]],\n]\n\nCookieTypes = Union["Cookies", CookieJar, Dict[str, str], List[Tuple[str, str]]]\n\nTimeoutTypes = Union[\n Optional[float],\n Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],\n "Timeout",\n]\nProxyTypes = Union["URL", str, "Proxy"]\nCertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]\n\nAuthTypes = Union[\n Tuple[Union[str, bytes], Union[str, bytes]],\n Callable[["Request"], "Request"],\n "Auth",\n]\n\nRequestContent = Union[str, bytes, Iterable[bytes], AsyncIterable[bytes]]\nResponseContent = Union[str, bytes, Iterable[bytes], AsyncIterable[bytes]]\nResponseExtensions = Mapping[str, Any]\n\nRequestData = Mapping[str, Any]\n\nFileContent = Union[IO[bytes], bytes, str]\nFileTypes = Union[\n # file (or bytes)\n FileContent,\n # (filename, file (or bytes))\n Tuple[Optional[str], FileContent],\n # (filename, file (or bytes), content_type)\n Tuple[Optional[str], FileContent, Optional[str]],\n # (filename, file (or bytes), content_type, headers)\n Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]],\n]\nRequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]\n\nRequestExtensions = Mapping[str, Any]\n\n__all__ = ["AsyncByteStream", "SyncByteStream"]\n\n\nclass SyncByteStream:\n def __iter__(self) -> Iterator[bytes]:\n raise NotImplementedError(\n "The '__iter__' method must be implemented."\n ) # pragma: no cover\n yield b"" # pragma: no cover\n\n def close(self) -> None:\n """\n Subclasses can override this method to release any network resources\n after a request/response cycle is complete.\n """\n\n\nclass AsyncByteStream:\n async def __aiter__(self) -> AsyncIterator[bytes]:\n raise NotImplementedError(\n "The '__aiter__' method must be implemented."\n ) # pragma: no cover\n yield b"" # pragma: no cover\n\n async def aclose(self) -> None:\n pass\n
|
.venv\Lib\site-packages\httpx\_types.py
|
_types.py
|
Python
| 2,965 | 0.95 | 0.070175 | 0.043011 |
node-utils
| 962 |
2024-08-12T19:25:50.833678
|
MIT
| false |
db5e5f4c3768abfcdce89e3f46b037aa
|
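A sketch of subclassing the byte-stream interface above to supply request content from disk; the path and upload URL are hypothetical.

```
import typing

import httpx


class FileStream(httpx.SyncByteStream):
    def __init__(self, path: str) -> None:
        self._path = path

    def __iter__(self) -> typing.Iterator[bytes]:
        # Stream the file in fixed-size chunks rather than reading it whole.
        with open(self._path, "rb") as f:
            while chunk := f.read(64 * 1024):
                yield chunk


request = httpx.Request(
    "POST", "https://www.example.com/upload", stream=FileStream("payload.bin")
)
```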
"""\nAn implementation of `urlparse` that provides URL validation and normalization\nas described by RFC3986.\n\nWe rely on this implementation rather than the one in Python's stdlib, because:\n\n* It provides more complete URL validation.\n* It properly differentiates between an empty querystring and an absent querystring,\n to distinguish URLs with a trailing '?'.\n* It handles scheme, hostname, port, and path normalization.\n* It supports IDNA hostnames, normalizing them to their encoded form.\n* The API supports passing individual components, as well as the complete URL string.\n\nPreviously we relied on the excellent `rfc3986` package to handle URL parsing and\nvalidation, but this module provides a simpler alternative, with less indirection\nrequired.\n"""\n\nfrom __future__ import annotations\n\nimport ipaddress\nimport re\nimport typing\n\nimport idna\n\nfrom ._exceptions import InvalidURL\n\nMAX_URL_LENGTH = 65536\n\n# https://datatracker.ietf.org/doc/html/rfc3986.html#section-2.3\nUNRESERVED_CHARACTERS = (\n "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~"\n)\nSUB_DELIMS = "!$&'()*+,;="\n\nPERCENT_ENCODED_REGEX = re.compile("%[A-Fa-f0-9]{2}")\n\n# https://url.spec.whatwg.org/#percent-encoded-bytes\n\n# The fragment percent-encode set is the C0 control percent-encode set\n# and U+0020 SPACE, U+0022 ("), U+003C (<), U+003E (>), and U+0060 (`).\nFRAG_SAFE = "".join(\n [chr(i) for i in range(0x20, 0x7F) if i not in (0x20, 0x22, 0x3C, 0x3E, 0x60)]\n)\n\n# The query percent-encode set is the C0 control percent-encode set\n# and U+0020 SPACE, U+0022 ("), U+0023 (#), U+003C (<), and U+003E (>).\nQUERY_SAFE = "".join(\n [chr(i) for i in range(0x20, 0x7F) if i not in (0x20, 0x22, 0x23, 0x3C, 0x3E)]\n)\n\n# The path percent-encode set is the query percent-encode set\n# and U+003F (?), U+0060 (`), U+007B ({), and U+007D (}).\nPATH_SAFE = "".join(\n [\n chr(i)\n for i in range(0x20, 0x7F)\n if i not in (0x20, 0x22, 0x23, 0x3C, 0x3E) + (0x3F, 0x60, 0x7B, 0x7D)\n ]\n)\n\n# The userinfo percent-encode set is the path percent-encode set\n# and U+002F (/), U+003A (:), U+003B (;), U+003D (=), U+0040 (@),\n# U+005B ([) to U+005E (^), inclusive, and U+007C (|).\nUSERNAME_SAFE = "".join(\n [\n chr(i)\n for i in range(0x20, 0x7F)\n if i\n not in (0x20, 0x22, 0x23, 0x3C, 0x3E)\n + (0x3F, 0x60, 0x7B, 0x7D)\n + (0x2F, 0x3A, 0x3B, 0x3D, 0x40, 0x5B, 0x5C, 0x5D, 0x5E, 0x7C)\n ]\n)\nPASSWORD_SAFE = "".join(\n [\n chr(i)\n for i in range(0x20, 0x7F)\n if i\n not in (0x20, 0x22, 0x23, 0x3C, 0x3E)\n + (0x3F, 0x60, 0x7B, 0x7D)\n + (0x2F, 0x3A, 0x3B, 0x3D, 0x40, 0x5B, 0x5C, 0x5D, 0x5E, 0x7C)\n ]\n)\n# Note... The terminology 'userinfo' percent-encode set in the WHATWG document\n# is used for the username and password quoting. 
For the joint userinfo component\n# we remove U+003A (:) from the safe set.\nUSERINFO_SAFE = "".join(\n [\n chr(i)\n for i in range(0x20, 0x7F)\n if i\n not in (0x20, 0x22, 0x23, 0x3C, 0x3E)\n + (0x3F, 0x60, 0x7B, 0x7D)\n + (0x2F, 0x3B, 0x3D, 0x40, 0x5B, 0x5C, 0x5D, 0x5E, 0x7C)\n ]\n)\n\n\n# {scheme}: (optional)\n# //{authority} (optional)\n# {path}\n# ?{query} (optional)\n# #{fragment} (optional)\nURL_REGEX = re.compile(\n (\n r"(?:(?P<scheme>{scheme}):)?"\n r"(?://(?P<authority>{authority}))?"\n r"(?P<path>{path})"\n r"(?:\?(?P<query>{query}))?"\n r"(?:#(?P<fragment>{fragment}))?"\n ).format(\n scheme="([a-zA-Z][a-zA-Z0-9+.-]*)?",\n authority="[^/?#]*",\n path="[^?#]*",\n query="[^#]*",\n fragment=".*",\n )\n)\n\n# {userinfo}@ (optional)\n# {host}\n# :{port} (optional)\nAUTHORITY_REGEX = re.compile(\n (\n r"(?:(?P<userinfo>{userinfo})@)?" r"(?P<host>{host})" r":?(?P<port>{port})?"\n ).format(\n userinfo=".*", # Any character sequence.\n host="(\\[.*\\]|[^:@]*)", # Either any character sequence excluding ':' or '@',\n # or an IPv6 address enclosed within square brackets.\n port=".*", # Any character sequence.\n )\n)\n\n\n# If we call urlparse with an individual component, then we need to regex\n# validate that component individually.\n# Note that we're duplicating the same strings as above. Shock! Horror!!\nCOMPONENT_REGEX = {\n "scheme": re.compile("([a-zA-Z][a-zA-Z0-9+.-]*)?"),\n "authority": re.compile("[^/?#]*"),\n "path": re.compile("[^?#]*"),\n "query": re.compile("[^#]*"),\n "fragment": re.compile(".*"),\n "userinfo": re.compile("[^@]*"),\n "host": re.compile("(\\[.*\\]|[^:]*)"),\n "port": re.compile(".*"),\n}\n\n\n# We use these simple regexs as a first pass before handing off to\n# the stdlib 'ipaddress' module for IP address validation.\nIPv4_STYLE_HOSTNAME = re.compile(r"^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$")\nIPv6_STYLE_HOSTNAME = re.compile(r"^\[.*\]$")\n\n\nclass ParseResult(typing.NamedTuple):\n scheme: str\n userinfo: str\n host: str\n port: int | None\n path: str\n query: str | None\n fragment: str | None\n\n @property\n def authority(self) -> str:\n return "".join(\n [\n f"{self.userinfo}@" if self.userinfo else "",\n f"[{self.host}]" if ":" in self.host else self.host,\n f":{self.port}" if self.port is not None else "",\n ]\n )\n\n @property\n def netloc(self) -> str:\n return "".join(\n [\n f"[{self.host}]" if ":" in self.host else self.host,\n f":{self.port}" if self.port is not None else "",\n ]\n )\n\n def copy_with(self, **kwargs: str | None) -> ParseResult:\n if not kwargs:\n return self\n\n defaults = {\n "scheme": self.scheme,\n "authority": self.authority,\n "path": self.path,\n "query": self.query,\n "fragment": self.fragment,\n }\n defaults.update(kwargs)\n return urlparse("", **defaults)\n\n def __str__(self) -> str:\n authority = self.authority\n return "".join(\n [\n f"{self.scheme}:" if self.scheme else "",\n f"//{authority}" if authority else "",\n self.path,\n f"?{self.query}" if self.query is not None else "",\n f"#{self.fragment}" if self.fragment is not None else "",\n ]\n )\n\n\ndef urlparse(url: str = "", **kwargs: str | None) -> ParseResult:\n # Initial basic checks on allowable URLs.\n # ---------------------------------------\n\n # Hard limit the maximum allowable URL length.\n if len(url) > MAX_URL_LENGTH:\n raise InvalidURL("URL too long")\n\n # If a URL includes any ASCII control characters including \t, \r, \n,\n # then treat it as invalid.\n if any(char.isascii() and not char.isprintable() for char in url):\n char = next(char for char in url if 
char.isascii() and not char.isprintable())\n idx = url.find(char)\n error = (\n f"Invalid non-printable ASCII character in URL, {char!r} at position {idx}."\n )\n raise InvalidURL(error)\n\n # Some keyword arguments require special handling.\n # ------------------------------------------------\n\n # Coerce "port" to a string, if it is provided as an integer.\n if "port" in kwargs:\n port = kwargs["port"]\n kwargs["port"] = str(port) if isinstance(port, int) else port\n\n # Replace "netloc" with "host and "port".\n if "netloc" in kwargs:\n netloc = kwargs.pop("netloc") or ""\n kwargs["host"], _, kwargs["port"] = netloc.partition(":")\n\n # Replace "username" and/or "password" with "userinfo".\n if "username" in kwargs or "password" in kwargs:\n username = quote(kwargs.pop("username", "") or "", safe=USERNAME_SAFE)\n password = quote(kwargs.pop("password", "") or "", safe=PASSWORD_SAFE)\n kwargs["userinfo"] = f"{username}:{password}" if password else username\n\n # Replace "raw_path" with "path" and "query".\n if "raw_path" in kwargs:\n raw_path = kwargs.pop("raw_path") or ""\n kwargs["path"], seperator, kwargs["query"] = raw_path.partition("?")\n if not seperator:\n kwargs["query"] = None\n\n # Ensure that IPv6 "host" addresses are always escaped with "[...]".\n if "host" in kwargs:\n host = kwargs.get("host") or ""\n if ":" in host and not (host.startswith("[") and host.endswith("]")):\n kwargs["host"] = f"[{host}]"\n\n # If any keyword arguments are provided, ensure they are valid.\n # -------------------------------------------------------------\n\n for key, value in kwargs.items():\n if value is not None:\n if len(value) > MAX_URL_LENGTH:\n raise InvalidURL(f"URL component '{key}' too long")\n\n # If a component includes any ASCII control characters including \t, \r, \n,\n # then treat it as invalid.\n if any(char.isascii() and not char.isprintable() for char in value):\n char = next(\n char for char in value if char.isascii() and not char.isprintable()\n )\n idx = value.find(char)\n error = (\n f"Invalid non-printable ASCII character in URL {key} component, "\n f"{char!r} at position {idx}."\n )\n raise InvalidURL(error)\n\n # Ensure that keyword arguments match as a valid regex.\n if not COMPONENT_REGEX[key].fullmatch(value):\n raise InvalidURL(f"Invalid URL component '{key}'")\n\n # The URL_REGEX will always match, but may have empty components.\n url_match = URL_REGEX.match(url)\n assert url_match is not None\n url_dict = url_match.groupdict()\n\n # * 'scheme', 'authority', and 'path' may be empty strings.\n # * 'query' may be 'None', indicating no trailing "?" 
portion.\n # Any string including the empty string, indicates a trailing "?".\n # * 'fragment' may be 'None', indicating no trailing "#" portion.\n # Any string including the empty string, indicates a trailing "#".\n scheme = kwargs.get("scheme", url_dict["scheme"]) or ""\n authority = kwargs.get("authority", url_dict["authority"]) or ""\n path = kwargs.get("path", url_dict["path"]) or ""\n query = kwargs.get("query", url_dict["query"])\n frag = kwargs.get("fragment", url_dict["fragment"])\n\n # The AUTHORITY_REGEX will always match, but may have empty components.\n authority_match = AUTHORITY_REGEX.match(authority)\n assert authority_match is not None\n authority_dict = authority_match.groupdict()\n\n # * 'userinfo' and 'host' may be empty strings.\n # * 'port' may be 'None'.\n userinfo = kwargs.get("userinfo", authority_dict["userinfo"]) or ""\n host = kwargs.get("host", authority_dict["host"]) or ""\n port = kwargs.get("port", authority_dict["port"])\n\n # Normalize and validate each component.\n # We end up with a parsed representation of the URL,\n # with components that are plain ASCII bytestrings.\n parsed_scheme: str = scheme.lower()\n parsed_userinfo: str = quote(userinfo, safe=USERINFO_SAFE)\n parsed_host: str = encode_host(host)\n parsed_port: int | None = normalize_port(port, scheme)\n\n has_scheme = parsed_scheme != ""\n has_authority = (\n parsed_userinfo != "" or parsed_host != "" or parsed_port is not None\n )\n validate_path(path, has_scheme=has_scheme, has_authority=has_authority)\n if has_scheme or has_authority:\n path = normalize_path(path)\n\n parsed_path: str = quote(path, safe=PATH_SAFE)\n parsed_query: str | None = None if query is None else quote(query, safe=QUERY_SAFE)\n parsed_frag: str | None = None if frag is None else quote(frag, safe=FRAG_SAFE)\n\n # The parsed ASCII bytestrings are our canonical form.\n # All properties of the URL are derived from these.\n return ParseResult(\n parsed_scheme,\n parsed_userinfo,\n parsed_host,\n parsed_port,\n parsed_path,\n parsed_query,\n parsed_frag,\n )\n\n\ndef encode_host(host: str) -> str:\n if not host:\n return ""\n\n elif IPv4_STYLE_HOSTNAME.match(host):\n # Validate IPv4 hostnames like #.#.#.#\n #\n # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2\n #\n # IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet\n try:\n ipaddress.IPv4Address(host)\n except ipaddress.AddressValueError:\n raise InvalidURL(f"Invalid IPv4 address: {host!r}")\n return host\n\n elif IPv6_STYLE_HOSTNAME.match(host):\n # Validate IPv6 hostnames like [...]\n #\n # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2\n #\n # "A host identified by an Internet Protocol literal address, version 6\n # [RFC3513] or later, is distinguished by enclosing the IP literal\n # within square brackets ("[" and "]"). 
This is the only place where\n # square bracket characters are allowed in the URI syntax."\n try:\n ipaddress.IPv6Address(host[1:-1])\n except ipaddress.AddressValueError:\n raise InvalidURL(f"Invalid IPv6 address: {host!r}")\n return host[1:-1]\n\n elif host.isascii():\n # Regular ASCII hostnames\n #\n # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2\n #\n # reg-name = *( unreserved / pct-encoded / sub-delims )\n WHATWG_SAFE = '"`{}%|\\'\n return quote(host.lower(), safe=SUB_DELIMS + WHATWG_SAFE)\n\n # IDNA hostnames\n try:\n return idna.encode(host.lower()).decode("ascii")\n except idna.IDNAError:\n raise InvalidURL(f"Invalid IDNA hostname: {host!r}")\n\n\ndef normalize_port(port: str | int | None, scheme: str) -> int | None:\n # From https://tools.ietf.org/html/rfc3986#section-3.2.3\n #\n # "A scheme may define a default port. For example, the "http" scheme\n # defines a default port of "80", corresponding to its reserved TCP\n # port number. The type of port designated by the port number (e.g.,\n # TCP, UDP, SCTP) is defined by the URI scheme. URI producers and\n # normalizers should omit the port component and its ":" delimiter if\n # port is empty or if its value would be the same as that of the\n # scheme's default."\n if port is None or port == "":\n return None\n\n try:\n port_as_int = int(port)\n except ValueError:\n raise InvalidURL(f"Invalid port: {port!r}")\n\n # See https://url.spec.whatwg.org/#url-miscellaneous\n default_port = {"ftp": 21, "http": 80, "https": 443, "ws": 80, "wss": 443}.get(\n scheme\n )\n if port_as_int == default_port:\n return None\n return port_as_int\n\n\ndef validate_path(path: str, has_scheme: bool, has_authority: bool) -> None:\n """\n Path validation rules that depend on if the URL contains\n a scheme or authority component.\n\n See https://datatracker.ietf.org/doc/html/rfc3986.html#section-3.3\n """\n if has_authority:\n # If a URI contains an authority component, then the path component\n # must either be empty or begin with a slash ("/") character."\n if path and not path.startswith("/"):\n raise InvalidURL("For absolute URLs, path must be empty or begin with '/'")\n\n if not has_scheme and not has_authority:\n # If a URI does not contain an authority component, then the path cannot begin\n # with two slash characters ("//").\n if path.startswith("//"):\n raise InvalidURL("Relative URLs cannot have a path starting with '//'")\n\n # In addition, a URI reference (Section 4.1) may be a relative-path reference,\n # in which case the first path segment cannot contain a colon (":") character.\n if path.startswith(":"):\n raise InvalidURL("Relative URLs cannot have a path starting with ':'")\n\n\ndef normalize_path(path: str) -> str:\n """\n Drop "." and ".." segments from a URL path.\n\n For example:\n\n normalize_path("/path/./to/somewhere/..") == "/path/to"\n """\n # Fast return when no '.' characters in the path.\n if "." not in path:\n return path\n\n components = path.split("/")\n\n # Fast return when no '.' or '..' components in the path.\n if "." not in components and ".." 
not in components:\n return path\n\n # https://datatracker.ietf.org/doc/html/rfc3986#section-5.2.4\n output: list[str] = []\n for component in components:\n if component == ".":\n pass\n elif component == "..":\n if output and output != [""]:\n output.pop()\n else:\n output.append(component)\n return "/".join(output)\n\n\ndef PERCENT(string: str) -> str:\n return "".join([f"%{byte:02X}" for byte in string.encode("utf-8")])\n\n\ndef percent_encoded(string: str, safe: str) -> str:\n """\n Use percent-encoding to quote a string.\n """\n NON_ESCAPED_CHARS = UNRESERVED_CHARACTERS + safe\n\n # Fast path for strings that don't need escaping.\n if not string.rstrip(NON_ESCAPED_CHARS):\n return string\n\n return "".join(\n [char if char in NON_ESCAPED_CHARS else PERCENT(char) for char in string]\n )\n\n\ndef quote(string: str, safe: str) -> str:\n """\n Use percent-encoding to quote a string, omitting existing '%xx' escape sequences.\n\n See: https://www.rfc-editor.org/rfc/rfc3986#section-2.1\n\n * `string`: The string to be percent-escaped.\n * `safe`: A string containing characters that may be treated as safe, and do not\n need to be escaped. Unreserved characters are always treated as safe.\n See: https://www.rfc-editor.org/rfc/rfc3986#section-2.3\n """\n parts = []\n current_position = 0\n for match in re.finditer(PERCENT_ENCODED_REGEX, string):\n start_position, end_position = match.start(), match.end()\n matched_text = match.group(0)\n # Add any text up to the '%xx' escape sequence.\n if start_position != current_position:\n leading_text = string[current_position:start_position]\n parts.append(percent_encoded(leading_text, safe=safe))\n\n # Add the '%xx' escape sequence.\n parts.append(matched_text)\n current_position = end_position\n\n # Add any text after the final '%xx' escape sequence.\n if current_position != len(string):\n trailing_text = string[current_position:]\n parts.append(percent_encoded(trailing_text, safe=safe))\n\n return "".join(parts)\n
| .venv\Lib\site-packages\httpx\_urlparse.py | _urlparse.py | Python | 18,546 | 0.95 | 0.172676 | 0.244898 | vue-tools | 750 | 2024-09-21T22:22:16.917609 | MIT | false | e3f497a9ded8b4ed23db05619c4e31fb |
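Usage sketch for the `_urlparse.py` record above. This is a minimal, hedged example: it assumes the private `httpx._urlparse` module keeps exporting these helpers (internal API, subject to change between releases); the expected values follow directly from the code shown in the record.

```python
# Exercising the helpers defined in httpx/_urlparse.py above.
# NOTE: httpx._urlparse is private API; the import path is an assumption.
from httpx._urlparse import normalize_path, normalize_port, quote

# Dot-segments are collapsed per RFC 3986, section 5.2.4.
assert normalize_path("/path/./to/somewhere/..") == "/path/to"

# The scheme's default port is dropped; other ports parse to int.
assert normalize_port("80", "http") is None
assert normalize_port("8080", "http") == 8080

# quote() percent-encodes unsafe characters but leaves existing
# '%xx' escape sequences untouched, so applying it twice is a no-op.
assert quote("a b", safe="") == "a%20b"
assert quote("a%20b", safe="") == "a%20b"
```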
from .__version__ import __description__, __title__, __version__\nfrom ._api import *\nfrom ._auth import *\nfrom ._client import *\nfrom ._config import *\nfrom ._content import *\nfrom ._exceptions import *\nfrom ._models import *\nfrom ._status_codes import *\nfrom ._transports import *\nfrom ._types import *\nfrom ._urls import *\n\ntry:\n from ._main import main\nexcept ImportError: # pragma: no cover\n\n def main() -> None: # type: ignore\n import sys\n\n print(\n "The httpx command line client could not run because the required "\n "dependencies were not installed.\nMake sure you've installed "\n "everything with: pip install 'httpx[cli]'"\n )\n sys.exit(1)\n\n\n__all__ = [\n "__description__",\n "__title__",\n "__version__",\n "ASGITransport",\n "AsyncBaseTransport",\n "AsyncByteStream",\n "AsyncClient",\n "AsyncHTTPTransport",\n "Auth",\n "BaseTransport",\n "BasicAuth",\n "ByteStream",\n "Client",\n "CloseError",\n "codes",\n "ConnectError",\n "ConnectTimeout",\n "CookieConflict",\n "Cookies",\n "create_ssl_context",\n "DecodingError",\n "delete",\n "DigestAuth",\n "get",\n "head",\n "Headers",\n "HTTPError",\n "HTTPStatusError",\n "HTTPTransport",\n "InvalidURL",\n "Limits",\n "LocalProtocolError",\n "main",\n "MockTransport",\n "NetRCAuth",\n "NetworkError",\n "options",\n "patch",\n "PoolTimeout",\n "post",\n "ProtocolError",\n "Proxy",\n "ProxyError",\n "put",\n "QueryParams",\n "ReadError",\n "ReadTimeout",\n "RemoteProtocolError",\n "request",\n "Request",\n "RequestError",\n "RequestNotRead",\n "Response",\n "ResponseNotRead",\n "stream",\n "StreamClosed",\n "StreamConsumed",\n "StreamError",\n "SyncByteStream",\n "Timeout",\n "TimeoutException",\n "TooManyRedirects",\n "TransportError",\n "UnsupportedProtocol",\n "URL",\n "USE_CLIENT_DEFAULT",\n "WriteError",\n "WriteTimeout",\n "WSGITransport",\n]\n\n\n__locals = locals()\nfor __name in __all__:\n if not __name.startswith("__"):\n setattr(__locals[__name], "__module__", "httpx") # noqa\n
| .venv\Lib\site-packages\httpx\__init__.py | __init__.py | Python | 2,171 | 0.95 | 0.038095 | 0 | awesome-app | 414 | 2025-03-20T02:19:27.473049 | Apache-2.0 | false | fe563009b4674da74d63949ef32b634b |
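The loop at the end of the `__init__.py` record above rewrites each public name's `__module__` so that reprs and tracebacks point at the `httpx` package rather than the private submodule that defines it. A quick check of that behavior:

```python
import httpx

# Client is implemented in httpx._client, but the setattr loop in
# __init__.py reassigns its __module__ to the public package name.
assert httpx.Client.__module__ == "httpx"
assert httpx.Response.__module__ == "httpx"
```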
__title__ = "httpx"\n__description__ = "A next generation HTTP client, for Python 3."\n__version__ = "0.28.1"\n
| .venv\Lib\site-packages\httpx\__version__.py | __version__.py | Python | 108 | 0.7 | 0.333333 | 0 | vue-tools | 524 | 2024-11-27T22:05:29.465821 | MIT | false | 4ee48306a9d12e92b58c326a459590b7 |
from __future__ import annotations\n\nimport typing\n\nfrom .._models import Request, Response\nfrom .._types import AsyncByteStream\nfrom .base import AsyncBaseTransport\n\nif typing.TYPE_CHECKING: # pragma: no cover\n import asyncio\n\n import trio\n\n Event = typing.Union[asyncio.Event, trio.Event]\n\n\n_Message = typing.MutableMapping[str, typing.Any]\n_Receive = typing.Callable[[], typing.Awaitable[_Message]]\n_Send = typing.Callable[\n [typing.MutableMapping[str, typing.Any]], typing.Awaitable[None]\n]\n_ASGIApp = typing.Callable[\n [typing.MutableMapping[str, typing.Any], _Receive, _Send], typing.Awaitable[None]\n]\n\n__all__ = ["ASGITransport"]\n\n\ndef is_running_trio() -> bool:\n try:\n # sniffio is a dependency of trio.\n\n # See https://github.com/python-trio/trio/issues/2802\n import sniffio\n\n if sniffio.current_async_library() == "trio":\n return True\n except ImportError: # pragma: nocover\n pass\n\n return False\n\n\ndef create_event() -> Event:\n if is_running_trio():\n import trio\n\n return trio.Event()\n\n import asyncio\n\n return asyncio.Event()\n\n\nclass ASGIResponseStream(AsyncByteStream):\n def __init__(self, body: list[bytes]) -> None:\n self._body = body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield b"".join(self._body)\n\n\nclass ASGITransport(AsyncBaseTransport):\n """\n A custom AsyncTransport that handles sending requests directly to an ASGI app.\n\n ```python\n transport = httpx.ASGITransport(\n app=app,\n root_path="/submount",\n client=("1.2.3.4", 123)\n )\n client = httpx.AsyncClient(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `root_path` - The root path on which the ASGI application should be mounted.\n * `client` - A two-tuple indicating the client IP and port of incoming requests.\n ```\n """\n\n def __init__(\n self,\n app: _ASGIApp,\n raise_app_exceptions: bool = True,\n root_path: str = "",\n client: tuple[str, int] = ("127.0.0.1", 123),\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.root_path = root_path\n self.client = client\n\n async def handle_async_request(\n self,\n request: Request,\n ) -> Response:\n assert isinstance(request.stream, AsyncByteStream)\n\n # ASGI scope.\n scope = {\n "type": "http",\n "asgi": {"version": "3.0"},\n "http_version": "1.1",\n "method": request.method,\n "headers": [(k.lower(), v) for (k, v) in request.headers.raw],\n "scheme": request.url.scheme,\n "path": request.url.path,\n "raw_path": request.url.raw_path.split(b"?")[0],\n "query_string": request.url.query,\n "server": (request.url.host, request.url.port),\n "client": self.client,\n "root_path": self.root_path,\n }\n\n # Request.\n request_body_chunks = request.stream.__aiter__()\n request_complete = False\n\n # Response.\n status_code = None\n response_headers = None\n body_parts = []\n response_started = False\n response_complete = create_event()\n\n # ASGI callables.\n\n async def receive() -> dict[str, typing.Any]:\n nonlocal request_complete\n\n if request_complete:\n await response_complete.wait()\n return {"type": "http.disconnect"}\n\n try:\n body = await request_body_chunks.__anext__()\n except StopAsyncIteration:\n request_complete = True\n return {"type": "http.request", "body": b"", "more_body": False}\n return {"type": "http.request", 
"body": body, "more_body": True}\n\n async def send(message: typing.MutableMapping[str, typing.Any]) -> None:\n nonlocal status_code, response_headers, response_started\n\n if message["type"] == "http.response.start":\n assert not response_started\n\n status_code = message["status"]\n response_headers = message.get("headers", [])\n response_started = True\n\n elif message["type"] == "http.response.body":\n assert not response_complete.is_set()\n body = message.get("body", b"")\n more_body = message.get("more_body", False)\n\n if body and request.method != "HEAD":\n body_parts.append(body)\n\n if not more_body:\n response_complete.set()\n\n try:\n await self.app(scope, receive, send)\n except Exception: # noqa: PIE-786\n if self.raise_app_exceptions:\n raise\n\n response_complete.set()\n if status_code is None:\n status_code = 500\n if response_headers is None:\n response_headers = {}\n\n assert response_complete.is_set()\n assert status_code is not None\n assert response_headers is not None\n\n stream = ASGIResponseStream(body_parts)\n\n return Response(status_code, headers=response_headers, stream=stream)\n
| .venv\Lib\site-packages\httpx\_transports\asgi.py | asgi.py | Python | 5,501 | 0.95 | 0.139037 | 0.070922 | node-utils | 605 | 2024-05-13T05:39:35.908444 | Apache-2.0 | false | 0b387a9f984c603ea32cd8551357803b |
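A runnable sketch of the `ASGITransport` shown above, wired to a toy ASGI app; the `http://testserver` base URL is an arbitrary placeholder host:

```python
import asyncio

import httpx


async def app(scope, receive, send):
    # Minimal ASGI app: always replies 200 with a plain-text body.
    assert scope["type"] == "http"
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [(b"content-type", b"text/plain")],
        }
    )
    await send({"type": "http.response.body", "body": b"Hello, ASGI!"})


async def main():
    transport = httpx.ASGITransport(app=app)
    async with httpx.AsyncClient(
        transport=transport, base_url="http://testserver"
    ) as client:
        response = await client.get("/")
        print(response.status_code, response.text)  # 200 Hello, ASGI!


asyncio.run(main())
```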
from __future__ import annotations\n\nimport typing\nfrom types import TracebackType\n\nfrom .._models import Request, Response\n\nT = typing.TypeVar("T", bound="BaseTransport")\nA = typing.TypeVar("A", bound="AsyncBaseTransport")\n\n__all__ = ["AsyncBaseTransport", "BaseTransport"]\n\n\nclass BaseTransport:\n def __enter__(self: T) -> T:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: TracebackType | None = None,\n ) -> None:\n self.close()\n\n def handle_request(self, request: Request) -> Response:\n """\n Send a single HTTP request and return a response.\n\n Developers shouldn't typically ever need to call into this API directly,\n since the Client class provides all the higher level user-facing API\n niceties.\n\n In order to properly release any network resources, the response\n stream should *either* be consumed immediately, with a call to\n `response.stream.read()`, or else the `handle_request` call should\n be followed with a try/finally block to ensuring the stream is\n always closed.\n\n Example usage:\n\n with httpx.HTTPTransport() as transport:\n req = httpx.Request(\n method=b"GET",\n url=(b"https", b"www.example.com", 443, b"/"),\n headers=[(b"Host", b"www.example.com")],\n )\n resp = transport.handle_request(req)\n body = resp.stream.read()\n print(resp.status_code, resp.headers, body)\n\n\n Takes a `Request` instance as the only argument.\n\n Returns a `Response` instance.\n """\n raise NotImplementedError(\n "The 'handle_request' method must be implemented."\n ) # pragma: no cover\n\n def close(self) -> None:\n pass\n\n\nclass AsyncBaseTransport:\n async def __aenter__(self: A) -> A:\n return self\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: TracebackType | None = None,\n ) -> None:\n await self.aclose()\n\n async def handle_async_request(\n self,\n request: Request,\n ) -> Response:\n raise NotImplementedError(\n "The 'handle_async_request' method must be implemented."\n ) # pragma: no cover\n\n async def aclose(self) -> None:\n pass\n
| .venv\Lib\site-packages\httpx\_transports\base.py | base.py | Python | 2,523 | 0.95 | 0.139535 | 0 | python-kit | 829 | 2025-06-16T05:25:24.831769 | BSD-3-Clause | false | d3c39ff073dd5d22b4f3adcddcba8508 |
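The `handle_request` docstring above spells out the contract a custom sync transport must satisfy. A minimal sketch of a transport that never touches the network (`EchoTransport` is an illustrative name, not part of httpx):

```python
import httpx


class EchoTransport(httpx.BaseTransport):
    """Toy transport: answers every request locally, with no network I/O."""

    def handle_request(self, request: httpx.Request) -> httpx.Response:
        return httpx.Response(
            200, json={"method": request.method, "url": str(request.url)}
        )


client = httpx.Client(transport=EchoTransport())
print(client.get("https://example.org/ping").json())
```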
"""\nCustom transports, with nicely configured defaults.\n\nThe following additional keyword arguments are currently supported by httpcore...\n\n* uds: str\n* local_address: str\n* retries: int\n\nExample usages...\n\n# Disable HTTP/2 on a single specific domain.\nmounts = {\n "all://": httpx.HTTPTransport(http2=True),\n "all://*example.org": httpx.HTTPTransport()\n}\n\n# Using advanced httpcore configuration, with connection retries.\ntransport = httpx.HTTPTransport(retries=1)\nclient = httpx.Client(transport=transport)\n\n# Using advanced httpcore configuration, with unix domain sockets.\ntransport = httpx.HTTPTransport(uds="socket.uds")\nclient = httpx.Client(transport=transport)\n"""\n\nfrom __future__ import annotations\n\nimport contextlib\nimport typing\nfrom types import TracebackType\n\nif typing.TYPE_CHECKING:\n import ssl # pragma: no cover\n\n import httpx # pragma: no cover\n\nfrom .._config import DEFAULT_LIMITS, Limits, Proxy, create_ssl_context\nfrom .._exceptions import (\n ConnectError,\n ConnectTimeout,\n LocalProtocolError,\n NetworkError,\n PoolTimeout,\n ProtocolError,\n ProxyError,\n ReadError,\n ReadTimeout,\n RemoteProtocolError,\n TimeoutException,\n UnsupportedProtocol,\n WriteError,\n WriteTimeout,\n)\nfrom .._models import Request, Response\nfrom .._types import AsyncByteStream, CertTypes, ProxyTypes, SyncByteStream\nfrom .._urls import URL\nfrom .base import AsyncBaseTransport, BaseTransport\n\nT = typing.TypeVar("T", bound="HTTPTransport")\nA = typing.TypeVar("A", bound="AsyncHTTPTransport")\n\nSOCKET_OPTION = typing.Union[\n typing.Tuple[int, int, int],\n typing.Tuple[int, int, typing.Union[bytes, bytearray]],\n typing.Tuple[int, int, None, int],\n]\n\n__all__ = ["AsyncHTTPTransport", "HTTPTransport"]\n\nHTTPCORE_EXC_MAP: dict[type[Exception], type[httpx.HTTPError]] = {}\n\n\ndef _load_httpcore_exceptions() -> dict[type[Exception], type[httpx.HTTPError]]:\n import httpcore\n\n return {\n httpcore.TimeoutException: TimeoutException,\n httpcore.ConnectTimeout: ConnectTimeout,\n httpcore.ReadTimeout: ReadTimeout,\n httpcore.WriteTimeout: WriteTimeout,\n httpcore.PoolTimeout: PoolTimeout,\n httpcore.NetworkError: NetworkError,\n httpcore.ConnectError: ConnectError,\n httpcore.ReadError: ReadError,\n httpcore.WriteError: WriteError,\n httpcore.ProxyError: ProxyError,\n httpcore.UnsupportedProtocol: UnsupportedProtocol,\n httpcore.ProtocolError: ProtocolError,\n httpcore.LocalProtocolError: LocalProtocolError,\n httpcore.RemoteProtocolError: RemoteProtocolError,\n }\n\n\n@contextlib.contextmanager\ndef map_httpcore_exceptions() -> typing.Iterator[None]:\n global HTTPCORE_EXC_MAP\n if len(HTTPCORE_EXC_MAP) == 0:\n HTTPCORE_EXC_MAP = _load_httpcore_exceptions()\n try:\n yield\n except Exception as exc:\n mapped_exc = None\n\n for from_exc, to_exc in HTTPCORE_EXC_MAP.items():\n if not isinstance(exc, from_exc):\n continue\n # We want to map to the most specific exception we can find.\n # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to\n # `httpx.ReadTimeout`, not just `httpx.TimeoutException`.\n if mapped_exc is None or issubclass(to_exc, mapped_exc):\n mapped_exc = to_exc\n\n if mapped_exc is None: # pragma: no cover\n raise\n\n message = str(exc)\n raise mapped_exc(message) from exc\n\n\nclass ResponseStream(SyncByteStream):\n def __init__(self, httpcore_stream: typing.Iterable[bytes]) -> None:\n self._httpcore_stream = httpcore_stream\n\n def __iter__(self) -> typing.Iterator[bytes]:\n with map_httpcore_exceptions():\n for part in 
self._httpcore_stream:\n yield part\n\n def close(self) -> None:\n if hasattr(self._httpcore_stream, "close"):\n self._httpcore_stream.close()\n\n\nclass HTTPTransport(BaseTransport):\n def __init__(\n self,\n verify: ssl.SSLContext | str | bool = True,\n cert: CertTypes | None = None,\n trust_env: bool = True,\n http1: bool = True,\n http2: bool = False,\n limits: Limits = DEFAULT_LIMITS,\n proxy: ProxyTypes | None = None,\n uds: str | None = None,\n local_address: str | None = None,\n retries: int = 0,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> None:\n import httpcore\n\n proxy = Proxy(url=proxy) if isinstance(proxy, (str, URL)) else proxy\n ssl_context = create_ssl_context(verify=verify, cert=cert, trust_env=trust_env)\n\n if proxy is None:\n self._pool = httpcore.ConnectionPool(\n ssl_context=ssl_context,\n max_connections=limits.max_connections,\n max_keepalive_connections=limits.max_keepalive_connections,\n keepalive_expiry=limits.keepalive_expiry,\n http1=http1,\n http2=http2,\n uds=uds,\n local_address=local_address,\n retries=retries,\n socket_options=socket_options,\n )\n elif proxy.url.scheme in ("http", "https"):\n self._pool = httpcore.HTTPProxy(\n proxy_url=httpcore.URL(\n scheme=proxy.url.raw_scheme,\n host=proxy.url.raw_host,\n port=proxy.url.port,\n target=proxy.url.raw_path,\n ),\n proxy_auth=proxy.raw_auth,\n proxy_headers=proxy.headers.raw,\n ssl_context=ssl_context,\n proxy_ssl_context=proxy.ssl_context,\n max_connections=limits.max_connections,\n max_keepalive_connections=limits.max_keepalive_connections,\n keepalive_expiry=limits.keepalive_expiry,\n http1=http1,\n http2=http2,\n socket_options=socket_options,\n )\n elif proxy.url.scheme in ("socks5", "socks5h"):\n try:\n import socksio # noqa\n except ImportError: # pragma: no cover\n raise ImportError(\n "Using SOCKS proxy, but the 'socksio' package is not installed. 
"\n "Make sure to install httpx using `pip install httpx[socks]`."\n ) from None\n\n self._pool = httpcore.SOCKSProxy(\n proxy_url=httpcore.URL(\n scheme=proxy.url.raw_scheme,\n host=proxy.url.raw_host,\n port=proxy.url.port,\n target=proxy.url.raw_path,\n ),\n proxy_auth=proxy.raw_auth,\n ssl_context=ssl_context,\n max_connections=limits.max_connections,\n max_keepalive_connections=limits.max_keepalive_connections,\n keepalive_expiry=limits.keepalive_expiry,\n http1=http1,\n http2=http2,\n )\n else: # pragma: no cover\n raise ValueError(\n "Proxy protocol must be either 'http', 'https', 'socks5', or 'socks5h',"\n f" but got {proxy.url.scheme!r}."\n )\n\n def __enter__(self: T) -> T: # Use generics for subclass support.\n self._pool.__enter__()\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: TracebackType | None = None,\n ) -> None:\n with map_httpcore_exceptions():\n self._pool.__exit__(exc_type, exc_value, traceback)\n\n def handle_request(\n self,\n request: Request,\n ) -> Response:\n assert isinstance(request.stream, SyncByteStream)\n import httpcore\n\n req = httpcore.Request(\n method=request.method,\n url=httpcore.URL(\n scheme=request.url.raw_scheme,\n host=request.url.raw_host,\n port=request.url.port,\n target=request.url.raw_path,\n ),\n headers=request.headers.raw,\n content=request.stream,\n extensions=request.extensions,\n )\n with map_httpcore_exceptions():\n resp = self._pool.handle_request(req)\n\n assert isinstance(resp.stream, typing.Iterable)\n\n return Response(\n status_code=resp.status,\n headers=resp.headers,\n stream=ResponseStream(resp.stream),\n extensions=resp.extensions,\n )\n\n def close(self) -> None:\n self._pool.close()\n\n\nclass AsyncResponseStream(AsyncByteStream):\n def __init__(self, httpcore_stream: typing.AsyncIterable[bytes]) -> None:\n self._httpcore_stream = httpcore_stream\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n with map_httpcore_exceptions():\n async for part in self._httpcore_stream:\n yield part\n\n async def aclose(self) -> None:\n if hasattr(self._httpcore_stream, "aclose"):\n await self._httpcore_stream.aclose()\n\n\nclass AsyncHTTPTransport(AsyncBaseTransport):\n def __init__(\n self,\n verify: ssl.SSLContext | str | bool = True,\n cert: CertTypes | None = None,\n trust_env: bool = True,\n http1: bool = True,\n http2: bool = False,\n limits: Limits = DEFAULT_LIMITS,\n proxy: ProxyTypes | None = None,\n uds: str | None = None,\n local_address: str | None = None,\n retries: int = 0,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> None:\n import httpcore\n\n proxy = Proxy(url=proxy) if isinstance(proxy, (str, URL)) else proxy\n ssl_context = create_ssl_context(verify=verify, cert=cert, trust_env=trust_env)\n\n if proxy is None:\n self._pool = httpcore.AsyncConnectionPool(\n ssl_context=ssl_context,\n max_connections=limits.max_connections,\n max_keepalive_connections=limits.max_keepalive_connections,\n keepalive_expiry=limits.keepalive_expiry,\n http1=http1,\n http2=http2,\n uds=uds,\n local_address=local_address,\n retries=retries,\n socket_options=socket_options,\n )\n elif proxy.url.scheme in ("http", "https"):\n self._pool = httpcore.AsyncHTTPProxy(\n proxy_url=httpcore.URL(\n scheme=proxy.url.raw_scheme,\n host=proxy.url.raw_host,\n port=proxy.url.port,\n target=proxy.url.raw_path,\n ),\n proxy_auth=proxy.raw_auth,\n proxy_headers=proxy.headers.raw,\n proxy_ssl_context=proxy.ssl_context,\n 
ssl_context=ssl_context,\n max_connections=limits.max_connections,\n max_keepalive_connections=limits.max_keepalive_connections,\n keepalive_expiry=limits.keepalive_expiry,\n http1=http1,\n http2=http2,\n socket_options=socket_options,\n )\n elif proxy.url.scheme in ("socks5", "socks5h"):\n try:\n import socksio # noqa\n except ImportError: # pragma: no cover\n raise ImportError(\n "Using SOCKS proxy, but the 'socksio' package is not installed. "\n "Make sure to install httpx using `pip install httpx[socks]`."\n ) from None\n\n self._pool = httpcore.AsyncSOCKSProxy(\n proxy_url=httpcore.URL(\n scheme=proxy.url.raw_scheme,\n host=proxy.url.raw_host,\n port=proxy.url.port,\n target=proxy.url.raw_path,\n ),\n proxy_auth=proxy.raw_auth,\n ssl_context=ssl_context,\n max_connections=limits.max_connections,\n max_keepalive_connections=limits.max_keepalive_connections,\n keepalive_expiry=limits.keepalive_expiry,\n http1=http1,\n http2=http2,\n )\n else: # pragma: no cover\n raise ValueError(\n "Proxy protocol must be either 'http', 'https', 'socks5', or 'socks5h',"\n " but got {proxy.url.scheme!r}."\n )\n\n async def __aenter__(self: A) -> A: # Use generics for subclass support.\n await self._pool.__aenter__()\n return self\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: TracebackType | None = None,\n ) -> None:\n with map_httpcore_exceptions():\n await self._pool.__aexit__(exc_type, exc_value, traceback)\n\n async def handle_async_request(\n self,\n request: Request,\n ) -> Response:\n assert isinstance(request.stream, AsyncByteStream)\n import httpcore\n\n req = httpcore.Request(\n method=request.method,\n url=httpcore.URL(\n scheme=request.url.raw_scheme,\n host=request.url.raw_host,\n port=request.url.port,\n target=request.url.raw_path,\n ),\n headers=request.headers.raw,\n content=request.stream,\n extensions=request.extensions,\n )\n with map_httpcore_exceptions():\n resp = await self._pool.handle_async_request(req)\n\n assert isinstance(resp.stream, typing.AsyncIterable)\n\n return Response(\n status_code=resp.status,\n headers=resp.headers,\n stream=AsyncResponseStream(resp.stream),\n extensions=resp.extensions,\n )\n\n async def aclose(self) -> None:\n await self._pool.aclose()\n
| .venv\Lib\site-packages\httpx\_transports\default.py | default.py | Python | 13,983 | 0.95 | 0.103448 | 0.025641 | node-utils | 705 | 2024-08-22T12:07:28.219218 | BSD-3-Clause | false | 64e4f3d1df53f2525699172a8b0275b8 |
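The module docstring above lists the extra httpcore keyword arguments; a short sketch of the configurations it describes (the proxy URL and socket path are placeholders):

```python
import httpx

# Retry failed connection attempts once before giving up.
client = httpx.Client(transport=httpx.HTTPTransport(retries=1))

# Connect over a unix domain socket instead of TCP.
uds_transport = httpx.HTTPTransport(uds="socket.uds")

# Route through an HTTP proxy. Internally this builds an
# httpcore.HTTPProxy pool, and map_httpcore_exceptions() translates
# httpcore errors into their httpx equivalents.
proxy_transport = httpx.HTTPTransport(proxy="http://localhost:8030")
```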
from __future__ import annotations\n\nimport typing\n\nfrom .._models import Request, Response\nfrom .base import AsyncBaseTransport, BaseTransport\n\nSyncHandler = typing.Callable[[Request], Response]\nAsyncHandler = typing.Callable[[Request], typing.Coroutine[None, None, Response]]\n\n\n__all__ = ["MockTransport"]\n\n\nclass MockTransport(AsyncBaseTransport, BaseTransport):\n def __init__(self, handler: SyncHandler | AsyncHandler) -> None:\n self.handler = handler\n\n def handle_request(\n self,\n request: Request,\n ) -> Response:\n request.read()\n response = self.handler(request)\n if not isinstance(response, Response): # pragma: no cover\n raise TypeError("Cannot use an async handler in a sync Client")\n return response\n\n async def handle_async_request(\n self,\n request: Request,\n ) -> Response:\n await request.aread()\n response = self.handler(request)\n\n # Allow handler to *optionally* be an `async` function.\n # If it is, then the `response` variable need to be awaited to actually\n # return the result.\n\n if not isinstance(response, Response):\n response = await response\n\n return response\n
| .venv\Lib\site-packages\httpx\_transports\mock.py | mock.py | Python | 1,232 | 0.95 | 0.162791 | 0.096774 | node-utils | 26 | 2025-01-22T04:20:32.967745 | MIT | false | 03f5e346cdab53e7345c204cfc2c7340 |
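`MockTransport` accepts either a sync or an async handler, as the record above shows. A minimal sync usage sketch:

```python
import httpx


def handler(request: httpx.Request) -> httpx.Response:
    # Return a canned response; no network I/O takes place.
    return httpx.Response(200, json={"path": request.url.path})


client = httpx.Client(transport=httpx.MockTransport(handler))
assert client.get("https://example.org/users").json() == {"path": "/users"}
```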
from __future__ import annotations\n\nimport io\nimport itertools\nimport sys\nimport typing\n\nfrom .._models import Request, Response\nfrom .._types import SyncByteStream\nfrom .base import BaseTransport\n\nif typing.TYPE_CHECKING:\n from _typeshed import OptExcInfo # pragma: no cover\n from _typeshed.wsgi import WSGIApplication # pragma: no cover\n\n_T = typing.TypeVar("_T")\n\n\n__all__ = ["WSGITransport"]\n\n\ndef _skip_leading_empty_chunks(body: typing.Iterable[_T]) -> typing.Iterable[_T]:\n body = iter(body)\n for chunk in body:\n if chunk:\n return itertools.chain([chunk], body)\n return []\n\n\nclass WSGIByteStream(SyncByteStream):\n def __init__(self, result: typing.Iterable[bytes]) -> None:\n self._close = getattr(result, "close", None)\n self._result = _skip_leading_empty_chunks(result)\n\n def __iter__(self) -> typing.Iterator[bytes]:\n for part in self._result:\n yield part\n\n def close(self) -> None:\n if self._close is not None:\n self._close()\n\n\nclass WSGITransport(BaseTransport):\n """\n A custom transport that handles sending requests directly to an WSGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.Client(app=app)\n ```\n\n Alternatively, you can setup the transport instance explicitly.\n This allows you to include any additional configuration arguments specific\n to the WSGITransport class:\n\n ```\n transport = httpx.WSGITransport(\n app=app,\n script_name="/submount",\n remote_addr="1.2.3.4"\n )\n client = httpx.Client(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The WSGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `script_name` - The root path on which the WSGI application should be mounted.\n * `remote_addr` - A string indicating the client IP of incoming requests.\n ```\n """\n\n def __init__(\n self,\n app: WSGIApplication,\n raise_app_exceptions: bool = True,\n script_name: str = "",\n remote_addr: str = "127.0.0.1",\n wsgi_errors: typing.TextIO | None = None,\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.script_name = script_name\n self.remote_addr = remote_addr\n self.wsgi_errors = wsgi_errors\n\n def handle_request(self, request: Request) -> Response:\n request.read()\n wsgi_input = io.BytesIO(request.content)\n\n port = request.url.port or {"http": 80, "https": 443}[request.url.scheme]\n environ = {\n "wsgi.version": (1, 0),\n "wsgi.url_scheme": request.url.scheme,\n "wsgi.input": wsgi_input,\n "wsgi.errors": self.wsgi_errors or sys.stderr,\n "wsgi.multithread": True,\n "wsgi.multiprocess": False,\n "wsgi.run_once": False,\n "REQUEST_METHOD": request.method,\n "SCRIPT_NAME": self.script_name,\n "PATH_INFO": request.url.path,\n "QUERY_STRING": request.url.query.decode("ascii"),\n "SERVER_NAME": request.url.host,\n "SERVER_PORT": str(port),\n "SERVER_PROTOCOL": "HTTP/1.1",\n "REMOTE_ADDR": self.remote_addr,\n }\n for header_key, header_value in request.headers.raw:\n key = header_key.decode("ascii").upper().replace("-", "_")\n if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"):\n key = "HTTP_" + key\n environ[key] = header_value.decode("ascii")\n\n seen_status = None\n seen_response_headers = None\n seen_exc_info = None\n\n def start_response(\n status: str,\n response_headers: list[tuple[str, str]],\n exc_info: OptExcInfo | None = None,\n ) -> 
typing.Callable[[bytes], typing.Any]:\n nonlocal seen_status, seen_response_headers, seen_exc_info\n seen_status = status\n seen_response_headers = response_headers\n seen_exc_info = exc_info\n return lambda _: None\n\n result = self.app(environ, start_response)\n\n stream = WSGIByteStream(result)\n\n assert seen_status is not None\n assert seen_response_headers is not None\n if seen_exc_info and seen_exc_info[0] and self.raise_app_exceptions:\n raise seen_exc_info[1]\n\n status_code = int(seen_status.split()[0])\n headers = [\n (key.encode("ascii"), value.encode("ascii"))\n for key, value in seen_response_headers\n ]\n\n return Response(status_code, headers=headers, stream=stream)\n
| .venv\Lib\site-packages\httpx\_transports\wsgi.py | wsgi.py | Python | 4,825 | 0.95 | 0.14094 | 0.033333 | awesome-app | 428 | 2025-04-07T06:03:06.910358 | GPL-3.0 | false | 2e9cf654f93ca73d52a96fbbf599b7c2 |
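Mirroring the `WSGITransport` docstring above, a runnable sketch against a toy WSGI app (`http://testserver` is a placeholder host):

```python
import httpx


def app(environ, start_response):
    # Minimal WSGI app returning a plain-text body.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"Hello, WSGI!"]


client = httpx.Client(
    transport=httpx.WSGITransport(app=app), base_url="http://testserver"
)
print(client.get("/").text)  # Hello, WSGI!
```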
from .asgi import *\nfrom .base import *\nfrom .default import *\nfrom .mock import *\nfrom .wsgi import *\n\n__all__ = [\n "ASGITransport",\n "AsyncBaseTransport",\n "BaseTransport",\n "AsyncHTTPTransport",\n "HTTPTransport",\n "MockTransport",\n "WSGITransport",\n]\n
| .venv\Lib\site-packages\httpx\_transports\__init__.py | __init__.py | Python | 275 | 0.85 | 0 | 0 | vue-tools | 162 | 2024-08-04T17:31:17.747359 | BSD-3-Clause | false | ba6c6034aed2609beef5e03cb27ad148 |
\n\n
| .venv\Lib\site-packages\httpx\_transports\__pycache__\asgi.cpython-313.pyc | asgi.cpython-313.pyc | Other | 7,738 | 0.8 | 0.023256 | 0.063291 | react-lib | 854 | 2025-05-24T13:37:56.992839 | BSD-3-Clause | false | 1df36c031f09885760deaffba35742e2 |
\n\n
| .venv\Lib\site-packages\httpx\_transports\__pycache__\base.cpython-313.pyc | base.cpython-313.pyc | Other | 3,778 | 0.95 | 0.026667 | 0 | python-kit | 924 | 2025-06-30T00:25:28.502463 | MIT | false | c9a2f8797995f0079ecef899679b5584 |
\n\n
| .venv\Lib\site-packages\httpx\_transports\__pycache__\default.cpython-313.pyc | default.cpython-313.pyc | Other | 17,303 | 0.8 | 0 | 0.031746 | vue-tools | 891 | 2023-07-18T06:54:52.962541 | MIT | false | 26bc79a645e2dacc06a1f6cbd49d1553 |
\n\n
| .venv\Lib\site-packages\httpx\_transports\__pycache__\mock.cpython-313.pyc | mock.cpython-313.pyc | Other | 2,020 | 0.8 | 0 | 0 | vue-tools | 613 | 2024-12-04T19:30:01.792645 | Apache-2.0 | false | b5ee28f43ac6937761ad0303eb8bf818 |
\n\n
| .venv\Lib\site-packages\httpx\_transports\__pycache__\wsgi.cpython-313.pyc | wsgi.cpython-313.pyc | Other | 6,895 | 0.95 | 0.038462 | 0.056338 | node-utils | 485 | 2024-04-28T18:29:40.374693 | MIT | false | 841ad84e6e03860bb314fdd37a1fce79 |
\n\n
| .venv\Lib\site-packages\httpx\_transports\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 438 | 0.7 | 0 | 0 | python-kit | 424 | 2025-05-17T04:13:11.834965 | MIT | false | 33e9668335f5ee27aced93bc80871c56 |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_api.cpython-313.pyc | _api.cpython-313.pyc | Other | 10,040 | 0.95 | 0.033898 | 0.15814 | react-lib | 936 | 2024-10-28T10:07:32.288116 | BSD-3-Clause | false | eafac5fe5c220e3d47117e764368a6e1 |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_auth.cpython-313.pyc | _auth.cpython-313.pyc | Other | 15,619 | 0.95 | 0.017647 | 0 | node-utils | 2 | 2024-01-22T07:29:01.565265 | GPL-3.0 | false | 2ee4e430a72faef9017040562755a7d0 |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_client.cpython-313.pyc | _client.cpython-313.pyc | Other | 63,483 | 0.75 | 0.029014 | 0.054313 | awesome-app | 242 | 2024-12-12T15:54:57.480732 | MIT | false | 456ad9643755a966aafe1421f5f5f81e |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_config.cpython-313.pyc | _config.cpython-313.pyc | Other | 11,395 | 0.8 | 0.014493 | 0.046512 | node-utils | 285 | 2023-12-03T17:58:24.456789 | MIT | false | cb96cb3c50025c87e6fe6b30a66a53c7 |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_content.cpython-313.pyc | _content.cpython-313.pyc | Other | 10,749 | 0.8 | 0.022222 | 0.011364 | react-lib | 971 | 2024-02-12T16:39:23.389597 | BSD-3-Clause | false | dc8ca590c337687fc6ef3c4dbb3c97b9 |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_decoders.cpython-313.pyc | _decoders.cpython-313.pyc | Other | 17,259 | 0.95 | 0.022901 | 0 | awesome-app | 607 | 2025-04-13T00:03:38.718176 | GPL-3.0 | false | 47e1b58aff5fe5a49e65479c4ddb96ff |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_exceptions.cpython-313.pyc | _exceptions.cpython-313.pyc | Other | 12,358 | 0.95 | 0.116402 | 0.028409 | awesome-app | 698 | 2024-08-16T04:45:12.391987 | GPL-3.0 | false | 8e0174321f3a2b70a062ec397544fb23 |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_main.cpython-313.pyc | _main.cpython-313.pyc | Other | 20,632 | 0.8 | 0.019608 | 0.004484 | node-utils | 804 | 2025-06-07T00:49:51.762217 | BSD-3-Clause | false | 2e768621551c0a379f45e1e0ef9ec3cd |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_models.cpython-313.pyc | _models.cpython-313.pyc | Other | 58,580 | 0.6 | 0.037398 | 0.005111 | node-utils | 864 | 2024-03-07T21:15:02.833826 | BSD-3-Clause | false | 29aaf82af163ba2bf7e247885e0549fe |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_multipart.cpython-313.pyc | _multipart.cpython-313.pyc | Other | 13,971 | 0.95 | 0.035088 | 0 | awesome-app | 161 | 2024-08-05T00:29:05.367400 | BSD-3-Clause | false | 811c07cc3d9d017a025a2c2f69a67476 |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_status_codes.cpython-313.pyc | _status_codes.cpython-313.pyc | Other | 7,163 | 0.8 | 0.123077 | 0.209677 | python-kit | 702 | 2024-05-24T23:07:18.455790 | BSD-3-Clause | false | 6c3ffe7629e728bc3e84407f729fe7a4 |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_types.cpython-313.pyc | _types.cpython-313.pyc | Other | 3,898 | 0.8 | 0.018868 | 0.039216 | vue-tools | 393 | 2025-04-08T06:52:09.558989 | BSD-3-Clause | false | a3a9ca48b1fa251859af39ec9a9581ee |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_urlparse.cpython-313.pyc | _urlparse.cpython-313.pyc | Other | 18,101 | 0.95 | 0.004831 | 0.042553 | react-lib | 92 | 2024-01-05T09:22:57.744691 | MIT | false | 04dba02497ef61096db069dd64f85977 |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_urls.cpython-313.pyc | _urls.cpython-313.pyc | Other | 26,943 | 0.95 | 0.039474 | 0.022013 | vue-tools | 445 | 2024-03-25T16:47:46.976246 | Apache-2.0 | false | 2caadbf980f64f2c945bad1be768e7fe |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\_utils.cpython-313.pyc | _utils.cpython-313.pyc | Other | 9,540 | 0.95 | 0.022388 | 0.04 | python-kit | 618 | 2023-11-24T02:17:04.084794 | Apache-2.0 | false | 39bb3596c1cd66c116bc6af8bf803e07 |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 2,137 | 0.85 | 0 | 0 | python-kit | 511 | 2024-09-18T03:58:21.124456 | Apache-2.0 | false | 6026bf841557b6aecf0f3bd66bf8359c |
\n\n
| .venv\Lib\site-packages\httpx\__pycache__\__version__.cpython-313.pyc | __version__.cpython-313.pyc | Other | 309 | 0.7 | 0.5 | 0 | node-utils | 461 | 2024-07-17T06:27:59.535752 | GPL-3.0 | false | d96060362876db67ea2a340269e31778 |
[console_scripts]\nhttpx = httpx:main\n
| .venv\Lib\site-packages\httpx-0.28.1.dist-info\entry_points.txt | entry_points.txt | Other | 37 | 0.5 | 0 | 0 | vue-tools | 789 | 2023-07-22T11:15:10.489929 | BSD-3-Clause | false | 204e3e08e0463bda04a93cd1e1d9d232 |
pip\n
| .venv\Lib\site-packages\httpx-0.28.1.dist-info\INSTALLER | INSTALLER | Other | 4 | 0.5 | 0 | 0 | vue-tools | 351 | 2025-01-30T13:20:12.047476 | Apache-2.0 | false | 365c9bfeb7d89244f2ce01c1de44cb85 |
Metadata-Version: 2.3\nName: httpx\nVersion: 0.28.1\nSummary: The next generation HTTP client.\nProject-URL: Changelog, https://github.com/encode/httpx/blob/master/CHANGELOG.md\nProject-URL: Documentation, https://www.python-httpx.org\nProject-URL: Homepage, https://github.com/encode/httpx\nProject-URL: Source, https://github.com/encode/httpx\nAuthor-email: Tom Christie <tom@tomchristie.com>\nLicense: BSD-3-Clause\nClassifier: Development Status :: 4 - Beta\nClassifier: Environment :: Web Environment\nClassifier: Framework :: AsyncIO\nClassifier: Framework :: Trio\nClassifier: Intended Audience :: Developers\nClassifier: License :: OSI Approved :: BSD License\nClassifier: Operating System :: OS Independent\nClassifier: Programming Language :: Python :: 3\nClassifier: Programming Language :: Python :: 3 :: Only\nClassifier: Programming Language :: Python :: 3.8\nClassifier: Programming Language :: Python :: 3.9\nClassifier: Programming Language :: Python :: 3.10\nClassifier: Programming Language :: Python :: 3.11\nClassifier: Programming Language :: Python :: 3.12\nClassifier: Topic :: Internet :: WWW/HTTP\nRequires-Python: >=3.8\nRequires-Dist: anyio\nRequires-Dist: certifi\nRequires-Dist: httpcore==1.*\nRequires-Dist: idna\nProvides-Extra: brotli\nRequires-Dist: brotli; (platform_python_implementation == 'CPython') and extra == 'brotli'\nRequires-Dist: brotlicffi; (platform_python_implementation != 'CPython') and extra == 'brotli'\nProvides-Extra: cli\nRequires-Dist: click==8.*; extra == 'cli'\nRequires-Dist: pygments==2.*; extra == 'cli'\nRequires-Dist: rich<14,>=10; extra == 'cli'\nProvides-Extra: http2\nRequires-Dist: h2<5,>=3; extra == 'http2'\nProvides-Extra: socks\nRequires-Dist: socksio==1.*; extra == 'socks'\nProvides-Extra: zstd\nRequires-Dist: zstandard>=0.18.0; extra == 'zstd'\nDescription-Content-Type: text/markdown\n\n<p align="center">\n <a href="https://www.python-httpx.org/"><img width="350" height="208" src="https://raw.githubusercontent.com/encode/httpx/master/docs/img/butterfly.png" alt='HTTPX'></a>\n</p>\n\n<p align="center"><strong>HTTPX</strong> <em>- A next-generation HTTP client for Python.</em></p>\n\n<p align="center">\n<a href="https://github.com/encode/httpx/actions">\n <img src="https://github.com/encode/httpx/workflows/Test%20Suite/badge.svg" alt="Test Suite">\n</a>\n<a href="https://pypi.org/project/httpx/">\n <img src="https://badge.fury.io/py/httpx.svg" alt="Package version">\n</a>\n</p>\n\nHTTPX is a fully featured HTTP client library for Python 3. 
It includes **an integrated command line client**, has support for both **HTTP/1.1 and HTTP/2**, and provides both **sync and async APIs**.\n\n---\n\nInstall HTTPX using pip:\n\n```shell\n$ pip install httpx\n```\n\nNow, let's get started:\n\n```pycon\n>>> import httpx\n>>> r = httpx.get('https://www.example.org/')\n>>> r\n<Response [200 OK]>\n>>> r.status_code\n200\n>>> r.headers['content-type']\n'text/html; charset=UTF-8'\n>>> r.text\n'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title>...'\n```\n\nOr, using the command-line client.\n\n```shell\n$ pip install 'httpx[cli]' # The command line client is an optional dependency.\n```\n\nWhich now allows us to use HTTPX directly from the command-line...\n\n<p align="center">\n <img width="700" src="https://raw.githubusercontent.com/encode/httpx/master/docs/img/httpx-help.png" alt='httpx --help'>\n</p>\n\nSending a request...\n\n<p align="center">\n <img width="700" src="https://raw.githubusercontent.com/encode/httpx/master/docs/img/httpx-request.png" alt='httpx http://httpbin.org/json'>\n</p>\n\n## Features\n\nHTTPX builds on the well-established usability of `requests`, and gives you:\n\n* A broadly [requests-compatible API](https://www.python-httpx.org/compatibility/).\n* An integrated command-line client.\n* HTTP/1.1 [and HTTP/2 support](https://www.python-httpx.org/http2/).\n* Standard synchronous interface, but with [async support if you need it](https://www.python-httpx.org/async/).\n* Ability to make requests directly to [WSGI applications](https://www.python-httpx.org/advanced/transports/#wsgi-transport) or [ASGI applications](https://www.python-httpx.org/advanced/transports/#asgi-transport).\n* Strict timeouts everywhere.\n* Fully type annotated.\n* 100% test coverage.\n\nPlus all the standard features of `requests`...\n\n* International Domains and URLs\n* Keep-Alive & Connection Pooling\n* Sessions with Cookie Persistence\n* Browser-style SSL Verification\n* Basic/Digest Authentication\n* Elegant Key/Value Cookies\n* Automatic Decompression\n* Automatic Content Decoding\n* Unicode Response Bodies\n* Multipart File Uploads\n* HTTP(S) Proxy Support\n* Connection Timeouts\n* Streaming Downloads\n* .netrc Support\n* Chunked Requests\n\n## Installation\n\nInstall with pip:\n\n```shell\n$ pip install httpx\n```\n\nOr, to include the optional HTTP/2 support, use:\n\n```shell\n$ pip install httpx[http2]\n```\n\nHTTPX requires Python 3.8+.\n\n## Documentation\n\nProject documentation is available at [https://www.python-httpx.org/](https://www.python-httpx.org/).\n\nFor a run-through of all the basics, head over to the [QuickStart](https://www.python-httpx.org/quickstart/).\n\nFor more advanced topics, see the [Advanced Usage](https://www.python-httpx.org/advanced/) section, the [async support](https://www.python-httpx.org/async/) section, or the [HTTP/2](https://www.python-httpx.org/http2/) section.\n\nThe [Developer Interface](https://www.python-httpx.org/api/) provides a comprehensive API reference.\n\nTo find out about tools that integrate with HTTPX, see [Third Party Packages](https://www.python-httpx.org/third_party_packages/).\n\n## Contribute\n\nIf you want to contribute with HTTPX check out the [Contributing Guide](https://www.python-httpx.org/contributing/) to learn how to start.\n\n## Dependencies\n\nThe HTTPX project relies on these excellent libraries:\n\n* `httpcore` - The underlying transport implementation for `httpx`.\n * `h11` - HTTP/1.1 support.\n* `certifi` - SSL certificates.\n* `idna` - Internationalized 
domain name support.\n* `sniffio` - Async library autodetection.\n\nAs well as these optional installs:\n\n* `h2` - HTTP/2 support. *(Optional, with `httpx[http2]`)*\n* `socksio` - SOCKS proxy support. *(Optional, with `httpx[socks]`)*\n* `rich` - Rich terminal support. *(Optional, with `httpx[cli]`)*\n* `click` - Command line client support. *(Optional, with `httpx[cli]`)*\n* `brotli` or `brotlicffi` - Decoding for "brotli" compressed responses. *(Optional, with `httpx[brotli]`)*\n* `zstandard` - Decoding for "zstd" compressed responses. *(Optional, with `httpx[zstd]`)*\n\nA huge amount of credit is due to `requests` for the API layout that\nmuch of this work follows, as well as to `urllib3` for plenty of design\ninspiration around the lower-level networking details.\n\n---\n\n<p align="center"><i>HTTPX is <a href="https://github.com/encode/httpx/blob/master/LICENSE.md">BSD licensed</a> code.<br/>Designed & crafted with care.</i><br/>— 🦋 —</p>\n\n## Release Information\n\n### Fixed\n\n* Reintroduced supposedly-private `URLTypes` shortcut. (#2673)\n\n\n---\n\n[Full changelog](https://github.com/encode/httpx/blob/master/CHANGELOG.md)\n
| .venv\Lib\site-packages\httpx-0.28.1.dist-info\METADATA | METADATA | Other | 7,052 | 0.95 | 0.044335 | 0.270968 | vue-tools | 509 | 2024-05-22T17:49:44.322196 | GPL-3.0 | false | 766d6fe8917d3898739fb7aea680d080 |
../../Scripts/httpx.exe,sha256=xrJI7E2-xd4rEI5tWLBiHdwrz8DvZmulyClKQAYk0Hg,108406\nhttpx-0.28.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4\nhttpx-0.28.1.dist-info/METADATA,sha256=_rubD48-gNV8gZnDBPNcQzboWB0dGNeYPJJ2a4J5OyU,7052\nhttpx-0.28.1.dist-info/RECORD,,\nhttpx-0.28.1.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87\nhttpx-0.28.1.dist-info/entry_points.txt,sha256=2lVkdQmxLA1pNMgSN2eV89o90HCZezhmNwsy6ryKDSA,37\nhttpx-0.28.1.dist-info/licenses/LICENSE.md,sha256=TsWdVE8StfU5o6cW_TIaxYzNgDC0ZSIfLIgCAM3yjY0,1508\nhttpx/__init__.py,sha256=CsaZe6yZj0rHg6322AWKWHGTMVr9txgEfD5P3_Rrz60,2171\nhttpx/__pycache__/__init__.cpython-313.pyc,,\nhttpx/__pycache__/__version__.cpython-313.pyc,,\nhttpx/__pycache__/_api.cpython-313.pyc,,\nhttpx/__pycache__/_auth.cpython-313.pyc,,\nhttpx/__pycache__/_client.cpython-313.pyc,,\nhttpx/__pycache__/_config.cpython-313.pyc,,\nhttpx/__pycache__/_content.cpython-313.pyc,,\nhttpx/__pycache__/_decoders.cpython-313.pyc,,\nhttpx/__pycache__/_exceptions.cpython-313.pyc,,\nhttpx/__pycache__/_main.cpython-313.pyc,,\nhttpx/__pycache__/_models.cpython-313.pyc,,\nhttpx/__pycache__/_multipart.cpython-313.pyc,,\nhttpx/__pycache__/_status_codes.cpython-313.pyc,,\nhttpx/__pycache__/_types.cpython-313.pyc,,\nhttpx/__pycache__/_urlparse.cpython-313.pyc,,\nhttpx/__pycache__/_urls.cpython-313.pyc,,\nhttpx/__pycache__/_utils.cpython-313.pyc,,\nhttpx/__version__.py,sha256=LoUyYeOXTieGzuP_64UL0wxdtxjuu_QbOvE7NOg-IqU,108\nhttpx/_api.py,sha256=r_Zgs4jIpcPJLqK5dbbSayqo_iVMKFaxZCd-oOHxLEs,11743\nhttpx/_auth.py,sha256=Yr3QwaUSK17rGYx-7j-FdicFIzz4Y9FFV-1F4-7RXX4,11891\nhttpx/_client.py,sha256=xD-UG67-WMkeltAAOeGGj-cZ2RRTAm19sWRxlFY7_40,65714\nhttpx/_config.py,sha256=pPp2U-wicfcKsF-KYRE1LYdt3e6ERGeIoXZ8Gjo3LWc,8547\nhttpx/_content.py,sha256=LGGzrJTR3OvN4Mb1GVVNLXkXJH-6oKlwAttO9p5w_yg,8161\nhttpx/_decoders.py,sha256=p0dX8I0NEHexs3UGp4SsZutiMhsXrrWl6-GnqVb0iKM,12041\nhttpx/_exceptions.py,sha256=bxW7fxzgVMAdNTbwT0Vnq04gJDW1_gI_GFiQPuMyjL0,8527\nhttpx/_main.py,sha256=Cg9GMabiTT_swaDfUgIRitSwxLRMSwUDOm7LdSGqlA4,15626\nhttpx/_models.py,sha256=4__Guyv1gLxuZChwim8kfQNiIOcJ9acreFOSurvZfms,44700\nhttpx/_multipart.py,sha256=KOHEZZl6oohg9mPaKyyu345qq1rJLg35TUG3YAzXB3Y,9843\nhttpx/_status_codes.py,sha256=DYn-2ufBgMeXy5s8x3_TB7wjAuAAMewTakPrm5rXEsc,5639\nhttpx/_transports/__init__.py,sha256=GbUoBSAOp7z-l-9j5YhMhR3DMIcn6FVLhj072O3Nnno,275\nhttpx/_transports/__pycache__/__init__.cpython-313.pyc,,\nhttpx/_transports/__pycache__/asgi.cpython-313.pyc,,\nhttpx/_transports/__pycache__/base.cpython-313.pyc,,\nhttpx/_transports/__pycache__/default.cpython-313.pyc,,\nhttpx/_transports/__pycache__/mock.cpython-313.pyc,,\nhttpx/_transports/__pycache__/wsgi.cpython-313.pyc,,\nhttpx/_transports/asgi.py,sha256=HRfiDYMPt4wQH2gFgHZg4c-i3sblo6bL5GTqcET-xz8,5501\nhttpx/_transports/base.py,sha256=kZS_VMbViYfF570pogUCJ1bulz-ybfL51Pqs9yktebU,2523\nhttpx/_transports/default.py,sha256=AzeaRUyVwCccTyyNJexDf0n1dFfzzydpdIQgvw7PLnk,13983\nhttpx/_transports/mock.py,sha256=PTo0d567RITXxGrki6kN7_67wwAxfwiMDcuXJiZCjEo,1232\nhttpx/_transports/wsgi.py,sha256=NcPX3Xap_EwCFZWO_OaSyQNuInCYx1QMNbO8GAei6jY,4825\nhttpx/_types.py,sha256=Jyh41GQq7AOev8IOWKDAg7zCbvHAfufmW5g_PiTtErY,2965\nhttpx/_urlparse.py,sha256=ZAmH47ONfkxrrj-PPYhGeiHjb6AjKCS-ANWIN4OL_KY,18546\nhttpx/_urls.py,sha256=dX99VR1DSOHpgo9Aq7PzYO4FKdxqKjwyNp8grf8dHN0,21550\nhttpx/_utils.py,sha256=_TVeqAKvxJkKHdz7dFeb4s0LZqQXgeFkXSgfiHBK_1o,8285\nhttpx/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\n
| .venv\Lib\site-packages\httpx-0.28.1.dist-info\RECORD | RECORD | Other | 3,530 | 0.7 | 0 | 0 | awesome-app | 987 | 2024-04-26T17:57:34.715108 | BSD-3-Clause | false | 4a2fec4d555c3dcb7fa0ab334dadfae8 |
Wheel-Version: 1.0\nGenerator: hatchling 1.26.3\nRoot-Is-Purelib: true\nTag: py3-none-any\n
| .venv\Lib\site-packages\httpx-0.28.1.dist-info\WHEEL | WHEEL | Other | 87 | 0.5 | 0 | 0 | awesome-app | 747 | 2023-08-04T02:54:01.672098 | GPL-3.0 | false | 9f4a18bd1c234fe6e89f81926b18e5d4 |
Copyright © 2019, [Encode OSS Ltd](https://www.encode.io/).\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n
| .venv\Lib\site-packages\httpx-0.28.1.dist-info\licenses\LICENSE.md | LICENSE.md | Markdown | 1,508 | 0.8 | 0 | 0.428571 | awesome-app | 578 | 2025-06-23T16:15:00.041169 | Apache-2.0 | false | c624803bdf6fc1c4ce39f5ae11d7bd05 |
import inspect\nfrom dataclasses import _MISSING_TYPE, MISSING, Field, field, fields\nfrom functools import wraps\nfrom typing import (\n Any,\n Callable,\n Dict,\n List,\n Literal,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n get_args,\n get_origin,\n overload,\n)\n\nfrom .errors import (\n StrictDataclassClassValidationError,\n StrictDataclassDefinitionError,\n StrictDataclassFieldValidationError,\n)\n\n\nValidator_T = Callable[[Any], None]\nT = TypeVar("T")\n\n\n# The overload decorator helps type checkers understand the different return types\n@overload\ndef strict(cls: Type[T]) -> Type[T]: ...\n\n\n@overload\ndef strict(*, accept_kwargs: bool = False) -> Callable[[Type[T]], Type[T]]: ...\n\n\ndef strict(\n cls: Optional[Type[T]] = None, *, accept_kwargs: bool = False\n) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:\n """\n Decorator to add strict validation to a dataclass.\n\n This decorator must be used on top of `@dataclass` to ensure IDEs and static typing tools\n recognize the class as a dataclass.\n\n Can be used with or without arguments:\n - `@strict`\n - `@strict(accept_kwargs=True)`\n\n Args:\n cls:\n The class to convert to a strict dataclass.\n accept_kwargs (`bool`, *optional*):\n If True, allows arbitrary keyword arguments in `__init__`. Defaults to False.\n\n Returns:\n The enhanced dataclass with strict validation on field assignment.\n\n Example:\n ```py\n >>> from dataclasses import dataclass\n >>> from huggingface_hub.dataclasses import as_validated_field, strict, validated_field\n\n >>> @as_validated_field\n >>> def positive_int(value: int):\n ... if not value >= 0:\n ... raise ValueError(f"Value must be positive, got {value}")\n\n >>> @strict(accept_kwargs=True)\n ... @dataclass\n ... class User:\n ... name: str\n ... age: int = positive_int(default=10)\n\n # Initialize\n >>> User(name="John")\n User(name='John', age=10)\n\n # Extra kwargs are accepted\n >>> User(name="John", age=30, lastname="Doe")\n User(name='John', age=30, *lastname='Doe')\n\n # Invalid type => raises\n >>> User(name="John", age="30")\n huggingface_hub.errors.StrictDataclassFieldValidationError: Validation error for field 'age':\n TypeError: Field 'age' expected int, got str (value: '30')\n\n # Invalid value => raises\n >>> User(name="John", age=-1)\n huggingface_hub.errors.StrictDataclassFieldValidationError: Validation error for field 'age':\n ValueError: Value must be positive, got -1\n ```\n """\n\n def wrap(cls: Type[T]) -> Type[T]:\n if not hasattr(cls, "__dataclass_fields__"):\n raise StrictDataclassDefinitionError(\n f"Class '{cls.__name__}' must be a dataclass before applying @strict."\n )\n\n # List and store validators\n field_validators: Dict[str, List[Validator_T]] = {}\n for f in fields(cls): # type: ignore [arg-type]\n validators = []\n validators.append(_create_type_validator(f))\n custom_validator = f.metadata.get("validator")\n if custom_validator is not None:\n if not isinstance(custom_validator, list):\n custom_validator = [custom_validator]\n for validator in custom_validator:\n if not _is_validator(validator):\n raise StrictDataclassDefinitionError(\n f"Invalid validator for field '{f.name}': {validator}. 
Must be a callable taking a single argument."\n )\n validators.extend(custom_validator)\n field_validators[f.name] = validators\n cls.__validators__ = field_validators # type: ignore\n\n # Override __setattr__ to validate fields on assignment\n original_setattr = cls.__setattr__\n\n def __strict_setattr__(self: Any, name: str, value: Any) -> None:\n """Custom __setattr__ method for strict dataclasses."""\n # Run all validators\n for validator in self.__validators__.get(name, []):\n try:\n validator(value)\n except (ValueError, TypeError) as e:\n raise StrictDataclassFieldValidationError(field=name, cause=e) from e\n\n # If validation passed, set the attribute\n original_setattr(self, name, value)\n\n cls.__setattr__ = __strict_setattr__ # type: ignore[method-assign]\n\n if accept_kwargs:\n # (optional) Override __init__ to accept arbitrary keyword arguments\n original_init = cls.__init__\n\n @wraps(original_init)\n def __init__(self, **kwargs: Any) -> None:\n # Extract only the fields that are part of the dataclass\n dataclass_fields = {f.name for f in fields(cls)} # type: ignore [arg-type]\n standard_kwargs = {k: v for k, v in kwargs.items() if k in dataclass_fields}\n\n # Call the original __init__ with standard fields\n original_init(self, **standard_kwargs)\n\n # Add any additional kwargs as attributes\n for name, value in kwargs.items():\n if name not in dataclass_fields:\n self.__setattr__(name, value)\n\n cls.__init__ = __init__ # type: ignore[method-assign]\n\n # (optional) Override __repr__ to include additional kwargs\n original_repr = cls.__repr__\n\n @wraps(original_repr)\n def __repr__(self) -> str:\n # Call the original __repr__ to get the standard fields\n standard_repr = original_repr(self)\n\n # Get additional kwargs\n additional_kwargs = [\n # add a '*' in front of additional kwargs to let the user know they are not part of the dataclass\n f"*{k}={v!r}"\n for k, v in self.__dict__.items()\n if k not in cls.__dataclass_fields__ # type: ignore [attr-defined]\n ]\n additional_repr = ", ".join(additional_kwargs)\n\n # Combine both representations\n return f"{standard_repr[:-1]}, {additional_repr})" if additional_kwargs else standard_repr\n\n cls.__repr__ = __repr__ # type: ignore [method-assign]\n\n # List all public methods starting with `validate_` => class validators.\n class_validators = []\n\n for name in dir(cls):\n if not name.startswith("validate_"):\n continue\n method = getattr(cls, name)\n if not callable(method):\n continue\n if len(inspect.signature(method).parameters) != 1:\n raise StrictDataclassDefinitionError(\n f"Class '{cls.__name__}' has a class validator '{name}' that takes more than one argument."\n " Class validators must take only 'self' as an argument. 
Methods starting with 'validate_'"\n " are considered to be class validators."\n )\n class_validators.append(method)\n\n cls.__class_validators__ = class_validators # type: ignore [attr-defined]\n\n # Add `validate` method to the class, but first check if it already exists\n def validate(self: T) -> None:\n """Run class validators on the instance."""\n for validator in cls.__class_validators__: # type: ignore [attr-defined]\n try:\n validator(self)\n except (ValueError, TypeError) as e:\n raise StrictDataclassClassValidationError(validator=validator.__name__, cause=e) from e\n\n # Hack to be able to raise if `.validate()` already exists except if it was created by this decorator on a parent class\n # (in which case we just override it)\n validate.__is_defined_by_strict_decorator__ = True # type: ignore [attr-defined]\n\n if hasattr(cls, "validate"):\n if not getattr(cls.validate, "__is_defined_by_strict_decorator__", False): # type: ignore [attr-defined]\n raise StrictDataclassDefinitionError(\n f"Class '{cls.__name__}' already implements a method called 'validate'."\n " This method name is reserved when using the @strict decorator on a dataclass."\n " If you want to keep your own method, please rename it."\n )\n\n cls.validate = validate # type: ignore\n\n # Run class validators after initialization\n initial_init = cls.__init__\n\n @wraps(initial_init)\n def init_with_validate(self, *args, **kwargs) -> None:\n """Run class validators after initialization."""\n initial_init(self, *args, **kwargs) # type: ignore [call-arg]\n cls.validate(self) # type: ignore [attr-defined]\n\n setattr(cls, "__init__", init_with_validate)\n\n return cls\n\n # Return wrapped class or the decorator itself\n return wrap(cls) if cls is not None else wrap\n\n\ndef validated_field(\n validator: Union[List[Validator_T], Validator_T],\n default: Union[Any, _MISSING_TYPE] = MISSING,\n default_factory: Union[Callable[[], Any], _MISSING_TYPE] = MISSING,\n init: bool = True,\n repr: bool = True,\n hash: Optional[bool] = None,\n compare: bool = True,\n metadata: Optional[Dict] = None,\n **kwargs: Any,\n) -> Any:\n """\n Create a dataclass field with a custom validator.\n\n Useful to apply several checks to a field. If only applying one rule, check out the [`as_validated_field`] decorator.\n\n Args:\n validator (`Callable` or `List[Callable]`):\n A method that takes a value as input and raises ValueError/TypeError if the value is invalid.\n Can be a list of validators to apply multiple checks.\n **kwargs:\n Additional arguments to pass to `dataclasses.field()`.\n\n Returns:\n A field with the validator attached in metadata\n """\n if not isinstance(validator, list):\n validator = [validator]\n if metadata is None:\n metadata = {}\n metadata["validator"] = validator\n return field( # type: ignore\n default=default, # type: ignore [arg-type]\n default_factory=default_factory, # type: ignore [arg-type]\n init=init,\n repr=repr,\n hash=hash,\n compare=compare,\n metadata=metadata,\n **kwargs,\n )\n\n\ndef as_validated_field(validator: Validator_T):\n """\n Decorates a validator function as a [`validated_field`] (i.e. 
a dataclass field with a custom validator).\n\n Args:\n validator (`Callable`):\n A method that takes a value as input and raises ValueError/TypeError if the value is invalid.\n """\n\n def _inner(\n default: Union[Any, _MISSING_TYPE] = MISSING,\n default_factory: Union[Callable[[], Any], _MISSING_TYPE] = MISSING,\n init: bool = True,\n repr: bool = True,\n hash: Optional[bool] = None,\n compare: bool = True,\n metadata: Optional[Dict] = None,\n **kwargs: Any,\n ):\n return validated_field(\n validator,\n default=default,\n default_factory=default_factory,\n init=init,\n repr=repr,\n hash=hash,\n compare=compare,\n metadata=metadata,\n **kwargs,\n )\n\n return _inner\n\n\ndef type_validator(name: str, value: Any, expected_type: Any) -> None:\n """Validate that 'value' matches 'expected_type'."""\n origin = get_origin(expected_type)\n args = get_args(expected_type)\n\n if expected_type is Any:\n return\n elif validator := _BASIC_TYPE_VALIDATORS.get(origin):\n validator(name, value, args)\n elif isinstance(expected_type, type): # simple types\n _validate_simple_type(name, value, expected_type)\n else:\n raise TypeError(f"Unsupported type for field '{name}': {expected_type}")\n\n\ndef _validate_union(name: str, value: Any, args: Tuple[Any, ...]) -> None:\n """Validate that value matches one of the types in a Union."""\n errors = []\n for t in args:\n try:\n type_validator(name, value, t)\n return # Valid if any type matches\n except TypeError as e:\n errors.append(str(e))\n\n raise TypeError(\n f"Field '{name}' with value {repr(value)} doesn't match any type in {args}. Errors: {'; '.join(errors)}"\n )\n\n\ndef _validate_literal(name: str, value: Any, args: Tuple[Any, ...]) -> None:\n """Validate Literal type."""\n if value not in args:\n raise TypeError(f"Field '{name}' expected one of {args}, got {value}")\n\n\ndef _validate_list(name: str, value: Any, args: Tuple[Any, ...]) -> None:\n """Validate List[T] type."""\n if not isinstance(value, list):\n raise TypeError(f"Field '{name}' expected a list, got {type(value).__name__}")\n\n # Validate each item in the list\n item_type = args[0]\n for i, item in enumerate(value):\n try:\n type_validator(f"{name}[{i}]", item, item_type)\n except TypeError as e:\n raise TypeError(f"Invalid item at index {i} in list '{name}'") from e\n\n\ndef _validate_dict(name: str, value: Any, args: Tuple[Any, ...]) -> None:\n """Validate Dict[K, V] type."""\n if not isinstance(value, dict):\n raise TypeError(f"Field '{name}' expected a dict, got {type(value).__name__}")\n\n # Validate keys and values\n key_type, value_type = args\n for k, v in value.items():\n try:\n type_validator(f"{name}.key", k, key_type)\n type_validator(f"{name}[{k!r}]", v, value_type)\n except TypeError as e:\n raise TypeError(f"Invalid key or value in dict '{name}'") from e\n\n\ndef _validate_tuple(name: str, value: Any, args: Tuple[Any, ...]) -> None:\n """Validate Tuple type."""\n if not isinstance(value, tuple):\n raise TypeError(f"Field '{name}' expected a tuple, got {type(value).__name__}")\n\n # Handle variable-length tuples: Tuple[T, ...]\n if len(args) == 2 and args[1] is Ellipsis:\n for i, item in enumerate(value):\n try:\n type_validator(f"{name}[{i}]", item, args[0])\n except TypeError as e:\n raise TypeError(f"Invalid item at index {i} in tuple '{name}'") from e\n # Handle fixed-length tuples: Tuple[T1, T2, ...]\n elif len(args) != len(value):\n raise TypeError(f"Field '{name}' expected a tuple of length {len(args)}, got {len(value)}")\n else:\n for i, (item, expected) in 
enumerate(zip(value, args)):\n try:\n type_validator(f"{name}[{i}]", item, expected)\n except TypeError as e:\n raise TypeError(f"Invalid item at index {i} in tuple '{name}'") from e\n\n\ndef _validate_set(name: str, value: Any, args: Tuple[Any, ...]) -> None:\n """Validate Set[T] type."""\n if not isinstance(value, set):\n raise TypeError(f"Field '{name}' expected a set, got {type(value).__name__}")\n\n # Validate each item in the set\n item_type = args[0]\n for i, item in enumerate(value):\n try:\n type_validator(f"{name} item", item, item_type)\n except TypeError as e:\n raise TypeError(f"Invalid item in set '{name}'") from e\n\n\ndef _validate_simple_type(name: str, value: Any, expected_type: type) -> None:\n """Validate simple type (int, str, etc.)."""\n if not isinstance(value, expected_type):\n raise TypeError(\n f"Field '{name}' expected {expected_type.__name__}, got {type(value).__name__} (value: {repr(value)})"\n )\n\n\ndef _create_type_validator(field: Field) -> Validator_T:\n """Create a type validator function for a field."""\n # Hacky: we cannot use a lambda here because of reference issues\n\n def validator(value: Any) -> None:\n type_validator(field.name, value, field.type)\n\n return validator\n\n\ndef _is_validator(validator: Any) -> bool:\n """Check if a function is a validator.\n\n A validator is a Callable that can be called with a single positional argument.\n The validator can have more arguments with default values.\n\n Basically, returns True if `validator(value)` is possible.\n """\n if not callable(validator):\n return False\n\n signature = inspect.signature(validator)\n parameters = list(signature.parameters.values())\n if len(parameters) == 0:\n return False\n if parameters[0].kind not in (\n inspect.Parameter.POSITIONAL_OR_KEYWORD,\n inspect.Parameter.POSITIONAL_ONLY,\n inspect.Parameter.VAR_POSITIONAL,\n ):\n return False\n for parameter in parameters[1:]:\n if parameter.default == inspect.Parameter.empty:\n return False\n return True\n\n\n_BASIC_TYPE_VALIDATORS = {\n Union: _validate_union,\n Literal: _validate_literal,\n list: _validate_list,\n dict: _validate_dict,\n tuple: _validate_tuple,\n set: _validate_set,\n}\n\n\n__all__ = [\n "strict",\n "validated_field",\n "Validator_T",\n "StrictDataclassClassValidationError",\n "StrictDataclassDefinitionError",\n "StrictDataclassFieldValidationError",\n]\n
| .venv\Lib\site-packages\huggingface_hub\dataclasses.py | dataclasses.py | Python | 17,224 | 0.95 | 0.222453 | 0.090206 | awesome-app | 792 | 2025-02-08T12:00:57.298842 | BSD-3-Clause | false | 4871708798b2379dde2fdc2397a0f59e |
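The `strict` decorator in the row above is stacked on top of a plain `@dataclass` (the `fields(cls)` calls in its body assume the class is already a dataclass) and wires per-field type checks, custom field validators, and `validate_*` class validators into `__setattr__` and `__init__`. A minimal usage sketch, assuming the module imports as `huggingface_hub.dataclasses` as the path suggests; `TrainingConfig` and `positive` are hypothetical names:

```python
from dataclasses import dataclass

from huggingface_hub.dataclasses import strict, validated_field


def positive(value: int) -> None:
    # Field validator: raise ValueError/TypeError to reject a value
    if value <= 0:
        raise ValueError(f"Expected a positive value, got {value}")


@strict(accept_kwargs=True)  # extra kwargs are kept as plain attributes
@dataclass
class TrainingConfig:  # hypothetical example class
    model_name: str  # type-checked on every assignment
    batch_size: int = validated_field(validator=positive, default=32)

    def validate_model_name(self):
        # Class validator: runs once after __init__ and on .validate()
        if "/" not in self.model_name:
            raise ValueError("Expected a namespaced id like 'org/name'")


config = TrainingConfig(model_name="org/model", batch_size=8, seed=42)
print(config)            # repr marks the extra kwarg as *seed=42
config.batch_size = 16   # re-validated on assignment
# config.batch_size = -1 would raise StrictDataclassFieldValidationError
```

For a single reusable rule, `as_validated_field(positive)` returns a field factory, so the check can be declared inline without repeating `validated_field(validator=...)` at each use site.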
import collections.abc as collections\nimport json\nimport os\nimport warnings\nfrom functools import wraps\nfrom pathlib import Path\nfrom shutil import copytree\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom huggingface_hub import ModelHubMixin, snapshot_download\nfrom huggingface_hub.utils import (\n get_tf_version,\n is_graphviz_available,\n is_pydot_available,\n is_tf_available,\n yaml_dump,\n)\n\nfrom . import constants\nfrom .hf_api import HfApi\nfrom .utils import SoftTemporaryDirectory, logging, validate_hf_hub_args\nfrom .utils._typing import CallableT\n\n\nlogger = logging.get_logger(__name__)\n\nkeras = None\nif is_tf_available():\n # Depending on which version of TensorFlow is installed, we need to import\n # keras from the correct location.\n # See https://github.com/tensorflow/tensorflow/releases/tag/v2.16.1.\n # Note: saving a keras model only works with Keras<3.0.\n try:\n import tf_keras as keras # type: ignore\n except ImportError:\n import tensorflow as tf # type: ignore\n\n keras = tf.keras\n\n\ndef _requires_keras_2_model(fn: CallableT) -> CallableT:\n # Wrapper to raise if user tries to save a Keras 3.x model\n @wraps(fn)\n def _inner(model, *args, **kwargs):\n if not hasattr(model, "history"): # hacky way to check if model is Keras 2.x\n raise NotImplementedError(\n f"Cannot use '{fn.__name__}': Keras 3.x is not supported."\n " Please save models manually and upload them using `upload_folder` or `huggingface-cli upload`."\n )\n return fn(model, *args, **kwargs)\n\n return _inner # type: ignore [return-value]\n\n\ndef _flatten_dict(dictionary, parent_key=""):\n """Flatten a nested dictionary.\n Reference: https://stackoverflow.com/a/6027615/10319735\n\n Args:\n dictionary (`dict`):\n The nested dictionary to be flattened.\n parent_key (`str`):\n The parent key to be prefixed to the children keys.\n Necessary for recursing over the nested dictionary.\n\n Returns:\n The flattened dictionary.\n """\n items = []\n for key, value in dictionary.items():\n new_key = f"{parent_key}.{key}" if parent_key else key\n if isinstance(value, collections.MutableMapping):\n items.extend(\n _flatten_dict(\n value,\n new_key,\n ).items()\n )\n else:\n items.append((new_key, value))\n return dict(items)\n\n\ndef _create_hyperparameter_table(model):\n """Parse hyperparameter dictionary into a markdown table."""\n table = None\n if model.optimizer is not None:\n optimizer_params = model.optimizer.get_config()\n # flatten the configuration\n optimizer_params = _flatten_dict(optimizer_params)\n optimizer_params["training_precision"] = keras.mixed_precision.global_policy().name\n table = "| Hyperparameters | Value |\n| :-- | :-- |\n"\n for key, value in optimizer_params.items():\n table += f"| {key} | {value} |\n"\n return table\n\n\ndef _plot_network(model, save_directory):\n keras.utils.plot_model(\n model,\n to_file=f"{save_directory}/model.png",\n show_shapes=False,\n show_dtype=False,\n show_layer_names=True,\n rankdir="TB",\n expand_nested=False,\n dpi=96,\n layer_range=None,\n )\n\n\ndef _create_model_card(\n model,\n repo_dir: Path,\n plot_model: bool = True,\n metadata: Optional[dict] = None,\n):\n """\n Creates a model card for the repository.\n\n Do not overwrite an existing README.md file.\n """\n readme_path = repo_dir / "README.md"\n if readme_path.exists():\n return\n\n hyperparameters = _create_hyperparameter_table(model)\n if plot_model and is_graphviz_available() and is_pydot_available():\n _plot_network(model, repo_dir)\n if metadata is None:\n metadata = {}\n 
metadata["library_name"] = "keras"\n model_card: str = "---\n"\n model_card += yaml_dump(metadata, default_flow_style=False)\n model_card += "---\n"\n model_card += "\n## Model description\n\nMore information needed\n"\n model_card += "\n## Intended uses & limitations\n\nMore information needed\n"\n model_card += "\n## Training and evaluation data\n\nMore information needed\n"\n if hyperparameters is not None:\n model_card += "\n## Training procedure\n"\n model_card += "\n### Training hyperparameters\n"\n model_card += "\nThe following hyperparameters were used during training:\n\n"\n model_card += hyperparameters\n model_card += "\n"\n if plot_model and os.path.exists(f"{repo_dir}/model.png"):\n model_card += "\n ## Model Plot\n"\n model_card += "\n<details>"\n model_card += "\n<summary>View Model Plot</summary>\n"\n path_to_plot = "./model.png"\n model_card += f"\n\n"\n model_card += "\n</details>"\n\n readme_path.write_text(model_card)\n\n\n@_requires_keras_2_model\ndef save_pretrained_keras(\n model,\n save_directory: Union[str, Path],\n config: Optional[Dict[str, Any]] = None,\n include_optimizer: bool = False,\n plot_model: bool = True,\n tags: Optional[Union[list, str]] = None,\n **model_save_kwargs,\n):\n """\n Saves a Keras model to save_directory in SavedModel format. Use this if\n you're using the Functional or Sequential APIs.\n\n Args:\n model (`Keras.Model`):\n The [Keras\n model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)\n you'd like to save. The model must be compiled and built.\n save_directory (`str` or `Path`):\n Specify directory in which you want to save the Keras model.\n config (`dict`, *optional*):\n Configuration object to be saved alongside the model weights.\n include_optimizer(`bool`, *optional*, defaults to `False`):\n Whether or not to include optimizer in serialization.\n plot_model (`bool`, *optional*, defaults to `True`):\n Setting this to `True` will plot the model and put it in the model\n card. Requires graphviz and pydot to be installed.\n tags (Union[`str`,`list`], *optional*):\n List of tags that are related to model or string of a single tag. See example tags\n [here](https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1).\n model_save_kwargs(`dict`, *optional*):\n model_save_kwargs will be passed to\n [`tf.keras.models.save_model()`](https://www.tensorflow.org/api_docs/python/tf/keras/models/save_model).\n """\n if keras is None:\n raise ImportError("Called a Tensorflow-specific function but could not import it.")\n\n if not model.built:\n raise ValueError("Model should be built before trying to save")\n\n save_directory = Path(save_directory)\n save_directory.mkdir(parents=True, exist_ok=True)\n\n # saving config\n if config:\n if not isinstance(config, dict):\n raise RuntimeError(f"Provided config to save_pretrained_keras should be a dict. Got: '{type(config)}'")\n\n with (save_directory / constants.CONFIG_NAME).open("w") as f:\n json.dump(config, f)\n\n metadata = {}\n if isinstance(tags, list):\n metadata["tags"] = tags\n elif isinstance(tags, str):\n metadata["tags"] = [tags]\n\n task_name = model_save_kwargs.pop("task_name", None)\n if task_name is not None:\n warnings.warn(\n "`task_name` input argument is deprecated. 
Pass `tags` instead.",\n FutureWarning,\n )\n if "tags" in metadata:\n metadata["tags"].append(task_name)\n else:\n metadata["tags"] = [task_name]\n\n if model.history is not None:\n if model.history.history != {}:\n path = save_directory / "history.json"\n if path.exists():\n warnings.warn(\n "`history.json` file already exists, it will be overwritten by the history of this version.",\n UserWarning,\n )\n with path.open("w", encoding="utf-8") as f:\n json.dump(model.history.history, f, indent=2, sort_keys=True)\n\n _create_model_card(model, save_directory, plot_model, metadata)\n keras.models.save_model(model, save_directory, include_optimizer=include_optimizer, **model_save_kwargs)\n\n\ndef from_pretrained_keras(*args, **kwargs) -> "KerasModelHubMixin":\n r"""\n Instantiate a pretrained Keras model from a pre-trained model from the Hub.\n The model is expected to be in `SavedModel` format.\n\n Args:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n Can be either:\n - A string, the `model id` of a pretrained model hosted inside a\n model repo on huggingface.co. Valid model ids can be located\n at the root-level, like `bert-base-uncased`, or namespaced\n under a user or organization name, like\n `dbmdz/bert-base-german-cased`.\n - You can add `revision` by appending `@` at the end of model_id\n simply like this: `dbmdz/bert-base-german-cased@main` Revision\n is the specific model version to use. It can be a branch name,\n a tag name, or a commit id, since we use a git-based system\n for storing models and other artifacts on huggingface.co, so\n `revision` can be any identifier allowed by git.\n - A path to a `directory` containing model weights saved using\n [`~transformers.PreTrainedModel.save_pretrained`], e.g.,\n `./my_model_directory/`.\n - `None` if you are both providing the configuration and state\n dictionary (resp. with keyword arguments `config` and\n `state_dict`).\n force_download (`bool`, *optional*, defaults to `False`):\n Whether to force the (re-)download of the model weights and\n configuration files, overriding the cached versions if they exist.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g.,\n `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The\n proxies are used on each request.\n token (`str` or `bool`, *optional*):\n The token to use as HTTP bearer authorization for remote files. 
If\n `True`, will use the token generated when running `transformers-cli\n login` (stored in `~/.huggingface`).\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model\n configuration should be cached if the standard cache should not be\n used.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether to only look at local files (i.e., do not try to download\n the model).\n model_kwargs (`Dict`, *optional*):\n model_kwargs will be passed to the model during initialization\n\n <Tip>\n\n Passing `token=True` is required when you want to use a private\n model.\n\n </Tip>\n """\n return KerasModelHubMixin.from_pretrained(*args, **kwargs)\n\n\n@validate_hf_hub_args\n@_requires_keras_2_model\ndef push_to_hub_keras(\n model,\n repo_id: str,\n *,\n config: Optional[dict] = None,\n commit_message: str = "Push Keras model using huggingface_hub.",\n private: Optional[bool] = None,\n api_endpoint: Optional[str] = None,\n token: Optional[str] = None,\n branch: Optional[str] = None,\n create_pr: Optional[bool] = None,\n allow_patterns: Optional[Union[List[str], str]] = None,\n ignore_patterns: Optional[Union[List[str], str]] = None,\n delete_patterns: Optional[Union[List[str], str]] = None,\n log_dir: Optional[str] = None,\n include_optimizer: bool = False,\n tags: Optional[Union[list, str]] = None,\n plot_model: bool = True,\n **model_save_kwargs,\n):\n """\n Upload model checkpoint to the Hub.\n\n Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub. Use\n `delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference for more\n details.\n\n Args:\n model (`Keras.Model`):\n The [Keras model](`https://www.tensorflow.org/api_docs/python/tf/keras/Model`) you'd like to push to the\n Hub. The model must be compiled and built.\n repo_id (`str`):\n ID of the repository to push to (example: `"username/my-model"`).\n commit_message (`str`, *optional*, defaults to "Add Keras model"):\n Message to commit while pushing.\n private (`bool`, *optional*):\n Whether the repository created should be private.\n If `None` (default), the repo will be public unless the organization's default is private.\n api_endpoint (`str`, *optional*):\n The API endpoint to use when pushing the model to the hub.\n token (`str`, *optional*):\n The token to use as HTTP bearer authorization for remote files. If\n not set, will use the token set when logging in with\n `huggingface-cli login` (stored in `~/.huggingface`).\n branch (`str`, *optional*):\n The git branch on which to push the model. This defaults to\n the default branch as specified in your repository, which\n defaults to `"main"`.\n create_pr (`boolean`, *optional*):\n Whether or not to create a Pull Request from `branch` with that commit.\n Defaults to `False`.\n config (`dict`, *optional*):\n Configuration object to be saved alongside the model weights.\n allow_patterns (`List[str]` or `str`, *optional*):\n If provided, only files matching at least one pattern are pushed.\n ignore_patterns (`List[str]` or `str`, *optional*):\n If provided, files matching any of the patterns are not pushed.\n delete_patterns (`List[str]` or `str`, *optional*):\n If provided, remote files matching any of the patterns will be deleted from the repo.\n log_dir (`str`, *optional*):\n TensorBoard logging directory to be pushed. 
The Hub automatically\n hosts and displays a TensorBoard instance if log files are included\n in the repository.\n include_optimizer (`bool`, *optional*, defaults to `False`):\n Whether or not to include optimizer during serialization.\n tags (Union[`list`, `str`], *optional*):\n List of tags that are related to model or string of a single tag. See example tags\n [here](https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1).\n plot_model (`bool`, *optional*, defaults to `True`):\n Setting this to `True` will plot the model and put it in the model\n card. Requires graphviz and pydot to be installed.\n model_save_kwargs(`dict`, *optional*):\n model_save_kwargs will be passed to\n [`tf.keras.models.save_model()`](https://www.tensorflow.org/api_docs/python/tf/keras/models/save_model).\n\n Returns:\n The url of the commit of your model in the given repository.\n """\n api = HfApi(endpoint=api_endpoint)\n repo_id = api.create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True).repo_id\n\n # Push the files to the repo in a single commit\n with SoftTemporaryDirectory() as tmp:\n saved_path = Path(tmp) / repo_id\n save_pretrained_keras(\n model,\n saved_path,\n config=config,\n include_optimizer=include_optimizer,\n tags=tags,\n plot_model=plot_model,\n **model_save_kwargs,\n )\n\n # If `log_dir` provided, delete remote logs and upload new ones\n if log_dir is not None:\n delete_patterns = (\n []\n if delete_patterns is None\n else (\n [delete_patterns] # convert `delete_patterns` to a list\n if isinstance(delete_patterns, str)\n else delete_patterns\n )\n )\n delete_patterns.append("logs/*")\n copytree(log_dir, saved_path / "logs")\n\n return api.upload_folder(\n repo_type="model",\n repo_id=repo_id,\n folder_path=saved_path,\n commit_message=commit_message,\n token=token,\n revision=branch,\n create_pr=create_pr,\n allow_patterns=allow_patterns,\n ignore_patterns=ignore_patterns,\n delete_patterns=delete_patterns,\n )\n\n\nclass KerasModelHubMixin(ModelHubMixin):\n """\n Implementation of [`ModelHubMixin`] to provide model Hub upload/download\n capabilities to Keras models.\n\n\n ```python\n >>> import tensorflow as tf\n >>> from huggingface_hub import KerasModelHubMixin\n\n\n >>> class MyModel(tf.keras.Model, KerasModelHubMixin):\n ... def __init__(self, **kwargs):\n ... super().__init__()\n ... self.config = kwargs.pop("config", None)\n ... self.dummy_inputs = ...\n ... self.layer = ...\n\n ... def call(self, *args):\n ... 
return ...\n\n\n >>> # Initialize and compile the model as you normally would\n >>> model = MyModel()\n >>> model.compile(...)\n >>> # Build the graph by training it or passing dummy inputs\n >>> _ = model(model.dummy_inputs)\n >>> # Save model weights to local directory\n >>> model.save_pretrained("my-awesome-model")\n >>> # Push model weights to the Hub\n >>> model.push_to_hub("my-awesome-model")\n >>> # Download and initialize weights from the Hub\n >>> model = MyModel.from_pretrained("username/super-cool-model")\n ```\n """\n\n def _save_pretrained(self, save_directory):\n save_pretrained_keras(self, save_directory)\n\n @classmethod\n def _from_pretrained(\n cls,\n model_id,\n revision,\n cache_dir,\n force_download,\n proxies,\n resume_download,\n local_files_only,\n token,\n config: Optional[Dict[str, Any]] = None,\n **model_kwargs,\n ):\n """Here we just call [`from_pretrained_keras`] function so both the mixin and\n functional APIs stay in sync.\n\n TODO - Some args above aren't used since we are calling\n snapshot_download instead of hf_hub_download.\n """\n if keras is None:\n raise ImportError("Called a TensorFlow-specific function but could not import it.")\n\n # Root is either a local filepath matching model_id or a cached snapshot\n if not os.path.isdir(model_id):\n storage_folder = snapshot_download(\n repo_id=model_id,\n revision=revision,\n cache_dir=cache_dir,\n library_name="keras",\n library_version=get_tf_version(),\n )\n else:\n storage_folder = model_id\n\n # TODO: change this in a future PR. We are not returning a KerasModelHubMixin instance here...\n model = keras.models.load_model(storage_folder)\n\n # For now, we add a new attribute, config, to store the config loaded from the hub/a local dir.\n model.config = config\n\n return model\n
| .venv\Lib\site-packages\huggingface_hub\keras_mixin.py | keras_mixin.py | Python | 19,574 | 0.95 | 0.122 | 0.038902 | awesome-app | 918 | 2024-11-12T02:51:07.671023 | Apache-2.0 | false | 35e043c62086bd22e05c8b0b2b5f6e3b |
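End to end, the row above combines `save_pretrained_keras` and `upload_folder` behind `push_to_hub_keras`. A sketch under the constraints the code itself enforces (Keras 2.x only, model compiled and built); the repo id is hypothetical:

```python
import tensorflow as tf

from huggingface_hub import from_pretrained_keras, push_to_hub_keras

# Build and compile a tiny model; `model.built` must be True before saving
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

push_to_hub_keras(
    model,
    repo_id="username/my-keras-model",  # hypothetical repo id
    config={"input_dim": 4},            # written to the config file next to the weights
    tags=["regression"],                # surfaced in the generated model card
    include_optimizer=False,
)

# Reload later; the SavedModel is fetched via snapshot_download under the hood
reloaded = from_pretrained_keras("username/my-keras-model")
```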
import time\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Dict, Optional, Union\n\nfrom huggingface_hub.errors import InferenceEndpointError, InferenceEndpointTimeoutError\n\nfrom .utils import get_session, logging, parse_datetime\n\n\nif TYPE_CHECKING:\n from .hf_api import HfApi\n from .inference._client import InferenceClient\n from .inference._generated._async_client import AsyncInferenceClient\n\nlogger = logging.get_logger(__name__)\n\n\nclass InferenceEndpointStatus(str, Enum):\n PENDING = "pending"\n INITIALIZING = "initializing"\n UPDATING = "updating"\n UPDATE_FAILED = "updateFailed"\n RUNNING = "running"\n PAUSED = "paused"\n FAILED = "failed"\n SCALED_TO_ZERO = "scaledToZero"\n\n\nclass InferenceEndpointType(str, Enum):\n PUBlIC = "public"\n PROTECTED = "protected"\n PRIVATE = "private"\n\n\n@dataclass\nclass InferenceEndpoint:\n """\n Contains information about a deployed Inference Endpoint.\n\n Args:\n name (`str`):\n The unique name of the Inference Endpoint.\n namespace (`str`):\n The namespace where the Inference Endpoint is located.\n repository (`str`):\n The name of the model repository deployed on this Inference Endpoint.\n status ([`InferenceEndpointStatus`]):\n The current status of the Inference Endpoint.\n url (`str`, *optional*):\n The URL of the Inference Endpoint, if available. Only a deployed Inference Endpoint will have a URL.\n framework (`str`):\n The machine learning framework used for the model.\n revision (`str`):\n The specific model revision deployed on the Inference Endpoint.\n task (`str`):\n The task associated with the deployed model.\n created_at (`datetime.datetime`):\n The timestamp when the Inference Endpoint was created.\n updated_at (`datetime.datetime`):\n The timestamp of the last update of the Inference Endpoint.\n type ([`InferenceEndpointType`]):\n The type of the Inference Endpoint (public, protected, private).\n raw (`Dict`):\n The raw dictionary data returned from the API.\n token (`str` or `bool`, *optional*):\n Authentication token for the Inference Endpoint, if set when requesting the API. Will default to the\n locally saved token if not provided. 
Pass `token=False` if you don't want to send your token to the server.\n\n Example:\n ```python\n >>> from huggingface_hub import get_inference_endpoint\n >>> endpoint = get_inference_endpoint("my-text-to-image")\n >>> endpoint\n InferenceEndpoint(name='my-text-to-image', ...)\n\n # Get status\n >>> endpoint.status\n 'running'\n >>> endpoint.url\n 'https://my-text-to-image.region.vendor.endpoints.huggingface.cloud'\n\n # Run inference\n >>> endpoint.client.text_to_image(...)\n\n # Pause endpoint to save $$$\n >>> endpoint.pause()\n\n # ...\n # Resume and wait for deployment\n >>> endpoint.resume()\n >>> endpoint.wait()\n >>> endpoint.client.text_to_image(...)\n ```\n """\n\n # Field in __repr__\n name: str = field(init=False)\n namespace: str\n repository: str = field(init=False)\n status: InferenceEndpointStatus = field(init=False)\n health_route: str = field(init=False)\n url: Optional[str] = field(init=False)\n\n # Other fields\n framework: str = field(repr=False, init=False)\n revision: str = field(repr=False, init=False)\n task: str = field(repr=False, init=False)\n created_at: datetime = field(repr=False, init=False)\n updated_at: datetime = field(repr=False, init=False)\n type: InferenceEndpointType = field(repr=False, init=False)\n\n # Raw dict from the API\n raw: Dict = field(repr=False)\n\n # Internal fields\n _token: Union[str, bool, None] = field(repr=False, compare=False)\n _api: "HfApi" = field(repr=False, compare=False)\n\n @classmethod\n def from_raw(\n cls, raw: Dict, namespace: str, token: Union[str, bool, None] = None, api: Optional["HfApi"] = None\n ) -> "InferenceEndpoint":\n """Initialize object from raw dictionary."""\n if api is None:\n from .hf_api import HfApi\n\n api = HfApi()\n if token is None:\n token = api.token\n\n # All other fields are populated in __post_init__\n return cls(raw=raw, namespace=namespace, _token=token, _api=api)\n\n def __post_init__(self) -> None:\n """Populate fields from raw dictionary."""\n self._populate_from_raw()\n\n @property\n def client(self) -> "InferenceClient":\n """Returns a client to make predictions on this Inference Endpoint.\n\n Returns:\n [`InferenceClient`]: an inference client pointing to the deployed endpoint.\n\n Raises:\n [`InferenceEndpointError`]: If the Inference Endpoint is not yet deployed.\n """\n if self.url is None:\n raise InferenceEndpointError(\n "Cannot create a client for this Inference Endpoint as it is not yet deployed. "\n "Please wait for the Inference Endpoint to be deployed using `endpoint.wait()` and try again."\n )\n from .inference._client import InferenceClient\n\n return InferenceClient(\n model=self.url,\n token=self._token, # type: ignore[arg-type] # boolean token shouldn't be possible. In practice it's ok.\n )\n\n @property\n def async_client(self) -> "AsyncInferenceClient":\n """Returns a client to make predictions on this Inference Endpoint.\n\n Returns:\n [`AsyncInferenceClient`]: an asyncio-compatible inference client pointing to the deployed endpoint.\n\n Raises:\n [`InferenceEndpointError`]: If the Inference Endpoint is not yet deployed.\n """\n if self.url is None:\n raise InferenceEndpointError(\n "Cannot create a client for this Inference Endpoint as it is not yet deployed. "\n "Please wait for the Inference Endpoint to be deployed using `endpoint.wait()` and try again."\n )\n from .inference._generated._async_client import AsyncInferenceClient\n\n return AsyncInferenceClient(\n model=self.url,\n token=self._token, # type: ignore[arg-type] # boolean token shouldn't be possible. 
In practice it's ok.\n )\n\n def wait(self, timeout: Optional[int] = None, refresh_every: int = 5) -> "InferenceEndpoint":\n """Wait for the Inference Endpoint to be deployed.\n\n Information from the server will be fetched every 1s. If the Inference Endpoint is not deployed after `timeout`\n seconds, a [`InferenceEndpointTimeoutError`] will be raised. The [`InferenceEndpoint`] will be mutated in place with the latest\n data.\n\n Args:\n timeout (`int`, *optional*):\n The maximum time to wait for the Inference Endpoint to be deployed, in seconds. If `None`, will wait\n indefinitely.\n refresh_every (`int`, *optional*):\n The time to wait between each fetch of the Inference Endpoint status, in seconds. Defaults to 5s.\n\n Returns:\n [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.\n\n Raises:\n [`InferenceEndpointError`]\n If the Inference Endpoint ended up in a failed state.\n [`InferenceEndpointTimeoutError`]\n If the Inference Endpoint is not deployed after `timeout` seconds.\n """\n if timeout is not None and timeout < 0:\n raise ValueError("`timeout` cannot be negative.")\n if refresh_every <= 0:\n raise ValueError("`refresh_every` must be positive.")\n\n start = time.time()\n while True:\n if self.status == InferenceEndpointStatus.FAILED:\n raise InferenceEndpointError(\n f"Inference Endpoint {self.name} failed to deploy. Please check the logs for more information."\n )\n if self.status == InferenceEndpointStatus.UPDATE_FAILED:\n raise InferenceEndpointError(\n f"Inference Endpoint {self.name} failed to update. Please check the logs for more information."\n )\n if self.status == InferenceEndpointStatus.RUNNING and self.url is not None:\n # Verify the endpoint is actually reachable\n _health_url = f"{self.url.rstrip('/')}/{self.health_route.lstrip('/')}"\n response = get_session().get(_health_url, headers=self._api._build_hf_headers(token=self._token))\n if response.status_code == 200:\n logger.info("Inference Endpoint is ready to be used.")\n return self\n\n if timeout is not None:\n if time.time() - start > timeout:\n raise InferenceEndpointTimeoutError("Timeout while waiting for Inference Endpoint to be deployed.")\n logger.info(f"Inference Endpoint is not deployed yet ({self.status}). Waiting {refresh_every}s...")\n time.sleep(refresh_every)\n self.fetch()\n\n def fetch(self) -> "InferenceEndpoint":\n """Fetch latest information about the Inference Endpoint.\n\n Returns:\n [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.\n """\n obj = self._api.get_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]\n self.raw = obj.raw\n self._populate_from_raw()\n return self\n\n def update(\n self,\n *,\n # Compute update\n accelerator: Optional[str] = None,\n instance_size: Optional[str] = None,\n instance_type: Optional[str] = None,\n min_replica: Optional[int] = None,\n max_replica: Optional[int] = None,\n scale_to_zero_timeout: Optional[int] = None,\n # Model update\n repository: Optional[str] = None,\n framework: Optional[str] = None,\n revision: Optional[str] = None,\n task: Optional[str] = None,\n custom_image: Optional[Dict] = None,\n secrets: Optional[Dict[str, str]] = None,\n ) -> "InferenceEndpoint":\n """Update the Inference Endpoint.\n\n This method allows the update of either the compute configuration, the deployed model, or both. 
All arguments are\n optional but at least one must be provided.\n\n This is an alias for [`HfApi.update_inference_endpoint`]. The current object is mutated in place with the\n latest data from the server.\n\n Args:\n accelerator (`str`, *optional*):\n The hardware accelerator to be used for inference (e.g. `"cpu"`).\n instance_size (`str`, *optional*):\n The size or type of the instance to be used for hosting the model (e.g. `"x4"`).\n instance_type (`str`, *optional*):\n The cloud instance type where the Inference Endpoint will be deployed (e.g. `"intel-icl"`).\n min_replica (`int`, *optional*):\n The minimum number of replicas (instances) to keep running for the Inference Endpoint.\n max_replica (`int`, *optional*):\n The maximum number of replicas (instances) to scale to for the Inference Endpoint.\n scale_to_zero_timeout (`int`, *optional*):\n The duration in minutes before an inactive endpoint is scaled to zero.\n\n repository (`str`, *optional*):\n The name of the model repository associated with the Inference Endpoint (e.g. `"gpt2"`).\n framework (`str`, *optional*):\n The machine learning framework used for the model (e.g. `"custom"`).\n revision (`str`, *optional*):\n The specific model revision to deploy on the Inference Endpoint (e.g. `"6c0e6080953db56375760c0471a8c5f2929baf11"`).\n task (`str`, *optional*):\n The task on which to deploy the model (e.g. `"text-classification"`).\n custom_image (`Dict`, *optional*):\n A custom Docker image to use for the Inference Endpoint. This is useful if you want to deploy an\n Inference Endpoint running on the `text-generation-inference` (TGI) framework (see examples).\n secrets (`Dict[str, str]`, *optional*):\n Secret values to inject in the container environment.\n Returns:\n [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.\n """\n # Make API call\n obj = self._api.update_inference_endpoint(\n name=self.name,\n namespace=self.namespace,\n accelerator=accelerator,\n instance_size=instance_size,\n instance_type=instance_type,\n min_replica=min_replica,\n max_replica=max_replica,\n scale_to_zero_timeout=scale_to_zero_timeout,\n repository=repository,\n framework=framework,\n revision=revision,\n task=task,\n custom_image=custom_image,\n secrets=secrets,\n token=self._token, # type: ignore [arg-type]\n )\n\n # Mutate current object\n self.raw = obj.raw\n self._populate_from_raw()\n return self\n\n def pause(self) -> "InferenceEndpoint":\n """Pause the Inference Endpoint.\n\n A paused Inference Endpoint will not be charged. It can be resumed at any time using [`InferenceEndpoint.resume`].\n This is different than scaling the Inference Endpoint to zero with [`InferenceEndpoint.scale_to_zero`], which\n would be automatically restarted when a request is made to it.\n\n This is an alias for [`HfApi.pause_inference_endpoint`]. The current object is mutated in place with the\n latest data from the server.\n\n Returns:\n [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.\n """\n obj = self._api.pause_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]\n self.raw = obj.raw\n self._populate_from_raw()\n return self\n\n def resume(self, running_ok: bool = True) -> "InferenceEndpoint":\n """Resume the Inference Endpoint.\n\n This is an alias for [`HfApi.resume_inference_endpoint`]. 
The current object is mutated in place with the\n latest data from the server.\n\n Args:\n running_ok (`bool`, *optional*):\n If `True`, the method will not raise an error if the Inference Endpoint is already running. Defaults to\n `True`.\n\n Returns:\n [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.\n """\n obj = self._api.resume_inference_endpoint(\n name=self.name, namespace=self.namespace, running_ok=running_ok, token=self._token\n ) # type: ignore [arg-type]\n self.raw = obj.raw\n self._populate_from_raw()\n return self\n\n def scale_to_zero(self) -> "InferenceEndpoint":\n """Scale Inference Endpoint to zero.\n\n An Inference Endpoint scaled to zero will not be charged. It will be resume on the next request to it, with a\n cold start delay. This is different than pausing the Inference Endpoint with [`InferenceEndpoint.pause`], which\n would require a manual resume with [`InferenceEndpoint.resume`].\n\n This is an alias for [`HfApi.scale_to_zero_inference_endpoint`]. The current object is mutated in place with the\n latest data from the server.\n\n Returns:\n [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.\n """\n obj = self._api.scale_to_zero_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]\n self.raw = obj.raw\n self._populate_from_raw()\n return self\n\n def delete(self) -> None:\n """Delete the Inference Endpoint.\n\n This operation is not reversible. If you don't want to be charged for an Inference Endpoint, it is preferable\n to pause it with [`InferenceEndpoint.pause`] or scale it to zero with [`InferenceEndpoint.scale_to_zero`].\n\n This is an alias for [`HfApi.delete_inference_endpoint`].\n """\n self._api.delete_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]\n\n def _populate_from_raw(self) -> None:\n """Populate fields from raw dictionary.\n\n Called in __post_init__ + each time the Inference Endpoint is updated.\n """\n # Repr fields\n self.name = self.raw["name"]\n self.repository = self.raw["model"]["repository"]\n self.status = self.raw["status"]["state"]\n self.url = self.raw["status"].get("url")\n self.health_route = self.raw["healthRoute"]\n\n # Other fields\n self.framework = self.raw["model"]["framework"]\n self.revision = self.raw["model"]["revision"]\n self.task = self.raw["model"]["task"]\n self.created_at = parse_datetime(self.raw["status"]["createdAt"])\n self.updated_at = parse_datetime(self.raw["status"]["updatedAt"])\n self.type = self.raw["type"]\n
| .venv\Lib\site-packages\huggingface_hub\_inference_endpoints.py | _inference_endpoints.py | Python | 17,598 | 0.95 | 0.150121 | 0.051873 | react-lib | 975 | 2025-02-14T01:00:36.814425 | GPL-3.0 | false | 3b6991536d679f244f9ec00ae51966ee |
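The intended lifecycle for the class above is fetch, resume, `wait()`, use, then pause. A sketch with a hypothetical endpoint name; `wait()` blocks until the health route answers 200 or the timeout expires:

```python
from huggingface_hub import HfApi

api = HfApi()
endpoint = api.get_inference_endpoint("my-text-to-image")  # hypothetical name

endpoint.resume()                            # no error if already running (running_ok=True)
endpoint.wait(timeout=300, refresh_every=5)  # polls status, then pings the health route

print(endpoint.status, endpoint.url)
image = endpoint.client.text_to_image("a photo of a cat")  # client bound to endpoint.url

endpoint.pause()  # paused endpoints are not billed; resume manually later
```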
import datetime\nimport hashlib\nimport logging\nimport os\nimport time\nimport urllib.parse\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union\n\nfrom . import constants\nfrom .hf_api import whoami\nfrom .utils import experimental, get_token\n\n\nlogger = logging.getLogger(__name__)\n\nif TYPE_CHECKING:\n import fastapi\n\n\n@dataclass\nclass OAuthOrgInfo:\n """\n Information about an organization linked to a user logged in with OAuth.\n\n Attributes:\n sub (`str`):\n Unique identifier for the org. OpenID Connect field.\n name (`str`):\n The org's full name. OpenID Connect field.\n preferred_username (`str`):\n The org's username. OpenID Connect field.\n picture (`str`):\n The org's profile picture URL. OpenID Connect field.\n is_enterprise (`bool`):\n Whether the org is an enterprise org. Hugging Face field.\n can_pay (`Optional[bool]`, *optional*):\n Whether the org has a payment method set up. Hugging Face field.\n role_in_org (`Optional[str]`, *optional*):\n The user's role in the org. Hugging Face field.\n pending_sso (`Optional[bool]`, *optional*):\n Indicates if the user granted the OAuth app access to the org but didn't complete SSO. Hugging Face field.\n missing_mfa (`Optional[bool]`, *optional*):\n Indicates if the user granted the OAuth app access to the org but didn't complete MFA. Hugging Face field.\n """\n\n sub: str\n name: str\n preferred_username: str\n picture: str\n is_enterprise: bool\n can_pay: Optional[bool] = None\n role_in_org: Optional[str] = None\n pending_sso: Optional[bool] = None\n missing_mfa: Optional[bool] = None\n\n\n@dataclass\nclass OAuthUserInfo:\n """\n Information about a user logged in with OAuth.\n\n Attributes:\n sub (`str`):\n Unique identifier for the user, even in case of rename. OpenID Connect field.\n name (`str`):\n The user's full name. OpenID Connect field.\n preferred_username (`str`):\n The user's username. OpenID Connect field.\n email_verified (`Optional[bool]`, *optional*):\n Indicates if the user's email is verified. OpenID Connect field.\n email (`Optional[str]`, *optional*):\n The user's email address. OpenID Connect field.\n picture (`str`):\n The user's profile picture URL. OpenID Connect field.\n profile (`str`):\n The user's profile URL. OpenID Connect field.\n website (`Optional[str]`, *optional*):\n The user's website URL. OpenID Connect field.\n is_pro (`bool`):\n Whether the user is a pro user. Hugging Face field.\n can_pay (`Optional[bool]`, *optional*):\n Whether the user has a payment method set up. Hugging Face field.\n orgs (`Optional[List[OrgInfo]]`, *optional*):\n List of organizations the user is part of. 
Hugging Face field.\n """\n\n sub: str\n name: str\n preferred_username: str\n email_verified: Optional[bool]\n email: Optional[str]\n picture: str\n profile: str\n website: Optional[str]\n is_pro: bool\n can_pay: Optional[bool]\n orgs: Optional[List[OAuthOrgInfo]]\n\n\n@dataclass\nclass OAuthInfo:\n """\n Information about the OAuth login.\n\n Attributes:\n access_token (`str`):\n The access token.\n access_token_expires_at (`datetime.datetime`):\n The expiration date of the access token.\n user_info ([`OAuthUserInfo`]):\n The user information.\n state (`str`, *optional*):\n State passed to the OAuth provider in the original request to the OAuth provider.\n scope (`str`):\n Granted scope.\n """\n\n access_token: str\n access_token_expires_at: datetime.datetime\n user_info: OAuthUserInfo\n state: Optional[str]\n scope: str\n\n\n@experimental\ndef attach_huggingface_oauth(app: "fastapi.FastAPI", route_prefix: str = "/"):\n """\n Add OAuth endpoints to a FastAPI app to enable OAuth login with Hugging Face.\n\n How to use:\n - Call this method on your FastAPI app to add the OAuth endpoints.\n - Inside your route handlers, call `parse_huggingface_oauth(request)` to retrieve the OAuth info.\n - If user is logged in, an [`OAuthInfo`] object is returned with the user's info. If not, `None` is returned.\n - In your app, make sure to add links to `/oauth/huggingface/login` and `/oauth/huggingface/logout` for the user to log in and out.\n\n Example:\n ```py\n from huggingface_hub import attach_huggingface_oauth, parse_huggingface_oauth\n\n # Create a FastAPI app\n app = FastAPI()\n\n # Add OAuth endpoints to the FastAPI app\n attach_huggingface_oauth(app)\n\n # Add a route that greets the user if they are logged in\n @app.get("/")\n def greet_json(request: Request):\n # Retrieve the OAuth info from the request\n oauth_info = parse_huggingface_oauth(request) # e.g. OAuthInfo dataclass\n if oauth_info is None:\n return {"msg": "Not logged in!"}\n return {"msg": f"Hello, {oauth_info.user_info.preferred_username}!"}\n ```\n """\n # TODO: handle generic case (handling OAuth in a non-Space environment with custom dev values) (low priority)\n\n # Add SessionMiddleware to the FastAPI app to store the OAuth info in the session.\n # Session Middleware requires a secret key to sign the cookies. Let's use a hash\n # of the OAuth secret key to make it unique to the Space + updated in case OAuth\n # config gets updated. When ran locally, we use an empty string as a secret key.\n try:\n from starlette.middleware.sessions import SessionMiddleware\n except ImportError as e:\n raise ImportError(\n "Cannot initialize OAuth to due a missing library. 
Please run `pip install huggingface_hub[oauth]` or add "\n "`huggingface_hub[oauth]` to your requirements.txt file in order to install the required dependencies."\n ) from e\n session_secret = (constants.OAUTH_CLIENT_SECRET or "") + "-v1"\n app.add_middleware(\n SessionMiddleware, # type: ignore[arg-type]\n secret_key=hashlib.sha256(session_secret.encode()).hexdigest(),\n same_site="none",\n https_only=True,\n ) # type: ignore\n\n # Add OAuth endpoints to the FastAPI app:\n # - {route_prefix}/oauth/huggingface/login\n # - {route_prefix}/oauth/huggingface/callback\n # - {route_prefix}/oauth/huggingface/logout\n # If the app is running in a Space, OAuth is enabled normally.\n # Otherwise, we mock the endpoints to make the user log in with a fake user profile - without any calls to hf.co.\n route_prefix = route_prefix.strip("/")\n if os.getenv("SPACE_ID") is not None:\n logger.info("OAuth is enabled in the Space. Adding OAuth routes.")\n _add_oauth_routes(app, route_prefix=route_prefix)\n else:\n logger.info("App is not running in a Space. Adding mocked OAuth routes.")\n _add_mocked_oauth_routes(app, route_prefix=route_prefix)\n\n\ndef parse_huggingface_oauth(request: "fastapi.Request") -> Optional[OAuthInfo]:\n """\n Returns the information from a logged in user as a [`OAuthInfo`] object.\n\n For flexibility and future-proofing, this method is very lax in its parsing and does not raise errors.\n Missing fields are set to `None` without a warning.\n\n Return `None`, if the user is not logged in (no info in session cookie).\n\n See [`attach_huggingface_oauth`] for an example on how to use this method.\n """\n if "oauth_info" not in request.session:\n logger.debug("No OAuth info in session.")\n return None\n\n logger.debug("Parsing OAuth info from session.")\n oauth_data = request.session["oauth_info"]\n user_data = oauth_data.get("userinfo", {})\n orgs_data = user_data.get("orgs", [])\n\n orgs = (\n [\n OAuthOrgInfo(\n sub=org.get("sub"),\n name=org.get("name"),\n preferred_username=org.get("preferred_username"),\n picture=org.get("picture"),\n is_enterprise=org.get("isEnterprise"),\n can_pay=org.get("canPay"),\n role_in_org=org.get("roleInOrg"),\n pending_sso=org.get("pendingSSO"),\n missing_mfa=org.get("missingMFA"),\n )\n for org in orgs_data\n ]\n if orgs_data\n else None\n )\n\n user_info = OAuthUserInfo(\n sub=user_data.get("sub"),\n name=user_data.get("name"),\n preferred_username=user_data.get("preferred_username"),\n email_verified=user_data.get("email_verified"),\n email=user_data.get("email"),\n picture=user_data.get("picture"),\n profile=user_data.get("profile"),\n website=user_data.get("website"),\n is_pro=user_data.get("isPro"),\n can_pay=user_data.get("canPay"),\n orgs=orgs,\n )\n\n return OAuthInfo(\n access_token=oauth_data.get("access_token"),\n access_token_expires_at=datetime.datetime.fromtimestamp(oauth_data.get("expires_at")),\n user_info=user_info,\n state=oauth_data.get("state"),\n scope=oauth_data.get("scope"),\n )\n\n\ndef _add_oauth_routes(app: "fastapi.FastAPI", route_prefix: str) -> None:\n """Add OAuth routes to the FastAPI app (login, callback handler and logout)."""\n try:\n import fastapi\n from authlib.integrations.base_client.errors import MismatchingStateError\n from authlib.integrations.starlette_client import OAuth\n from fastapi.responses import RedirectResponse\n except ImportError as e:\n raise ImportError(\n "Cannot initialize OAuth to due a missing library. 
Please run `pip install huggingface_hub[oauth]` or add "\n "`huggingface_hub[oauth]` to your requirements.txt file."\n ) from e\n\n # Check environment variables\n msg = (\n "OAuth is required but '{}' environment variable is not set. Make sure you've enabled OAuth in your Space by"\n " setting `hf_oauth: true` in the Space metadata."\n )\n if constants.OAUTH_CLIENT_ID is None:\n raise ValueError(msg.format("OAUTH_CLIENT_ID"))\n if constants.OAUTH_CLIENT_SECRET is None:\n raise ValueError(msg.format("OAUTH_CLIENT_SECRET"))\n if constants.OAUTH_SCOPES is None:\n raise ValueError(msg.format("OAUTH_SCOPES"))\n if constants.OPENID_PROVIDER_URL is None:\n raise ValueError(msg.format("OPENID_PROVIDER_URL"))\n\n # Register OAuth server\n oauth = OAuth()\n oauth.register(\n name="huggingface",\n client_id=constants.OAUTH_CLIENT_ID,\n client_secret=constants.OAUTH_CLIENT_SECRET,\n client_kwargs={"scope": constants.OAUTH_SCOPES},\n server_metadata_url=constants.OPENID_PROVIDER_URL + "/.well-known/openid-configuration",\n )\n\n login_uri, callback_uri, logout_uri = _get_oauth_uris(route_prefix)\n\n # Register OAuth endpoints\n @app.get(login_uri)\n async def oauth_login(request: fastapi.Request) -> RedirectResponse:\n """Endpoint that redirects to HF OAuth page."""\n redirect_uri = _generate_redirect_uri(request)\n return await oauth.huggingface.authorize_redirect(request, redirect_uri) # type: ignore\n\n @app.get(callback_uri)\n async def oauth_redirect_callback(request: fastapi.Request) -> RedirectResponse:\n """Endpoint that handles the OAuth callback."""\n try:\n oauth_info = await oauth.huggingface.authorize_access_token(request) # type: ignore\n except MismatchingStateError:\n # Parse query params\n nb_redirects = int(request.query_params.get("_nb_redirects", 0))\n target_url = request.query_params.get("_target_url")\n\n # Build redirect URI with the same query params as before and bump nb_redirects count\n query_params: Dict[str, Union[int, str]] = {"_nb_redirects": nb_redirects + 1}\n if target_url:\n query_params["_target_url"] = target_url\n\n redirect_uri = f"{login_uri}?{urllib.parse.urlencode(query_params)}"\n\n # If the user is redirected more than 3 times, it is very likely that the cookie is not working properly.\n # (e.g. browser is blocking third-party cookies in iframe). In this case, redirect the user in the\n # non-iframe view.\n if nb_redirects > constants.OAUTH_MAX_REDIRECTS:\n host = os.environ.get("SPACE_HOST")\n if host is None: # cannot happen in a Space\n raise RuntimeError(\n "App is not running in a Space (SPACE_HOST environment variable is not set). Cannot redirect to non-iframe view."\n ) from None\n host_url = "https://" + host.rstrip("/")\n return RedirectResponse(host_url + redirect_uri)\n\n # Redirect the user to the login page again\n return RedirectResponse(redirect_uri)\n\n # OAuth login worked => store the user info in the session and redirect\n logger.debug("Successfully logged in with OAuth. Storing user info in session.")\n request.session["oauth_info"] = oauth_info\n return RedirectResponse(_get_redirect_target(request))\n\n @app.get(logout_uri)\n async def oauth_logout(request: fastapi.Request) -> RedirectResponse:\n """Endpoint that logs out the user (e.g. delete info from cookie session)."""\n logger.debug("Logged out with OAuth. 
Removing user info from session.")\n request.session.pop("oauth_info", None)\n return RedirectResponse(_get_redirect_target(request))\n\n\ndef _add_mocked_oauth_routes(app: "fastapi.FastAPI", route_prefix: str = "/") -> None:\n """Add fake oauth routes if app is run locally and OAuth is enabled.\n\n Using OAuth will have the same behavior as in a Space but instead of authenticating with HF, a mocked user profile\n is added to the session.\n """\n try:\n import fastapi\n from fastapi.responses import RedirectResponse\n from starlette.datastructures import URL\n except ImportError as e:\n raise ImportError(\n "Cannot initialize OAuth to due a missing library. Please run `pip install huggingface_hub[oauth]` or add "\n "`huggingface_hub[oauth]` to your requirements.txt file."\n ) from e\n\n warnings.warn(\n "OAuth is not supported outside of a Space environment. To help you debug your app locally, the oauth endpoints"\n " are mocked to return your profile and token. To make it work, your machine must be logged in to Huggingface."\n )\n mocked_oauth_info = _get_mocked_oauth_info()\n\n login_uri, callback_uri, logout_uri = _get_oauth_uris(route_prefix)\n\n # Define OAuth routes\n @app.get(login_uri)\n async def oauth_login(request: fastapi.Request) -> RedirectResponse:\n """Fake endpoint that redirects to HF OAuth page."""\n # Define target (where to redirect after login)\n redirect_uri = _generate_redirect_uri(request)\n return RedirectResponse(callback_uri + "?" + urllib.parse.urlencode({"_target_url": redirect_uri}))\n\n @app.get(callback_uri)\n async def oauth_redirect_callback(request: fastapi.Request) -> RedirectResponse:\n """Endpoint that handles the OAuth callback."""\n request.session["oauth_info"] = mocked_oauth_info\n return RedirectResponse(_get_redirect_target(request))\n\n @app.get(logout_uri)\n async def oauth_logout(request: fastapi.Request) -> RedirectResponse:\n """Endpoint that logs out the user (e.g. delete cookie session)."""\n request.session.pop("oauth_info", None)\n logout_url = URL("/").include_query_params(**request.query_params)\n return RedirectResponse(url=logout_url, status_code=302) # see https://github.com/gradio-app/gradio/pull/9659\n\n\ndef _generate_redirect_uri(request: "fastapi.Request") -> str:\n if "_target_url" in request.query_params:\n # if `_target_url` already in query params => respect it\n target = request.query_params["_target_url"]\n else:\n # otherwise => keep query params\n target = "/?" + urllib.parse.urlencode(request.query_params)\n\n redirect_uri = request.url_for("oauth_redirect_callback").include_query_params(_target_url=target)\n redirect_uri_as_str = str(redirect_uri)\n if redirect_uri.netloc.endswith(".hf.space"):\n # In Space, FastAPI redirect as http but we want https\n redirect_uri_as_str = redirect_uri_as_str.replace("http://", "https://")\n return redirect_uri_as_str\n\n\ndef _get_redirect_target(request: "fastapi.Request", default_target: str = "/") -> str:\n return request.query_params.get("_target_url", default_target)\n\n\ndef _get_mocked_oauth_info() -> Dict:\n token = get_token()\n if token is None:\n raise ValueError(\n "Your machine must be logged in to HF to debug an OAuth app locally. Please"\n " run `huggingface-cli login` or set `HF_TOKEN` as environment variable "\n "with one of your access token. 
You can generate a new token in your "\n "settings page (https://huggingface.co/settings/tokens)."\n )\n\n user = whoami()\n if user["type"] != "user":\n raise ValueError(\n "Your machine is not logged in with a personal account. Please use a "\n "personal access token. You can generate a new token in your settings page"\n " (https://huggingface.co/settings/tokens)."\n )\n\n return {\n "access_token": token,\n "token_type": "bearer",\n "expires_in": 8 * 60 * 60, # 8 hours\n "id_token": "FOOBAR",\n "scope": "openid profile",\n "refresh_token": "hf_oauth__refresh_token",\n "expires_at": int(time.time()) + 8 * 60 * 60, # 8 hours\n "userinfo": {\n "sub": "0123456789",\n "name": user["fullname"],\n "preferred_username": user["name"],\n "profile": f"https://huggingface.co/{user['name']}",\n "picture": user["avatarUrl"],\n "website": "",\n "aud": "00000000-0000-0000-0000-000000000000",\n "auth_time": 1691672844,\n "nonce": "aaaaaaaaaaaaaaaaaaa",\n "iat": 1691672844,\n "exp": 1691676444,\n "iss": "https://huggingface.co",\n },\n }\n\n\ndef _get_oauth_uris(route_prefix: str = "/") -> Tuple[str, str, str]:\n route_prefix = route_prefix.strip("/")\n if route_prefix:\n route_prefix = f"/{route_prefix}"\n return (\n f"{route_prefix}/oauth/huggingface/login",\n f"{route_prefix}/oauth/huggingface/callback",\n f"{route_prefix}/oauth/huggingface/logout",\n )\n
| .venv\Lib\site-packages\huggingface_hub\_oauth.py | _oauth.py | Python | 18,802 | 0.95 | 0.109914 | 0.075377 | node-utils | 991 | 2023-08-02T11:34:24.698429 | BSD-3-Clause | false | c3149e806708f4ba0b17ee3291639fa9 |
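Beyond the docstring example in the file above, `route_prefix` relocates the three OAuth routes, which matters when the app is served under a sub-path. A sketch with `/app` as an assumed prefix:

```python
import fastapi

from huggingface_hub import attach_huggingface_oauth, parse_huggingface_oauth

app = fastapi.FastAPI()
attach_huggingface_oauth(app, route_prefix="/app")
# Routes now live at /app/oauth/huggingface/{login,callback,logout}


@app.get("/app/profile")
def profile(request: fastapi.Request):
    oauth = parse_huggingface_oauth(request)
    if oauth is None:
        # Point the user at the login route added by attach_huggingface_oauth
        return {"login_url": "/app/oauth/huggingface/login"}
    return {
        "username": oauth.user_info.preferred_username,
        "token_expires_at": oauth.access_token_expires_at.isoformat(),
        "orgs": [org.name for org in (oauth.user_info.orgs or [])],
    }
```

Outside a Space the same routes are mocked against the locally saved token, so this handler can be exercised without a real OAuth round trip.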
# coding=utf-8\n# Copyright 2019-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import Dict, Optional\n\nfrom huggingface_hub.utils import parse_datetime\n\n\nclass SpaceStage(str, Enum):\n """\n Enumeration of possible stage of a Space on the Hub.\n\n Value can be compared to a string:\n ```py\n assert SpaceStage.BUILDING == "BUILDING"\n ```\n\n Taken from https://github.com/huggingface/moon-landing/blob/main/server/repo_types/SpaceInfo.ts#L61 (private url).\n """\n\n # Copied from moon-landing > server > repo_types > SpaceInfo.ts (private repo)\n NO_APP_FILE = "NO_APP_FILE"\n CONFIG_ERROR = "CONFIG_ERROR"\n BUILDING = "BUILDING"\n BUILD_ERROR = "BUILD_ERROR"\n RUNNING = "RUNNING"\n RUNNING_BUILDING = "RUNNING_BUILDING"\n RUNTIME_ERROR = "RUNTIME_ERROR"\n DELETING = "DELETING"\n STOPPED = "STOPPED"\n PAUSED = "PAUSED"\n\n\nclass SpaceHardware(str, Enum):\n """\n Enumeration of hardwares available to run your Space on the Hub.\n\n Value can be compared to a string:\n ```py\n assert SpaceHardware.CPU_BASIC == "cpu-basic"\n ```\n\n Taken from https://github.com/huggingface-internal/moon-landing/blob/main/server/repo_types/SpaceHardwareFlavor.ts (private url).\n """\n\n # CPU\n CPU_BASIC = "cpu-basic"\n CPU_UPGRADE = "cpu-upgrade"\n CPU_XL = "cpu-xl"\n\n # ZeroGPU\n ZERO_A10G = "zero-a10g"\n\n # GPU\n T4_SMALL = "t4-small"\n T4_MEDIUM = "t4-medium"\n L4X1 = "l4x1"\n L4X4 = "l4x4"\n L40SX1 = "l40sx1"\n L40SX4 = "l40sx4"\n L40SX8 = "l40sx8"\n A10G_SMALL = "a10g-small"\n A10G_LARGE = "a10g-large"\n A10G_LARGEX2 = "a10g-largex2"\n A10G_LARGEX4 = "a10g-largex4"\n A100_LARGE = "a100-large"\n H100 = "h100"\n H100X8 = "h100x8"\n\n\nclass SpaceStorage(str, Enum):\n """\n Enumeration of persistent storage available for your Space on the Hub.\n\n Value can be compared to a string:\n ```py\n assert SpaceStorage.SMALL == "small"\n ```\n\n Taken from https://github.com/huggingface/moon-landing/blob/main/server/repo_types/SpaceHardwareFlavor.ts#L24 (private url).\n """\n\n SMALL = "small"\n MEDIUM = "medium"\n LARGE = "large"\n\n\n@dataclass\nclass SpaceRuntime:\n """\n Contains information about the current runtime of a Space.\n\n Args:\n stage (`str`):\n Current stage of the space. Example: RUNNING.\n hardware (`str` or `None`):\n Current hardware of the space. Example: "cpu-basic". Can be `None` if Space\n is `BUILDING` for the first time.\n requested_hardware (`str` or `None`):\n Requested hardware. Can be different than `hardware` especially if the request\n has just been made. Example: "t4-medium". Can be `None` if no hardware has\n been requested yet.\n sleep_time (`int` or `None`):\n Number of seconds the Space will be kept alive after the last request. By default (if value is `None`), the\n Space will never go to sleep if it's running on an upgraded hardware, while it will go to sleep after 48\n hours on a free 'cpu-basic' hardware. 
For more details, see https://huggingface.co/docs/hub/spaces-gpus#sleep-time.\n storage (`str` or `None`):\n Current persistent storage tier of the Space, if any. Example: "small". Can be `None` if no\n persistent storage has been requested.\n raw (`dict`):\n Raw response from the server. Contains more information about the Space\n runtime, like the number of replicas, number of CPUs, memory size, ...\n """\n\n stage: SpaceStage\n hardware: Optional[SpaceHardware]\n requested_hardware: Optional[SpaceHardware]\n sleep_time: Optional[int]\n storage: Optional[SpaceStorage]\n raw: Dict\n\n def __init__(self, data: Dict) -> None:\n self.stage = data["stage"]\n self.hardware = data.get("hardware", {}).get("current")\n self.requested_hardware = data.get("hardware", {}).get("requested")\n self.sleep_time = data.get("gcTimeout")\n self.storage = data.get("storage")\n self.raw = data\n\n\n@dataclass\nclass SpaceVariable:\n """\n Contains information about the current variables of a Space.\n\n Args:\n key (`str`):\n Variable key. Example: `"MODEL_REPO_ID"`\n value (`str`):\n Variable value. Example: `"the_model_repo_id"`.\n description (`str` or None):\n Description of the variable. Example: `"Model Repo ID of the implemented model"`.\n updated_at (`datetime` or None):\n Datetime of the last update of the variable (if the variable has been updated at least once).\n """\n\n key: str\n value: str\n description: Optional[str]\n updated_at: Optional[datetime]\n\n def __init__(self, key: str, values: Dict) -> None:\n self.key = key\n self.value = values["value"]\n self.description = values.get("description")\n updated_at = values.get("updatedAt")\n self.updated_at = parse_datetime(updated_at) if updated_at is not None else None\n
|
.venv\Lib\site-packages\huggingface_hub\_space_api.py
|
_space_api.py
|
Python
| 5,470 | 0.95 | 0.107143 | 0.128571 |
awesome-app
| 84 |
2024-01-23T08:53:38.825685
|
GPL-3.0
| false |
06b154334eb79a22400d245c0a516750
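Editor's note on the row above: a minimal usage sketch of the Space runtime types it defines, consumed through `HfApi.get_space_runtime` (the `repo_id` below is a placeholder, not from the source):

```py
from huggingface_hub import HfApi, SpaceStage

api = HfApi()
runtime = api.get_space_runtime("username/my-space")  # placeholder repo_id

# SpaceStage is a str-Enum, so members compare equal to plain strings.
if runtime.stage == SpaceStage.RUNNING:
    print(f"Space is running on {runtime.hardware}")
elif runtime.stage in (SpaceStage.BUILD_ERROR, SpaceStage.RUNTIME_ERROR):
    # `raw` keeps the full server response for debugging.
    print("Space failed:", runtime.raw)
```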
|
# Copyright 2023 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains a logger to push training logs to the Hub, using Tensorboard."""\n\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\nfrom ._commit_scheduler import CommitScheduler\nfrom .errors import EntryNotFoundError\nfrom .repocard import ModelCard\nfrom .utils import experimental\n\n\n# Depending on user's setup, SummaryWriter can come either from 'tensorboardX'\n# or from 'torch.utils.tensorboard'. Both are compatible so let's try to load\n# from either of them.\ntry:\n    from tensorboardX import SummaryWriter\n\n    is_summary_writer_available = True\n\nexcept ImportError:\n    try:\n        from torch.utils.tensorboard import SummaryWriter\n\n        # The torch implementation is compatible as well: the flag must be True here too,\n        # otherwise `HFSummaryWriter` would wrongly raise even though a writer was imported.\n        is_summary_writer_available = True\n    except ImportError:\n        # Dummy class to avoid failing at import. Will raise on instance creation.\n        SummaryWriter = object\n        is_summary_writer_available = False\n\nif TYPE_CHECKING:\n    from tensorboardX import SummaryWriter\n\n\nclass HFSummaryWriter(SummaryWriter):\n    """\n    Wrapper around tensorboard's `SummaryWriter` to push training logs to the Hub.\n\n    Data is logged locally and then pushed to the Hub asynchronously. Pushing data to the Hub is done in a separate\n    thread to avoid blocking the training script. In particular, if the upload fails for any reason (e.g. a connection\n    issue), the main script will not be interrupted. Data is automatically pushed to the Hub every `commit_every`\n    minutes (defaults to every 5 minutes).\n\n    <Tip warning={true}>\n\n    `HFSummaryWriter` is experimental. Its API is subject to change in the future without prior notice.\n\n    </Tip>\n\n    Args:\n        repo_id (`str`):\n            The id of the repo to which the logs will be pushed.\n        logdir (`str`, *optional*):\n            The directory where the logs will be written. If not specified, a local directory will be created by the\n            underlying `SummaryWriter` object.\n        commit_every (`int` or `float`, *optional*):\n            The frequency (in minutes) at which the logs will be pushed to the Hub. Defaults to 5 minutes.\n        squash_history (`bool`, *optional*):\n            Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is\n            useful to avoid degraded performance on the repo when it grows too large.\n        repo_type (`str`, *optional*):\n            The type of the repo to which the logs will be pushed. Defaults to "model".\n        repo_revision (`str`, *optional*):\n            The revision of the repo to which the logs will be pushed. Defaults to "main".\n        repo_private (`bool`, *optional*):\n            Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.\n        path_in_repo (`str`, *optional*):\n            The path to the folder in the repo where the logs will be pushed. Defaults to "tensorboard/".\n        repo_allow_patterns (`List[str]` or `str`, *optional*):\n            A list of patterns to include in the upload. Defaults to `"*.tfevents.*"`. 
Check out the\n [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.\n repo_ignore_patterns (`List[str]` or `str`, *optional*):\n A list of patterns to exclude in the upload. Check out the\n [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.\n token (`str`, *optional*):\n Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more\n details\n kwargs:\n Additional keyword arguments passed to `SummaryWriter`.\n\n Examples:\n ```diff\n # Taken from https://pytorch.org/docs/stable/tensorboard.html\n - from torch.utils.tensorboard import SummaryWriter\n + from huggingface_hub import HFSummaryWriter\n\n import numpy as np\n\n - writer = SummaryWriter()\n + writer = HFSummaryWriter(repo_id="username/my-trained-model")\n\n for n_iter in range(100):\n writer.add_scalar('Loss/train', np.random.random(), n_iter)\n writer.add_scalar('Loss/test', np.random.random(), n_iter)\n writer.add_scalar('Accuracy/train', np.random.random(), n_iter)\n writer.add_scalar('Accuracy/test', np.random.random(), n_iter)\n ```\n\n ```py\n >>> from huggingface_hub import HFSummaryWriter\n\n # Logs are automatically pushed every 15 minutes (5 by default) + when exiting the context manager\n >>> with HFSummaryWriter(repo_id="test_hf_logger", commit_every=15) as logger:\n ... logger.add_scalar("a", 1)\n ... logger.add_scalar("b", 2)\n ```\n """\n\n @experimental\n def __new__(cls, *args, **kwargs) -> "HFSummaryWriter":\n if not is_summary_writer_available:\n raise ImportError(\n "You must have `tensorboard` installed to use `HFSummaryWriter`. Please run `pip install --upgrade"\n " tensorboardX` first."\n )\n return super().__new__(cls)\n\n def __init__(\n self,\n repo_id: str,\n *,\n logdir: Optional[str] = None,\n commit_every: Union[int, float] = 5,\n squash_history: bool = False,\n repo_type: Optional[str] = None,\n repo_revision: Optional[str] = None,\n repo_private: Optional[bool] = None,\n path_in_repo: Optional[str] = "tensorboard",\n repo_allow_patterns: Optional[Union[List[str], str]] = "*.tfevents.*",\n repo_ignore_patterns: Optional[Union[List[str], str]] = None,\n token: Optional[str] = None,\n **kwargs,\n ):\n # Initialize SummaryWriter\n super().__init__(logdir=logdir, **kwargs)\n\n # Check logdir has been correctly initialized and fail early otherwise. In practice, SummaryWriter takes care of it.\n if not isinstance(self.logdir, str):\n raise ValueError(f"`self.logdir` must be a string. 
Got '{self.logdir}' of type {type(self.logdir)}.")\n\n # Append logdir name to `path_in_repo`\n if path_in_repo is None or path_in_repo == "":\n path_in_repo = Path(self.logdir).name\n else:\n path_in_repo = path_in_repo.strip("/") + "/" + Path(self.logdir).name\n\n # Initialize scheduler\n self.scheduler = CommitScheduler(\n folder_path=self.logdir,\n path_in_repo=path_in_repo,\n repo_id=repo_id,\n repo_type=repo_type,\n revision=repo_revision,\n private=repo_private,\n token=token,\n allow_patterns=repo_allow_patterns,\n ignore_patterns=repo_ignore_patterns,\n every=commit_every,\n squash_history=squash_history,\n )\n\n # Exposing some high-level info at root level\n self.repo_id = self.scheduler.repo_id\n self.repo_type = self.scheduler.repo_type\n self.repo_revision = self.scheduler.revision\n\n # Add `hf-summary-writer` tag to the model card metadata\n try:\n card = ModelCard.load(repo_id_or_path=self.repo_id, repo_type=self.repo_type)\n except EntryNotFoundError:\n card = ModelCard("")\n tags = card.data.get("tags", [])\n if "hf-summary-writer" not in tags:\n tags.append("hf-summary-writer")\n card.data["tags"] = tags\n card.push_to_hub(repo_id=self.repo_id, repo_type=self.repo_type)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n """Push to hub in a non-blocking way when exiting the logger's context manager."""\n super().__exit__(exc_type, exc_val, exc_tb)\n future = self.scheduler.trigger()\n future.result()\n
|
.venv\Lib\site-packages\huggingface_hub\_tensorboard_logger.py
|
_tensorboard_logger.py
|
Python
| 8,358 | 0.95 | 0.113402 | 0.163636 |
react-lib
| 988 |
2023-11-17T02:28:56.891145
|
MIT
| false |
2eaadd32a0131ef57d258a9d7273d03c
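Editor's note on the row above: outside a `with` block, the `CommitScheduler` created in `__init__` stays reachable as `writer.scheduler`, so the same final push that `__exit__` performs can be triggered by hand. A small sketch, with a placeholder `repo_id`:

```py
from huggingface_hub import HFSummaryWriter

writer = HFSummaryWriter(repo_id="username/test-logs", commit_every=5)  # placeholder repo
writer.add_scalar("loss", 0.42, global_step=1)

# `CommitScheduler.trigger()` returns a Future; blocking on it guarantees the
# last events are pushed to the Hub before the script exits.
writer.scheduler.trigger().result()
```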
|
# coding=utf-8\n# Copyright 2024-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport enum\nimport logging\nimport os\nimport queue\nimport shutil\nimport sys\nimport threading\nimport time\nimport traceback\nfrom datetime import datetime\nfrom pathlib import Path\nfrom threading import Lock\nfrom typing import TYPE_CHECKING, List, Optional, Tuple, Union\nfrom urllib.parse import quote\n\nfrom . import constants\nfrom ._commit_api import CommitOperationAdd, UploadInfo, _fetch_upload_modes\nfrom ._local_folder import LocalUploadFileMetadata, LocalUploadFilePaths, get_local_upload_paths, read_upload_metadata\nfrom .constants import DEFAULT_REVISION, REPO_TYPES\nfrom .utils import DEFAULT_IGNORE_PATTERNS, filter_repo_objects, tqdm\nfrom .utils._cache_manager import _format_size\nfrom .utils.sha import sha_fileobj\n\n\nif TYPE_CHECKING:\n from .hf_api import HfApi\n\nlogger = logging.getLogger(__name__)\n\nWAITING_TIME_IF_NO_TASKS = 10 # seconds\nMAX_NB_FILES_FETCH_UPLOAD_MODE = 100\nCOMMIT_SIZE_SCALE: List[int] = [20, 50, 75, 100, 125, 200, 250, 400, 600, 1000]\n\n\ndef upload_large_folder_internal(\n api: "HfApi",\n repo_id: str,\n folder_path: Union[str, Path],\n *,\n repo_type: str, # Repo type is required!\n revision: Optional[str] = None,\n private: Optional[bool] = None,\n allow_patterns: Optional[Union[List[str], str]] = None,\n ignore_patterns: Optional[Union[List[str], str]] = None,\n num_workers: Optional[int] = None,\n print_report: bool = True,\n print_report_every: int = 60,\n):\n """Upload a large folder to the Hub in the most resilient way possible.\n\n See [`HfApi.upload_large_folder`] for the full documentation.\n """\n # 1. Check args and setup\n if repo_type is None:\n raise ValueError(\n "For large uploads, `repo_type` is explicitly required. Please set it to `model`, `dataset` or `space`."\n " If you are using the CLI, pass it as `--repo-type=model`."\n )\n if repo_type not in REPO_TYPES:\n raise ValueError(f"Invalid repo type, must be one of {REPO_TYPES}")\n if revision is None:\n revision = DEFAULT_REVISION\n\n folder_path = Path(folder_path).expanduser().resolve()\n if not folder_path.is_dir():\n raise ValueError(f"Provided path: '{folder_path}' is not a directory")\n\n if ignore_patterns is None:\n ignore_patterns = []\n elif isinstance(ignore_patterns, str):\n ignore_patterns = [ignore_patterns]\n ignore_patterns += DEFAULT_IGNORE_PATTERNS\n\n if num_workers is None:\n nb_cores = os.cpu_count() or 1\n num_workers = max(nb_cores - 2, 2) # Use all but 2 cores, or at least 2 cores\n\n # 2. Create repo if missing\n repo_url = api.create_repo(repo_id=repo_id, repo_type=repo_type, private=private, exist_ok=True)\n logger.info(f"Repo created: {repo_url}")\n repo_id = repo_url.repo_id\n\n # 3. 
List files to upload\n filtered_paths_list = filter_repo_objects(\n (path.relative_to(folder_path).as_posix() for path in folder_path.glob("**/*") if path.is_file()),\n allow_patterns=allow_patterns,\n ignore_patterns=ignore_patterns,\n )\n paths_list = [get_local_upload_paths(folder_path, relpath) for relpath in filtered_paths_list]\n logger.info(f"Found {len(paths_list)} candidate files to upload")\n\n # Read metadata for each file\n items = [\n (paths, read_upload_metadata(folder_path, paths.path_in_repo))\n for paths in tqdm(paths_list, desc="Recovering from metadata files")\n ]\n\n # 4. Start workers\n status = LargeUploadStatus(items)\n threads = [\n threading.Thread(\n target=_worker_job,\n kwargs={\n "status": status,\n "api": api,\n "repo_id": repo_id,\n "repo_type": repo_type,\n "revision": revision,\n },\n )\n for _ in range(num_workers)\n ]\n\n for thread in threads:\n thread.start()\n\n # 5. Print regular reports\n if print_report:\n print("\n\n" + status.current_report())\n last_report_ts = time.time()\n while True:\n time.sleep(1)\n if time.time() - last_report_ts >= print_report_every:\n if print_report:\n _print_overwrite(status.current_report())\n last_report_ts = time.time()\n if status.is_done():\n logging.info("Is done: exiting main loop")\n break\n\n for thread in threads:\n thread.join()\n\n logger.info(status.current_report())\n logging.info("Upload is complete!")\n\n\n####################\n# Logic to manage workers and synchronize tasks\n####################\n\n\nclass WorkerJob(enum.Enum):\n SHA256 = enum.auto()\n GET_UPLOAD_MODE = enum.auto()\n PREUPLOAD_LFS = enum.auto()\n COMMIT = enum.auto()\n WAIT = enum.auto() # if no tasks are available but we don't want to exit\n\n\nJOB_ITEM_T = Tuple[LocalUploadFilePaths, LocalUploadFileMetadata]\n\n\nclass LargeUploadStatus:\n """Contains information, queues and tasks for a large upload process."""\n\n def __init__(self, items: List[JOB_ITEM_T]):\n self.items = items\n self.queue_sha256: "queue.Queue[JOB_ITEM_T]" = queue.Queue()\n self.queue_get_upload_mode: "queue.Queue[JOB_ITEM_T]" = queue.Queue()\n self.queue_preupload_lfs: "queue.Queue[JOB_ITEM_T]" = queue.Queue()\n self.queue_commit: "queue.Queue[JOB_ITEM_T]" = queue.Queue()\n self.lock = Lock()\n\n self.nb_workers_sha256: int = 0\n self.nb_workers_get_upload_mode: int = 0\n self.nb_workers_preupload_lfs: int = 0\n self.nb_workers_commit: int = 0\n self.nb_workers_waiting: int = 0\n self.last_commit_attempt: Optional[float] = None\n\n self._started_at = datetime.now()\n self._chunk_idx: int = 1\n self._chunk_lock: Lock = Lock()\n\n # Setup queues\n for item in self.items:\n paths, metadata = item\n if metadata.sha256 is None:\n self.queue_sha256.put(item)\n elif metadata.upload_mode is None:\n self.queue_get_upload_mode.put(item)\n elif metadata.upload_mode == "lfs" and not metadata.is_uploaded:\n self.queue_preupload_lfs.put(item)\n elif not metadata.is_committed:\n self.queue_commit.put(item)\n else:\n logger.debug(f"Skipping file {paths.path_in_repo} (already uploaded and committed)")\n\n def target_chunk(self) -> int:\n with self._chunk_lock:\n return COMMIT_SIZE_SCALE[self._chunk_idx]\n\n def update_chunk(self, success: bool, nb_items: int, duration: float) -> None:\n with self._chunk_lock:\n if not success:\n logger.warning(f"Failed to commit {nb_items} files at once. 
Will retry with fewer files in the next batch.")\n self._chunk_idx -= 1\n elif nb_items >= COMMIT_SIZE_SCALE[self._chunk_idx] and duration < 40:\n logger.info(f"Successfully committed {nb_items} files at once. Increasing the limit for next batch.")\n self._chunk_idx += 1\n\n self._chunk_idx = max(0, min(self._chunk_idx, len(COMMIT_SIZE_SCALE) - 1))\n\n def current_report(self) -> str:\n """Generate a report of the current status of the large upload."""\n nb_hashed = 0\n size_hashed = 0\n nb_preuploaded = 0\n nb_lfs = 0\n nb_lfs_unsure = 0\n size_preuploaded = 0\n nb_committed = 0\n size_committed = 0\n total_size = 0\n ignored_files = 0\n total_files = 0\n\n with self.lock:\n for _, metadata in self.items:\n if metadata.should_ignore:\n ignored_files += 1\n continue\n total_size += metadata.size\n total_files += 1\n if metadata.sha256 is not None:\n nb_hashed += 1\n size_hashed += metadata.size\n if metadata.upload_mode == "lfs":\n nb_lfs += 1\n if metadata.upload_mode is None:\n nb_lfs_unsure += 1\n if metadata.is_uploaded:\n nb_preuploaded += 1\n size_preuploaded += metadata.size\n if metadata.is_committed:\n nb_committed += 1\n size_committed += metadata.size\n total_size_str = _format_size(total_size)\n\n now = datetime.now()\n now_str = now.strftime("%Y-%m-%d %H:%M:%S")\n elapsed = now - self._started_at\n elapsed_str = str(elapsed).split(".")[0] # remove milliseconds\n\n message = "\n" + "-" * 10\n message += f" {now_str} ({elapsed_str}) "\n message += "-" * 10 + "\n"\n\n message += "Files: "\n message += f"hashed {nb_hashed}/{total_files} ({_format_size(size_hashed)}/{total_size_str}) | "\n message += f"pre-uploaded: {nb_preuploaded}/{nb_lfs} ({_format_size(size_preuploaded)}/{total_size_str})"\n if nb_lfs_unsure > 0:\n message += f" (+{nb_lfs_unsure} unsure)"\n message += f" | committed: {nb_committed}/{total_files} ({_format_size(size_committed)}/{total_size_str})"\n message += f" | ignored: {ignored_files}\n"\n\n message += "Workers: "\n message += f"hashing: {self.nb_workers_sha256} | "\n message += f"get upload mode: {self.nb_workers_get_upload_mode} | "\n message += f"pre-uploading: {self.nb_workers_preupload_lfs} | "\n message += f"committing: {self.nb_workers_commit} | "\n message += f"waiting: {self.nb_workers_waiting}\n"\n message += "-" * 51\n\n return message\n\n def is_done(self) -> bool:\n with self.lock:\n return all(metadata.is_committed or metadata.should_ignore for _, metadata in self.items)\n\n\ndef _worker_job(\n status: LargeUploadStatus,\n api: "HfApi",\n repo_id: str,\n repo_type: str,\n revision: str,\n):\n """\n Main process for a worker. The worker will perform tasks based on the priority list until all files are uploaded\n and committed. 
If no tasks are available, the worker will wait for 10 seconds before checking again.\n\n If a task fails for any reason, the item(s) are put back in the queue for another worker to pick up.\n\n Read `upload_large_folder` docstring for more information on how tasks are prioritized.\n """\n while True:\n next_job: Optional[Tuple[WorkerJob, List[JOB_ITEM_T]]] = None\n\n # Determine next task\n next_job = _determine_next_job(status)\n if next_job is None:\n return\n job, items = next_job\n\n # Perform task\n if job == WorkerJob.SHA256:\n item = items[0] # single item\n try:\n _compute_sha256(item)\n status.queue_get_upload_mode.put(item)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n logger.error(f"Failed to compute sha256: {e}")\n traceback.format_exc()\n status.queue_sha256.put(item)\n\n with status.lock:\n status.nb_workers_sha256 -= 1\n\n elif job == WorkerJob.GET_UPLOAD_MODE:\n try:\n _get_upload_mode(items, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n logger.error(f"Failed to get upload mode: {e}")\n traceback.format_exc()\n\n # Items are either:\n # - dropped (if should_ignore)\n # - put in LFS queue (if LFS)\n # - put in commit queue (if regular)\n # - or put back (if error occurred).\n for item in items:\n _, metadata = item\n if metadata.should_ignore:\n continue\n if metadata.upload_mode == "lfs":\n status.queue_preupload_lfs.put(item)\n elif metadata.upload_mode == "regular":\n status.queue_commit.put(item)\n else:\n status.queue_get_upload_mode.put(item)\n\n with status.lock:\n status.nb_workers_get_upload_mode -= 1\n\n elif job == WorkerJob.PREUPLOAD_LFS:\n item = items[0] # single item\n try:\n _preupload_lfs(item, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision)\n status.queue_commit.put(item)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n logger.error(f"Failed to preupload LFS: {e}")\n traceback.format_exc()\n status.queue_preupload_lfs.put(item)\n\n with status.lock:\n status.nb_workers_preupload_lfs -= 1\n\n elif job == WorkerJob.COMMIT:\n start_ts = time.time()\n success = True\n try:\n _commit(items, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n logger.error(f"Failed to commit: {e}")\n traceback.format_exc()\n for item in items:\n status.queue_commit.put(item)\n success = False\n duration = time.time() - start_ts\n status.update_chunk(success, len(items), duration)\n with status.lock:\n status.last_commit_attempt = time.time()\n status.nb_workers_commit -= 1\n\n elif job == WorkerJob.WAIT:\n time.sleep(WAITING_TIME_IF_NO_TASKS)\n with status.lock:\n status.nb_workers_waiting -= 1\n\n\ndef _determine_next_job(status: LargeUploadStatus) -> Optional[Tuple[WorkerJob, List[JOB_ITEM_T]]]:\n with status.lock:\n # 1. Commit if more than 5 minutes since last commit attempt (and at least 1 file)\n if (\n status.nb_workers_commit == 0\n and status.queue_commit.qsize() > 0\n and status.last_commit_attempt is not None\n and time.time() - status.last_commit_attempt > 5 * 60\n ):\n status.nb_workers_commit += 1\n logger.debug("Job: commit (more than 5 minutes since last commit attempt)")\n return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))\n\n # 2. 
Commit if at least 100 files are ready to commit\n elif status.nb_workers_commit == 0 and status.queue_commit.qsize() >= 100:\n status.nb_workers_commit += 1\n logger.debug("Job: commit (>100 files ready)")\n return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))\n\n # 3. Get upload mode if at least 100 files\n elif status.queue_get_upload_mode.qsize() >= MAX_NB_FILES_FETCH_UPLOAD_MODE:\n status.nb_workers_get_upload_mode += 1\n logger.debug(f"Job: get upload mode (>{MAX_NB_FILES_FETCH_UPLOAD_MODE} files ready)")\n return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, MAX_NB_FILES_FETCH_UPLOAD_MODE))\n\n # 4. Preupload LFS file if at least 1 file and no worker is preuploading LFS\n elif status.queue_preupload_lfs.qsize() > 0 and status.nb_workers_preupload_lfs == 0:\n status.nb_workers_preupload_lfs += 1\n logger.debug("Job: preupload LFS (no other worker preuploading LFS)")\n return (WorkerJob.PREUPLOAD_LFS, _get_one(status.queue_preupload_lfs))\n\n # 5. Compute sha256 if at least 1 file and no worker is computing sha256\n elif status.queue_sha256.qsize() > 0 and status.nb_workers_sha256 == 0:\n status.nb_workers_sha256 += 1\n logger.debug("Job: sha256 (no other worker computing sha256)")\n return (WorkerJob.SHA256, _get_one(status.queue_sha256))\n\n # 6. Get upload mode if at least 1 file and no worker is getting upload mode\n elif status.queue_get_upload_mode.qsize() > 0 and status.nb_workers_get_upload_mode == 0:\n status.nb_workers_get_upload_mode += 1\n logger.debug("Job: get upload mode (no other worker getting upload mode)")\n return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, MAX_NB_FILES_FETCH_UPLOAD_MODE))\n\n # 7. Preupload LFS file if at least 1 file\n # Skip if hf_transfer is enabled and there is already a worker preuploading LFS\n elif status.queue_preupload_lfs.qsize() > 0 and (\n status.nb_workers_preupload_lfs == 0 or not constants.HF_HUB_ENABLE_HF_TRANSFER\n ):\n status.nb_workers_preupload_lfs += 1\n logger.debug("Job: preupload LFS")\n return (WorkerJob.PREUPLOAD_LFS, _get_one(status.queue_preupload_lfs))\n\n # 8. Compute sha256 if at least 1 file\n elif status.queue_sha256.qsize() > 0:\n status.nb_workers_sha256 += 1\n logger.debug("Job: sha256")\n return (WorkerJob.SHA256, _get_one(status.queue_sha256))\n\n # 9. Get upload mode if at least 1 file\n elif status.queue_get_upload_mode.qsize() > 0:\n status.nb_workers_get_upload_mode += 1\n logger.debug("Job: get upload mode")\n return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, MAX_NB_FILES_FETCH_UPLOAD_MODE))\n\n # 10. Commit if at least 1 file and 1 min since last commit attempt\n elif (\n status.nb_workers_commit == 0\n and status.queue_commit.qsize() > 0\n and status.last_commit_attempt is not None\n and time.time() - status.last_commit_attempt > 1 * 60\n ):\n status.nb_workers_commit += 1\n logger.debug("Job: commit (1 min since last commit attempt)")\n return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))\n\n # 11. Commit if at least 1 file, all other queues are empty, and all workers are waiting\n # e.g. 
when it's the last commit\n elif (\n status.nb_workers_commit == 0\n and status.queue_commit.qsize() > 0\n and status.queue_sha256.qsize() == 0\n and status.queue_get_upload_mode.qsize() == 0\n and status.queue_preupload_lfs.qsize() == 0\n and status.nb_workers_sha256 == 0\n and status.nb_workers_get_upload_mode == 0\n and status.nb_workers_preupload_lfs == 0\n ):\n status.nb_workers_commit += 1\n logger.debug("Job: commit")\n return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))\n\n # 12. If all queues are empty, exit\n elif all(metadata.is_committed or metadata.should_ignore for _, metadata in status.items):\n logger.info("All files have been processed! Exiting worker.")\n return None\n\n # 13. If no task is available, wait\n else:\n status.nb_workers_waiting += 1\n logger.debug(f"No task available, waiting... ({WAITING_TIME_IF_NO_TASKS}s)")\n return (WorkerJob.WAIT, [])\n\n\n####################\n# Atomic jobs (sha256, get_upload_mode, preupload_lfs, commit)\n####################\n\n\ndef _compute_sha256(item: JOB_ITEM_T) -> None:\n """Compute sha256 of a file and save it in metadata."""\n paths, metadata = item\n if metadata.sha256 is None:\n with paths.file_path.open("rb") as f:\n metadata.sha256 = sha_fileobj(f).hex()\n metadata.save(paths)\n\n\ndef _get_upload_mode(items: List[JOB_ITEM_T], api: "HfApi", repo_id: str, repo_type: str, revision: str) -> None:\n """Get upload mode for each file and update metadata.\n\n Also receive info if the file should be ignored.\n """\n additions = [_build_hacky_operation(item) for item in items]\n _fetch_upload_modes(\n additions=additions,\n repo_type=repo_type,\n repo_id=repo_id,\n headers=api._build_hf_headers(),\n revision=quote(revision, safe=""),\n endpoint=api.endpoint,\n )\n for item, addition in zip(items, additions):\n paths, metadata = item\n metadata.upload_mode = addition._upload_mode\n metadata.should_ignore = addition._should_ignore\n metadata.remote_oid = addition._remote_oid\n metadata.save(paths)\n\n\ndef _preupload_lfs(item: JOB_ITEM_T, api: "HfApi", repo_id: str, repo_type: str, revision: str) -> None:\n """Preupload LFS file and update metadata."""\n paths, metadata = item\n addition = _build_hacky_operation(item)\n api.preupload_lfs_files(\n repo_id=repo_id,\n repo_type=repo_type,\n revision=revision,\n additions=[addition],\n )\n\n metadata.is_uploaded = True\n metadata.save(paths)\n\n\ndef _commit(items: List[JOB_ITEM_T], api: "HfApi", repo_id: str, repo_type: str, revision: str) -> None:\n """Commit files to the repo."""\n additions = [_build_hacky_operation(item) for item in items]\n api.create_commit(\n repo_id=repo_id,\n repo_type=repo_type,\n revision=revision,\n operations=additions,\n commit_message="Add files using upload-large-folder tool",\n )\n for paths, metadata in items:\n metadata.is_committed = True\n metadata.save(paths)\n\n\n####################\n# Hacks with CommitOperationAdd to bypass checks/sha256 calculation\n####################\n\n\nclass HackyCommitOperationAdd(CommitOperationAdd):\n def __post_init__(self) -> None:\n if isinstance(self.path_or_fileobj, Path):\n self.path_or_fileobj = str(self.path_or_fileobj)\n\n\ndef _build_hacky_operation(item: JOB_ITEM_T) -> HackyCommitOperationAdd:\n paths, metadata = item\n operation = HackyCommitOperationAdd(path_in_repo=paths.path_in_repo, path_or_fileobj=paths.file_path)\n with paths.file_path.open("rb") as file:\n sample = file.peek(512)[:512]\n if metadata.sha256 is None:\n raise ValueError("sha256 must have been computed by 
now!")\n operation.upload_info = UploadInfo(sha256=bytes.fromhex(metadata.sha256), size=metadata.size, sample=sample)\n operation._upload_mode = metadata.upload_mode # type: ignore[assignment]\n operation._should_ignore = metadata.should_ignore\n operation._remote_oid = metadata.remote_oid\n return operation\n\n\n####################\n# Misc helpers\n####################\n\n\ndef _get_one(queue: "queue.Queue[JOB_ITEM_T]") -> List[JOB_ITEM_T]:\n return [queue.get()]\n\n\ndef _get_n(queue: "queue.Queue[JOB_ITEM_T]", n: int) -> List[JOB_ITEM_T]:\n return [queue.get() for _ in range(min(queue.qsize(), n))]\n\n\ndef _print_overwrite(report: str) -> None:\n """Print a report, overwriting the previous lines.\n\n Since tqdm in using `sys.stderr` to (re-)write progress bars, we need to use `sys.stdout`\n to print the report.\n\n Note: works well only if no other process is writing to `sys.stdout`!\n """\n report += "\n"\n # Get terminal width\n terminal_width = shutil.get_terminal_size().columns\n\n # Count number of lines that should be cleared\n nb_lines = sum(len(line) // terminal_width + 1 for line in report.splitlines())\n\n # Clear previous lines based on the number of lines in the report\n for _ in range(nb_lines):\n sys.stdout.write("\r\033[K") # Clear line\n sys.stdout.write("\033[F") # Move cursor up one line\n\n # Print the new report, filling remaining space with whitespace\n sys.stdout.write(report)\n sys.stdout.write(" " * (terminal_width - len(report.splitlines()[-1])))\n sys.stdout.flush()\n
|
.venv\Lib\site-packages\huggingface_hub\_upload_large_folder.py
|
_upload_large_folder.py
|
Python
| 24,178 | 0.95 | 0.168 | 0.114504 |
python-kit
| 516 |
2023-09-18T16:37:12.378062
|
BSD-3-Clause
| false |
de95b7d4d6b8590d3b90d98e8d260298
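Editor's note on the row above: `upload_large_folder_internal` is not called directly; it backs the public `HfApi.upload_large_folder` method. A minimal sketch with placeholder repo and folder names; thanks to the per-file metadata recovery step in the code above, re-running the same call resumes an interrupted upload instead of starting over:

```py
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="username/my-large-dataset",  # placeholder
    repo_type="dataset",                  # explicitly required, as enforced above
    folder_path="./data",                 # placeholder local folder
    ignore_patterns=["*.tmp"],            # merged with DEFAULT_IGNORE_PATTERNS
)
```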
|
# coding=utf-8\n# Copyright 2023-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains data structures to parse the webhooks payload."""\n\nfrom typing import List, Literal, Optional\n\nfrom .utils import is_pydantic_available\n\n\nif is_pydantic_available():\n from pydantic import BaseModel\nelse:\n # Define a dummy BaseModel to avoid import errors when pydantic is not installed\n # Import error will be raised when trying to use the class\n\n class BaseModel: # type: ignore [no-redef]\n def __init__(self, *args, **kwargs) -> None:\n raise ImportError(\n "You must have `pydantic` installed to use `WebhookPayload`. This is an optional dependency that"\n " should be installed separately. Please run `pip install --upgrade pydantic` and retry."\n )\n\n\n# This is an adaptation of the ReportV3 interface implemented in moon-landing. V0, V1 and V2 have been ignored as they\n# are not in use anymore. Keep it in sync when the format is updated in\n# https://github.com/huggingface/moon-landing/blob/main/server/lib/HFWebhooks.ts (internal link).\n\n\nWebhookEvent_T = Literal[\n "create",\n "delete",\n "move",\n "update",\n]\nRepoChangeEvent_T = Literal[\n "add",\n "move",\n "remove",\n "update",\n]\nRepoType_T = Literal[\n "dataset",\n "model",\n "space",\n]\nDiscussionStatus_T = Literal[\n "closed",\n "draft",\n "open",\n "merged",\n]\nSupportedWebhookVersion = Literal[3]\n\n\nclass ObjectId(BaseModel):\n id: str\n\n\nclass WebhookPayloadUrl(BaseModel):\n web: str\n api: Optional[str] = None\n\n\nclass WebhookPayloadMovedTo(BaseModel):\n name: str\n owner: ObjectId\n\n\nclass WebhookPayloadWebhook(ObjectId):\n version: SupportedWebhookVersion\n\n\nclass WebhookPayloadEvent(BaseModel):\n action: WebhookEvent_T\n scope: str\n\n\nclass WebhookPayloadDiscussionChanges(BaseModel):\n base: str\n mergeCommitId: Optional[str] = None\n\n\nclass WebhookPayloadComment(ObjectId):\n author: ObjectId\n hidden: bool\n content: Optional[str] = None\n url: WebhookPayloadUrl\n\n\nclass WebhookPayloadDiscussion(ObjectId):\n num: int\n author: ObjectId\n url: WebhookPayloadUrl\n title: str\n isPullRequest: bool\n status: DiscussionStatus_T\n changes: Optional[WebhookPayloadDiscussionChanges] = None\n pinned: Optional[bool] = None\n\n\nclass WebhookPayloadRepo(ObjectId):\n owner: ObjectId\n head_sha: Optional[str] = None\n name: str\n private: bool\n subdomain: Optional[str] = None\n tags: Optional[List[str]] = None\n type: Literal["dataset", "model", "space"]\n url: WebhookPayloadUrl\n\n\nclass WebhookPayloadUpdatedRef(BaseModel):\n ref: str\n oldSha: Optional[str] = None\n newSha: Optional[str] = None\n\n\nclass WebhookPayload(BaseModel):\n event: WebhookPayloadEvent\n repo: WebhookPayloadRepo\n discussion: Optional[WebhookPayloadDiscussion] = None\n comment: Optional[WebhookPayloadComment] = None\n webhook: WebhookPayloadWebhook\n movedTo: Optional[WebhookPayloadMovedTo] = None\n updatedRefs: Optional[List[WebhookPayloadUpdatedRef]] = None\n
|
.venv\Lib\site-packages\huggingface_hub\_webhooks_payload.py
|
_webhooks_payload.py
|
Python
| 3,617 | 0.95 | 0.116788 | 0.179245 |
vue-tools
| 648 |
2023-08-01T19:54:07.101228
|
GPL-3.0
| false |
0f10231bb3b5d54dde95aa15c24def3f
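Editor's note on the row above: because every class in it subclasses Pydantic's `BaseModel`, an incoming JSON body can be validated simply by instantiating `WebhookPayload`. A sketch with a hand-built minimal payload; all ids, names, and URLs below are invented for illustration:

```py
from huggingface_hub import WebhookPayload

payload = WebhookPayload(
    event={"action": "update", "scope": "repo.content"},
    repo={
        "id": "abc123",  # invented id
        "owner": {"id": "def456"},
        "name": "username/some-model",
        "private": False,
        "type": "model",
        "url": {"web": "https://huggingface.co/username/some-model"},
    },
    webhook={"id": "wh789", "version": 3},
)

assert payload.event.action == "update"
assert payload.discussion is None  # optional fields default to None
```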
|
# coding=utf-8\n# Copyright 2023-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains `WebhooksServer` and `webhook_endpoint` to create a webhook server easily."""\n\nimport atexit\nimport inspect\nimport os\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Optional\n\nfrom .utils import experimental, is_fastapi_available, is_gradio_available\n\n\nif TYPE_CHECKING:\n import gradio as gr\n from fastapi import Request\n\nif is_fastapi_available():\n from fastapi import FastAPI, Request\n from fastapi.responses import JSONResponse\nelse:\n # Will fail at runtime if FastAPI is not available\n FastAPI = Request = JSONResponse = None # type: ignore [misc, assignment]\n\n\n_global_app: Optional["WebhooksServer"] = None\n_is_local = os.environ.get("SPACE_ID") is None\n\n\n@experimental\nclass WebhooksServer:\n """\n The [`WebhooksServer`] class lets you create an instance of a Gradio app that can receive Huggingface webhooks.\n These webhooks can be registered using the [`~WebhooksServer.add_webhook`] decorator. Webhook endpoints are added to\n the app as a POST endpoint to the FastAPI router. Once all the webhooks are registered, the `launch` method has to be\n called to start the app.\n\n It is recommended to accept [`WebhookPayload`] as the first argument of the webhook function. It is a Pydantic\n model that contains all the information about the webhook event. The data will be parsed automatically for you.\n\n Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to setup your\n WebhooksServer and deploy it on a Space.\n\n <Tip warning={true}>\n\n `WebhooksServer` is experimental. Its API is subject to change in the future.\n\n </Tip>\n\n <Tip warning={true}>\n\n You must have `gradio` installed to use `WebhooksServer` (`pip install --upgrade gradio`).\n\n </Tip>\n\n Args:\n ui (`gradio.Blocks`, optional):\n A Gradio UI instance to be used as the Space landing page. If `None`, a UI displaying instructions\n about the configured webhooks is created.\n webhook_secret (`str`, optional):\n A secret key to verify incoming webhook requests. You can set this value to any secret you want as long as\n you also configure it in your [webhooks settings panel](https://huggingface.co/settings/webhooks). You\n can also set this value as the `WEBHOOK_SECRET` environment variable. If no secret is provided, the\n webhook endpoints are opened without any security.\n\n Example:\n\n ```python\n import gradio as gr\n from huggingface_hub import WebhooksServer, WebhookPayload\n\n with gr.Blocks() as ui:\n ...\n\n app = WebhooksServer(ui=ui, webhook_secret="my_secret_key")\n\n @app.add_webhook("/say_hello")\n async def hello(payload: WebhookPayload):\n return {"message": "hello"}\n\n app.launch()\n ```\n """\n\n def __new__(cls, *args, **kwargs) -> "WebhooksServer":\n if not is_gradio_available():\n raise ImportError(\n "You must have `gradio` installed to use `WebhooksServer`. 
Please run `pip install --upgrade gradio`"\n " first."\n )\n if not is_fastapi_available():\n raise ImportError(\n "You must have `fastapi` installed to use `WebhooksServer`. Please run `pip install --upgrade fastapi`"\n " first."\n )\n return super().__new__(cls)\n\n def __init__(\n self,\n ui: Optional["gr.Blocks"] = None,\n webhook_secret: Optional[str] = None,\n ) -> None:\n self._ui = ui\n\n self.webhook_secret = webhook_secret or os.getenv("WEBHOOK_SECRET")\n self.registered_webhooks: Dict[str, Callable] = {}\n _warn_on_empty_secret(self.webhook_secret)\n\n def add_webhook(self, path: Optional[str] = None) -> Callable:\n """\n Decorator to add a webhook to the [`WebhooksServer`] server.\n\n Args:\n path (`str`, optional):\n The URL path to register the webhook function. If not provided, the function name will be used as the\n path. In any case, all webhooks are registered under `/webhooks`.\n\n Raises:\n ValueError: If the provided path is already registered as a webhook.\n\n Example:\n ```python\n from huggingface_hub import WebhooksServer, WebhookPayload\n\n app = WebhooksServer()\n\n @app.add_webhook\n async def trigger_training(payload: WebhookPayload):\n if payload.repo.type == "dataset" and payload.event.action == "update":\n # Trigger a training job if a dataset is updated\n ...\n\n app.launch()\n ```\n """\n # Usage: directly as decorator. Example: `@app.add_webhook`\n if callable(path):\n # If path is a function, it means it was used as a decorator without arguments\n return self.add_webhook()(path)\n\n # Usage: provide a path. Example: `@app.add_webhook(...)`\n @wraps(FastAPI.post)\n def _inner_post(*args, **kwargs):\n func = args[0]\n abs_path = f"/webhooks/{(path or func.__name__).strip('/')}"\n if abs_path in self.registered_webhooks:\n raise ValueError(f"Webhook {abs_path} already exists.")\n self.registered_webhooks[abs_path] = func\n\n return _inner_post\n\n def launch(self, prevent_thread_lock: bool = False, **launch_kwargs: Any) -> None:\n """Launch the Gradio app and register webhooks to the underlying FastAPI server.\n\n Input parameters are forwarded to Gradio when launching the app.\n """\n ui = self._ui or self._get_default_ui()\n\n # Start Gradio App\n # - as non-blocking so that webhooks can be added afterwards\n # - as shared if launch locally (to debug webhooks)\n launch_kwargs.setdefault("share", _is_local)\n self.fastapi_app, _, _ = ui.launch(prevent_thread_lock=True, **launch_kwargs)\n\n # Register webhooks to FastAPI app\n for path, func in self.registered_webhooks.items():\n # Add secret check if required\n if self.webhook_secret is not None:\n func = _wrap_webhook_to_check_secret(func, webhook_secret=self.webhook_secret)\n\n # Add route to FastAPI app\n self.fastapi_app.post(path)(func)\n\n # Print instructions and block main thread\n space_host = os.environ.get("SPACE_HOST")\n url = "https://" + space_host if space_host is not None else (ui.share_url or ui.local_url)\n if url is None:\n raise ValueError("Cannot find the URL of the app. 
Please provide a valid `ui` or update `gradio` version.")\n url = url.strip("/")\n message = "\nWebhooks are correctly set up and ready to use:"\n message += "\n" + "\n".join(f" - POST {url}{webhook}" for webhook in self.registered_webhooks)\n message += "\nGo to https://huggingface.co/settings/webhooks to set up your webhooks."\n print(message)\n\n if not prevent_thread_lock:\n ui.block_thread()\n\n def _get_default_ui(self) -> "gr.Blocks":\n """Default UI if not provided (lists webhooks and provides basic instructions)."""\n import gradio as gr\n\n with gr.Blocks() as ui:\n gr.Markdown("# This is an app to process 🤗 Webhooks")\n gr.Markdown(\n "Webhooks are a foundation for MLOps-related features. They allow you to listen for new changes on"\n " specific repos or to all repos belonging to a particular set of users/organizations (not just your"\n " repos, but any repo). Check out this [guide](https://huggingface.co/docs/hub/webhooks) to learn"\n " more about webhooks on the Huggingface Hub."\n )\n gr.Markdown(\n f"{len(self.registered_webhooks)} webhook(s) are registered:"\n + "\n\n"\n + "\n ".join(\n f"- [{webhook_path}]({_get_webhook_doc_url(webhook.__name__, webhook_path)})"\n for webhook_path, webhook in self.registered_webhooks.items()\n )\n )\n gr.Markdown(\n "Go to https://huggingface.co/settings/webhooks to set up your webhooks."\n + "\nYour app is running locally. Please look at the logs to check the full URL you need to set."\n if _is_local\n else (\n "\nThis app is running on a Space. You can find the corresponding URL in the options menu"\n " (top-right) > 'Embed the Space'. The URL looks like 'https://{username}-{repo_name}.hf.space'."\n )\n )\n return ui\n\n\n@experimental\ndef webhook_endpoint(path: Optional[str] = None) -> Callable:\n """Decorator to start a [`WebhooksServer`] and register the decorated function as a webhook endpoint.\n\n This is a helper to get started quickly. If you need more flexibility (custom landing page or webhook secret),\n you can use [`WebhooksServer`] directly. You can register multiple webhook endpoints (to the same server) by using\n this decorator multiple times.\n\n Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to set up your\n server and deploy it on a Space.\n\n <Tip warning={true}>\n\n `webhook_endpoint` is experimental. Its API is subject to change in the future.\n\n </Tip>\n\n <Tip warning={true}>\n\n You must have `gradio` installed to use `webhook_endpoint` (`pip install --upgrade gradio`).\n\n </Tip>\n\n Args:\n path (`str`, optional):\n The URL path to register the webhook function. If not provided, the function name will be used as the path.\n In any case, all webhooks are registered under `/webhooks`.\n\n Examples:\n The default usage is to register a function as a webhook endpoint. The function name will be used as the path.\n The server will be started automatically at exit (i.e. at the end of the script).\n\n ```python\n from huggingface_hub import webhook_endpoint, WebhookPayload\n\n @webhook_endpoint\n async def trigger_training(payload: WebhookPayload):\n if payload.repo.type == "dataset" and payload.event.action == "update":\n # Trigger a training job if a dataset is updated\n ...\n\n # Server is automatically started at the end of the script.\n ```\n\n Advanced usage: register a function as a webhook endpoint and start the server manually. 
This is useful if you\n are running it in a notebook.\n\n ```python\n from huggingface_hub import webhook_endpoint, WebhookPayload\n\n @webhook_endpoint\n async def trigger_training(payload: WebhookPayload):\n if payload.repo.type == "dataset" and payload.event.action == "update":\n # Trigger a training job if a dataset is updated\n ...\n\n # Start the server manually\n trigger_training.launch()\n ```\n """\n if callable(path):\n # If path is a function, it means it was used as a decorator without arguments\n return webhook_endpoint()(path)\n\n @wraps(WebhooksServer.add_webhook)\n def _inner(func: Callable) -> Callable:\n app = _get_global_app()\n app.add_webhook(path)(func)\n if len(app.registered_webhooks) == 1:\n # Register `app.launch` to run at exit (only once)\n atexit.register(app.launch)\n\n @wraps(app.launch)\n def _launch_now():\n # Run the app directly (without waiting atexit)\n atexit.unregister(app.launch)\n app.launch()\n\n func.launch = _launch_now # type: ignore\n return func\n\n return _inner\n\n\ndef _get_global_app() -> WebhooksServer:\n global _global_app\n if _global_app is None:\n _global_app = WebhooksServer()\n return _global_app\n\n\ndef _warn_on_empty_secret(webhook_secret: Optional[str]) -> None:\n if webhook_secret is None:\n print("Webhook secret is not defined. This means your webhook endpoints will be open to everyone.")\n print(\n "To add a secret, set `WEBHOOK_SECRET` as environment variable or pass it at initialization: "\n "\n\t`app = WebhooksServer(webhook_secret='my_secret', ...)`"\n )\n print(\n "For more details about webhook secrets, please refer to"\n " https://huggingface.co/docs/hub/webhooks#webhook-secret."\n )\n else:\n print("Webhook secret is correctly defined.")\n\n\ndef _get_webhook_doc_url(webhook_name: str, webhook_path: str) -> str:\n """Returns the anchor to a given webhook in the docs (experimental)"""\n return "/docs#/default/" + webhook_name + webhook_path.replace("/", "_") + "_post"\n\n\ndef _wrap_webhook_to_check_secret(func: Callable, webhook_secret: str) -> Callable:\n """Wraps a webhook function to check the webhook secret before calling the function.\n\n This is a hacky way to add the `request` parameter to the function signature. Since FastAPI based itself on route\n parameters to inject the values to the function, we need to hack the function signature to retrieve the `Request`\n object (and hence the headers). A far cleaner solution would be to use a middleware. However, since\n `fastapi==0.90.1`, a middleware cannot be added once the app has started. And since the FastAPI app is started by\n Gradio internals (and not by us), we cannot add a middleware.\n\n This method is called only when a secret has been defined by the user. If a request is sent without the\n "x-webhook-secret", the function will return a 401 error (unauthorized). 
If the header is sent but is incorrect,\n the function will return a 403 error (forbidden).\n\n Inspired by https://stackoverflow.com/a/33112180.\n """\n initial_sig = inspect.signature(func)\n\n @wraps(func)\n async def _protected_func(request: Request, **kwargs):\n request_secret = request.headers.get("x-webhook-secret")\n if request_secret is None:\n return JSONResponse({"error": "x-webhook-secret header not set."}, status_code=401)\n if request_secret != webhook_secret:\n return JSONResponse({"error": "Invalid webhook secret."}, status_code=403)\n\n # Inject `request` in kwargs if required\n if "request" in initial_sig.parameters:\n kwargs["request"] = request\n\n # Handle both sync and async routes\n if inspect.iscoroutinefunction(func):\n return await func(**kwargs)\n else:\n return func(**kwargs)\n\n # Update signature to include request\n if "request" not in initial_sig.parameters:\n _protected_func.__signature__ = initial_sig.replace( # type: ignore\n parameters=(\n inspect.Parameter(name="request", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=Request),\n )\n + tuple(initial_sig.parameters.values())\n )\n\n # Return protected route\n return _protected_func\n
|
.venv\Lib\site-packages\huggingface_hub\_webhooks_server.py
|
_webhooks_server.py
|
Python
| 15,767 | 0.95 | 0.203608 | 0.121711 |
react-lib
| 765 |
2024-05-14T15:59:00.299547
|
BSD-3-Clause
| false |
e5cc78e63d25abd61eb0de2647094d99
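Editor's note on the row above: on the client side, the secret check implemented in `_wrap_webhook_to_check_secret` expects the shared secret in an `x-webhook-secret` header. A sketch of a manual test call using `requests`; the endpoint URL and secret value are placeholders:

```py
import requests

resp = requests.post(
    "https://username-my-space.hf.space/webhooks/say_hello",  # placeholder endpoint
    json={},  # webhook payload body
    headers={"x-webhook-secret": "my_secret_key"},  # must match the server-side secret
)

# Per the wrapper above: missing header -> 401, wrong secret -> 403.
print(resp.status_code, resp.json())
```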
|
# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# ***********\n# `huggingface_hub` init has 2 modes:\n# - Normal usage:\n# If imported to use it, all modules and functions are lazy-loaded. This means\n# they exist at top level in module but are imported only the first time they are\n# used. This way, `from huggingface_hub import something` will import `something`\n# quickly without the hassle of importing all the features from `huggingface_hub`.\n# - Static check:\n# If statically analyzed, all modules and functions are loaded normally. This way\n# static typing check works properly as well as autocomplete in text editors and\n# IDEs.\n#\n# The static model imports are done inside the `if TYPE_CHECKING:` statement at\n# the bottom of this file. Since module/functions imports are duplicated, it is\n# mandatory to make sure to add them twice when adding one. This is checked in the\n# `make quality` command.\n#\n# To update the static imports, please run the following command and commit the changes.\n# ```\n# # Use script\n# python utils/check_static_imports.py --update-file\n#\n# # Or run style on codebase\n# make style\n# ```\n#\n# ***********\n# Lazy loader vendored from https://github.com/scientific-python/lazy_loader\nimport importlib\nimport os\nimport sys\nfrom typing import TYPE_CHECKING\n\n\n__version__ = "0.33.2"\n\n# Alphabetical order of definitions is ensured in tests\n# WARNING: any comment added in this dictionary definition will be lost when\n# re-generating the file !\n_SUBMOD_ATTRS = {\n "_commit_scheduler": [\n "CommitScheduler",\n ],\n "_inference_endpoints": [\n "InferenceEndpoint",\n "InferenceEndpointError",\n "InferenceEndpointStatus",\n "InferenceEndpointTimeoutError",\n "InferenceEndpointType",\n ],\n "_login": [\n "auth_list",\n "auth_switch",\n "interpreter_login",\n "login",\n "logout",\n "notebook_login",\n ],\n "_oauth": [\n "OAuthInfo",\n "OAuthOrgInfo",\n "OAuthUserInfo",\n "attach_huggingface_oauth",\n "parse_huggingface_oauth",\n ],\n "_snapshot_download": [\n "snapshot_download",\n ],\n "_space_api": [\n "SpaceHardware",\n "SpaceRuntime",\n "SpaceStage",\n "SpaceStorage",\n "SpaceVariable",\n ],\n "_tensorboard_logger": [\n "HFSummaryWriter",\n ],\n "_webhooks_payload": [\n "WebhookPayload",\n "WebhookPayloadComment",\n "WebhookPayloadDiscussion",\n "WebhookPayloadDiscussionChanges",\n "WebhookPayloadEvent",\n "WebhookPayloadMovedTo",\n "WebhookPayloadRepo",\n "WebhookPayloadUrl",\n "WebhookPayloadWebhook",\n ],\n "_webhooks_server": [\n "WebhooksServer",\n "webhook_endpoint",\n ],\n "community": [\n "Discussion",\n "DiscussionComment",\n "DiscussionCommit",\n "DiscussionEvent",\n "DiscussionStatusChange",\n "DiscussionTitleChange",\n "DiscussionWithDetails",\n ],\n "constants": [\n "CONFIG_NAME",\n "FLAX_WEIGHTS_NAME",\n "HUGGINGFACE_CO_URL_HOME",\n "HUGGINGFACE_CO_URL_TEMPLATE",\n "PYTORCH_WEIGHTS_NAME",\n "REPO_TYPE_DATASET",\n "REPO_TYPE_MODEL",\n "REPO_TYPE_SPACE",\n "TF2_WEIGHTS_NAME",\n 
"TF_WEIGHTS_NAME",\n ],\n "fastai_utils": [\n "_save_pretrained_fastai",\n "from_pretrained_fastai",\n "push_to_hub_fastai",\n ],\n "file_download": [\n "HfFileMetadata",\n "_CACHED_NO_EXIST",\n "get_hf_file_metadata",\n "hf_hub_download",\n "hf_hub_url",\n "try_to_load_from_cache",\n ],\n "hf_api": [\n "Collection",\n "CollectionItem",\n "CommitInfo",\n "CommitOperation",\n "CommitOperationAdd",\n "CommitOperationCopy",\n "CommitOperationDelete",\n "DatasetInfo",\n "GitCommitInfo",\n "GitRefInfo",\n "GitRefs",\n "HfApi",\n "ModelInfo",\n "RepoUrl",\n "SpaceInfo",\n "User",\n "UserLikes",\n "WebhookInfo",\n "WebhookWatchedItem",\n "accept_access_request",\n "add_collection_item",\n "add_space_secret",\n "add_space_variable",\n "auth_check",\n "cancel_access_request",\n "change_discussion_status",\n "comment_discussion",\n "create_branch",\n "create_collection",\n "create_commit",\n "create_discussion",\n "create_inference_endpoint",\n "create_inference_endpoint_from_catalog",\n "create_pull_request",\n "create_repo",\n "create_tag",\n "create_webhook",\n "dataset_info",\n "delete_branch",\n "delete_collection",\n "delete_collection_item",\n "delete_file",\n "delete_folder",\n "delete_inference_endpoint",\n "delete_repo",\n "delete_space_secret",\n "delete_space_storage",\n "delete_space_variable",\n "delete_tag",\n "delete_webhook",\n "disable_webhook",\n "duplicate_space",\n "edit_discussion_comment",\n "enable_webhook",\n "file_exists",\n "get_collection",\n "get_dataset_tags",\n "get_discussion_details",\n "get_full_repo_name",\n "get_inference_endpoint",\n "get_model_tags",\n "get_paths_info",\n "get_repo_discussions",\n "get_safetensors_metadata",\n "get_space_runtime",\n "get_space_variables",\n "get_token_permission",\n "get_user_overview",\n "get_webhook",\n "grant_access",\n "list_accepted_access_requests",\n "list_collections",\n "list_datasets",\n "list_inference_catalog",\n "list_inference_endpoints",\n "list_lfs_files",\n "list_liked_repos",\n "list_models",\n "list_organization_members",\n "list_papers",\n "list_pending_access_requests",\n "list_rejected_access_requests",\n "list_repo_commits",\n "list_repo_files",\n "list_repo_likers",\n "list_repo_refs",\n "list_repo_tree",\n "list_spaces",\n "list_user_followers",\n "list_user_following",\n "list_webhooks",\n "merge_pull_request",\n "model_info",\n "move_repo",\n "paper_info",\n "parse_safetensors_file_metadata",\n "pause_inference_endpoint",\n "pause_space",\n "permanently_delete_lfs_files",\n "preupload_lfs_files",\n "reject_access_request",\n "rename_discussion",\n "repo_exists",\n "repo_info",\n "repo_type_and_id_from_hf_id",\n "request_space_hardware",\n "request_space_storage",\n "restart_space",\n "resume_inference_endpoint",\n "revision_exists",\n "run_as_future",\n "scale_to_zero_inference_endpoint",\n "set_space_sleep_time",\n "space_info",\n "super_squash_history",\n "unlike",\n "update_collection_item",\n "update_collection_metadata",\n "update_inference_endpoint",\n "update_repo_settings",\n "update_repo_visibility",\n "update_webhook",\n "upload_file",\n "upload_folder",\n "upload_large_folder",\n "whoami",\n ],\n "hf_file_system": [\n "HfFileSystem",\n "HfFileSystemFile",\n "HfFileSystemResolvedPath",\n "HfFileSystemStreamFile",\n ],\n "hub_mixin": [\n "ModelHubMixin",\n "PyTorchModelHubMixin",\n ],\n "inference._client": [\n "InferenceClient",\n "InferenceTimeoutError",\n ],\n "inference._generated._async_client": [\n "AsyncInferenceClient",\n ],\n "inference._generated.types": [\n 
"AudioClassificationInput",\n "AudioClassificationOutputElement",\n "AudioClassificationOutputTransform",\n "AudioClassificationParameters",\n "AudioToAudioInput",\n "AudioToAudioOutputElement",\n "AutomaticSpeechRecognitionEarlyStoppingEnum",\n "AutomaticSpeechRecognitionGenerationParameters",\n "AutomaticSpeechRecognitionInput",\n "AutomaticSpeechRecognitionOutput",\n "AutomaticSpeechRecognitionOutputChunk",\n "AutomaticSpeechRecognitionParameters",\n "ChatCompletionInput",\n "ChatCompletionInputFunctionDefinition",\n "ChatCompletionInputFunctionName",\n "ChatCompletionInputGrammarType",\n "ChatCompletionInputJSONSchema",\n "ChatCompletionInputMessage",\n "ChatCompletionInputMessageChunk",\n "ChatCompletionInputMessageChunkType",\n "ChatCompletionInputResponseFormatJSONObject",\n "ChatCompletionInputResponseFormatJSONSchema",\n "ChatCompletionInputResponseFormatText",\n "ChatCompletionInputStreamOptions",\n "ChatCompletionInputTool",\n "ChatCompletionInputToolCall",\n "ChatCompletionInputToolChoiceClass",\n "ChatCompletionInputToolChoiceEnum",\n "ChatCompletionInputURL",\n "ChatCompletionOutput",\n "ChatCompletionOutputComplete",\n "ChatCompletionOutputFunctionDefinition",\n "ChatCompletionOutputLogprob",\n "ChatCompletionOutputLogprobs",\n "ChatCompletionOutputMessage",\n "ChatCompletionOutputToolCall",\n "ChatCompletionOutputTopLogprob",\n "ChatCompletionOutputUsage",\n "ChatCompletionStreamOutput",\n "ChatCompletionStreamOutputChoice",\n "ChatCompletionStreamOutputDelta",\n "ChatCompletionStreamOutputDeltaToolCall",\n "ChatCompletionStreamOutputFunction",\n "ChatCompletionStreamOutputLogprob",\n "ChatCompletionStreamOutputLogprobs",\n "ChatCompletionStreamOutputTopLogprob",\n "ChatCompletionStreamOutputUsage",\n "DepthEstimationInput",\n "DepthEstimationOutput",\n "DocumentQuestionAnsweringInput",\n "DocumentQuestionAnsweringInputData",\n "DocumentQuestionAnsweringOutputElement",\n "DocumentQuestionAnsweringParameters",\n "FeatureExtractionInput",\n "FeatureExtractionInputTruncationDirection",\n "FillMaskInput",\n "FillMaskOutputElement",\n "FillMaskParameters",\n "ImageClassificationInput",\n "ImageClassificationOutputElement",\n "ImageClassificationOutputTransform",\n "ImageClassificationParameters",\n "ImageSegmentationInput",\n "ImageSegmentationOutputElement",\n "ImageSegmentationParameters",\n "ImageSegmentationSubtask",\n "ImageToImageInput",\n "ImageToImageOutput",\n "ImageToImageParameters",\n "ImageToImageTargetSize",\n "ImageToTextEarlyStoppingEnum",\n "ImageToTextGenerationParameters",\n "ImageToTextInput",\n "ImageToTextOutput",\n "ImageToTextParameters",\n "ObjectDetectionBoundingBox",\n "ObjectDetectionInput",\n "ObjectDetectionOutputElement",\n "ObjectDetectionParameters",\n "Padding",\n "QuestionAnsweringInput",\n "QuestionAnsweringInputData",\n "QuestionAnsweringOutputElement",\n "QuestionAnsweringParameters",\n "SentenceSimilarityInput",\n "SentenceSimilarityInputData",\n "SummarizationInput",\n "SummarizationOutput",\n "SummarizationParameters",\n "SummarizationTruncationStrategy",\n "TableQuestionAnsweringInput",\n "TableQuestionAnsweringInputData",\n "TableQuestionAnsweringOutputElement",\n "TableQuestionAnsweringParameters",\n "Text2TextGenerationInput",\n "Text2TextGenerationOutput",\n "Text2TextGenerationParameters",\n "Text2TextGenerationTruncationStrategy",\n "TextClassificationInput",\n "TextClassificationOutputElement",\n "TextClassificationOutputTransform",\n "TextClassificationParameters",\n "TextGenerationInput",\n 
"TextGenerationInputGenerateParameters",\n "TextGenerationInputGrammarType",\n "TextGenerationOutput",\n "TextGenerationOutputBestOfSequence",\n "TextGenerationOutputDetails",\n "TextGenerationOutputFinishReason",\n "TextGenerationOutputPrefillToken",\n "TextGenerationOutputToken",\n "TextGenerationStreamOutput",\n "TextGenerationStreamOutputStreamDetails",\n "TextGenerationStreamOutputToken",\n "TextToAudioEarlyStoppingEnum",\n "TextToAudioGenerationParameters",\n "TextToAudioInput",\n "TextToAudioOutput",\n "TextToAudioParameters",\n "TextToImageInput",\n "TextToImageOutput",\n "TextToImageParameters",\n "TextToSpeechEarlyStoppingEnum",\n "TextToSpeechGenerationParameters",\n "TextToSpeechInput",\n "TextToSpeechOutput",\n "TextToSpeechParameters",\n "TextToVideoInput",\n "TextToVideoOutput",\n "TextToVideoParameters",\n "TokenClassificationAggregationStrategy",\n "TokenClassificationInput",\n "TokenClassificationOutputElement",\n "TokenClassificationParameters",\n "TranslationInput",\n "TranslationOutput",\n "TranslationParameters",\n "TranslationTruncationStrategy",\n "TypeEnum",\n "VideoClassificationInput",\n "VideoClassificationOutputElement",\n "VideoClassificationOutputTransform",\n "VideoClassificationParameters",\n "VisualQuestionAnsweringInput",\n "VisualQuestionAnsweringInputData",\n "VisualQuestionAnsweringOutputElement",\n "VisualQuestionAnsweringParameters",\n "ZeroShotClassificationInput",\n "ZeroShotClassificationOutputElement",\n "ZeroShotClassificationParameters",\n "ZeroShotImageClassificationInput",\n "ZeroShotImageClassificationOutputElement",\n "ZeroShotImageClassificationParameters",\n "ZeroShotObjectDetectionBoundingBox",\n "ZeroShotObjectDetectionInput",\n "ZeroShotObjectDetectionOutputElement",\n "ZeroShotObjectDetectionParameters",\n ],\n "inference._mcp.agent": [\n "Agent",\n ],\n "inference._mcp.mcp_client": [\n "MCPClient",\n ],\n "inference_api": [\n "InferenceApi",\n ],\n "keras_mixin": [\n "KerasModelHubMixin",\n "from_pretrained_keras",\n "push_to_hub_keras",\n "save_pretrained_keras",\n ],\n "repocard": [\n "DatasetCard",\n "ModelCard",\n "RepoCard",\n "SpaceCard",\n "metadata_eval_result",\n "metadata_load",\n "metadata_save",\n "metadata_update",\n ],\n "repocard_data": [\n "CardData",\n "DatasetCardData",\n "EvalResult",\n "ModelCardData",\n "SpaceCardData",\n ],\n "repository": [\n "Repository",\n ],\n "serialization": [\n "StateDictSplit",\n "get_tf_storage_size",\n "get_torch_storage_id",\n "get_torch_storage_size",\n "load_state_dict_from_file",\n "load_torch_model",\n "save_torch_model",\n "save_torch_state_dict",\n "split_state_dict_into_shards_factory",\n "split_tf_state_dict_into_shards",\n "split_torch_state_dict_into_shards",\n ],\n "serialization._dduf": [\n "DDUFEntry",\n "export_entries_as_dduf",\n "export_folder_as_dduf",\n "read_dduf_file",\n ],\n "utils": [\n "CacheNotFound",\n "CachedFileInfo",\n "CachedRepoInfo",\n "CachedRevisionInfo",\n "CorruptedCacheException",\n "DeleteCacheStrategy",\n "HFCacheInfo",\n "HfFolder",\n "cached_assets_path",\n "configure_http_backend",\n "dump_environment_info",\n "get_session",\n "get_token",\n "logging",\n "scan_cache_dir",\n ],\n}\n\n# WARNING: __all__ is generated automatically, Any manual edit will be lost when re-generating this file !\n#\n# To update the static imports, please run the following command and commit the changes.\n# ```\n# # Use script\n# python utils/check_all_variable.py --update\n#\n# # Or run style on codebase\n# make style\n# ```\n\n__all__ = [\n "Agent",\n 
"AsyncInferenceClient",\n "AudioClassificationInput",\n "AudioClassificationOutputElement",\n "AudioClassificationOutputTransform",\n "AudioClassificationParameters",\n "AudioToAudioInput",\n "AudioToAudioOutputElement",\n "AutomaticSpeechRecognitionEarlyStoppingEnum",\n "AutomaticSpeechRecognitionGenerationParameters",\n "AutomaticSpeechRecognitionInput",\n "AutomaticSpeechRecognitionOutput",\n "AutomaticSpeechRecognitionOutputChunk",\n "AutomaticSpeechRecognitionParameters",\n "CONFIG_NAME",\n "CacheNotFound",\n "CachedFileInfo",\n "CachedRepoInfo",\n "CachedRevisionInfo",\n "CardData",\n "ChatCompletionInput",\n "ChatCompletionInputFunctionDefinition",\n "ChatCompletionInputFunctionName",\n "ChatCompletionInputGrammarType",\n "ChatCompletionInputJSONSchema",\n "ChatCompletionInputMessage",\n "ChatCompletionInputMessageChunk",\n "ChatCompletionInputMessageChunkType",\n "ChatCompletionInputResponseFormatJSONObject",\n "ChatCompletionInputResponseFormatJSONSchema",\n "ChatCompletionInputResponseFormatText",\n "ChatCompletionInputStreamOptions",\n "ChatCompletionInputTool",\n "ChatCompletionInputToolCall",\n "ChatCompletionInputToolChoiceClass",\n "ChatCompletionInputToolChoiceEnum",\n "ChatCompletionInputURL",\n "ChatCompletionOutput",\n "ChatCompletionOutputComplete",\n "ChatCompletionOutputFunctionDefinition",\n "ChatCompletionOutputLogprob",\n "ChatCompletionOutputLogprobs",\n "ChatCompletionOutputMessage",\n "ChatCompletionOutputToolCall",\n "ChatCompletionOutputTopLogprob",\n "ChatCompletionOutputUsage",\n "ChatCompletionStreamOutput",\n "ChatCompletionStreamOutputChoice",\n "ChatCompletionStreamOutputDelta",\n "ChatCompletionStreamOutputDeltaToolCall",\n "ChatCompletionStreamOutputFunction",\n "ChatCompletionStreamOutputLogprob",\n "ChatCompletionStreamOutputLogprobs",\n "ChatCompletionStreamOutputTopLogprob",\n "ChatCompletionStreamOutputUsage",\n "Collection",\n "CollectionItem",\n "CommitInfo",\n "CommitOperation",\n "CommitOperationAdd",\n "CommitOperationCopy",\n "CommitOperationDelete",\n "CommitScheduler",\n "CorruptedCacheException",\n "DDUFEntry",\n "DatasetCard",\n "DatasetCardData",\n "DatasetInfo",\n "DeleteCacheStrategy",\n "DepthEstimationInput",\n "DepthEstimationOutput",\n "Discussion",\n "DiscussionComment",\n "DiscussionCommit",\n "DiscussionEvent",\n "DiscussionStatusChange",\n "DiscussionTitleChange",\n "DiscussionWithDetails",\n "DocumentQuestionAnsweringInput",\n "DocumentQuestionAnsweringInputData",\n "DocumentQuestionAnsweringOutputElement",\n "DocumentQuestionAnsweringParameters",\n "EvalResult",\n "FLAX_WEIGHTS_NAME",\n "FeatureExtractionInput",\n "FeatureExtractionInputTruncationDirection",\n "FillMaskInput",\n "FillMaskOutputElement",\n "FillMaskParameters",\n "GitCommitInfo",\n "GitRefInfo",\n "GitRefs",\n "HFCacheInfo",\n "HFSummaryWriter",\n "HUGGINGFACE_CO_URL_HOME",\n "HUGGINGFACE_CO_URL_TEMPLATE",\n "HfApi",\n "HfFileMetadata",\n "HfFileSystem",\n "HfFileSystemFile",\n "HfFileSystemResolvedPath",\n "HfFileSystemStreamFile",\n "HfFolder",\n "ImageClassificationInput",\n "ImageClassificationOutputElement",\n "ImageClassificationOutputTransform",\n "ImageClassificationParameters",\n "ImageSegmentationInput",\n "ImageSegmentationOutputElement",\n "ImageSegmentationParameters",\n "ImageSegmentationSubtask",\n "ImageToImageInput",\n "ImageToImageOutput",\n "ImageToImageParameters",\n "ImageToImageTargetSize",\n "ImageToTextEarlyStoppingEnum",\n "ImageToTextGenerationParameters",\n "ImageToTextInput",\n "ImageToTextOutput",\n "ImageToTextParameters",\n 
"InferenceApi",\n "InferenceClient",\n "InferenceEndpoint",\n "InferenceEndpointError",\n "InferenceEndpointStatus",\n "InferenceEndpointTimeoutError",\n "InferenceEndpointType",\n "InferenceTimeoutError",\n "KerasModelHubMixin",\n "MCPClient",\n "ModelCard",\n "ModelCardData",\n "ModelHubMixin",\n "ModelInfo",\n "OAuthInfo",\n "OAuthOrgInfo",\n "OAuthUserInfo",\n "ObjectDetectionBoundingBox",\n "ObjectDetectionInput",\n "ObjectDetectionOutputElement",\n "ObjectDetectionParameters",\n "PYTORCH_WEIGHTS_NAME",\n "Padding",\n "PyTorchModelHubMixin",\n "QuestionAnsweringInput",\n "QuestionAnsweringInputData",\n "QuestionAnsweringOutputElement",\n "QuestionAnsweringParameters",\n "REPO_TYPE_DATASET",\n "REPO_TYPE_MODEL",\n "REPO_TYPE_SPACE",\n "RepoCard",\n "RepoUrl",\n "Repository",\n "SentenceSimilarityInput",\n "SentenceSimilarityInputData",\n "SpaceCard",\n "SpaceCardData",\n "SpaceHardware",\n "SpaceInfo",\n "SpaceRuntime",\n "SpaceStage",\n "SpaceStorage",\n "SpaceVariable",\n "StateDictSplit",\n "SummarizationInput",\n "SummarizationOutput",\n "SummarizationParameters",\n "SummarizationTruncationStrategy",\n "TF2_WEIGHTS_NAME",\n "TF_WEIGHTS_NAME",\n "TableQuestionAnsweringInput",\n "TableQuestionAnsweringInputData",\n "TableQuestionAnsweringOutputElement",\n "TableQuestionAnsweringParameters",\n "Text2TextGenerationInput",\n "Text2TextGenerationOutput",\n "Text2TextGenerationParameters",\n "Text2TextGenerationTruncationStrategy",\n "TextClassificationInput",\n "TextClassificationOutputElement",\n "TextClassificationOutputTransform",\n "TextClassificationParameters",\n "TextGenerationInput",\n "TextGenerationInputGenerateParameters",\n "TextGenerationInputGrammarType",\n "TextGenerationOutput",\n "TextGenerationOutputBestOfSequence",\n "TextGenerationOutputDetails",\n "TextGenerationOutputFinishReason",\n "TextGenerationOutputPrefillToken",\n "TextGenerationOutputToken",\n "TextGenerationStreamOutput",\n "TextGenerationStreamOutputStreamDetails",\n "TextGenerationStreamOutputToken",\n "TextToAudioEarlyStoppingEnum",\n "TextToAudioGenerationParameters",\n "TextToAudioInput",\n "TextToAudioOutput",\n "TextToAudioParameters",\n "TextToImageInput",\n "TextToImageOutput",\n "TextToImageParameters",\n "TextToSpeechEarlyStoppingEnum",\n "TextToSpeechGenerationParameters",\n "TextToSpeechInput",\n "TextToSpeechOutput",\n "TextToSpeechParameters",\n "TextToVideoInput",\n "TextToVideoOutput",\n "TextToVideoParameters",\n "TokenClassificationAggregationStrategy",\n "TokenClassificationInput",\n "TokenClassificationOutputElement",\n "TokenClassificationParameters",\n "TranslationInput",\n "TranslationOutput",\n "TranslationParameters",\n "TranslationTruncationStrategy",\n "TypeEnum",\n "User",\n "UserLikes",\n "VideoClassificationInput",\n "VideoClassificationOutputElement",\n "VideoClassificationOutputTransform",\n "VideoClassificationParameters",\n "VisualQuestionAnsweringInput",\n "VisualQuestionAnsweringInputData",\n "VisualQuestionAnsweringOutputElement",\n "VisualQuestionAnsweringParameters",\n "WebhookInfo",\n "WebhookPayload",\n "WebhookPayloadComment",\n "WebhookPayloadDiscussion",\n "WebhookPayloadDiscussionChanges",\n "WebhookPayloadEvent",\n "WebhookPayloadMovedTo",\n "WebhookPayloadRepo",\n "WebhookPayloadUrl",\n "WebhookPayloadWebhook",\n "WebhookWatchedItem",\n "WebhooksServer",\n "ZeroShotClassificationInput",\n "ZeroShotClassificationOutputElement",\n "ZeroShotClassificationParameters",\n "ZeroShotImageClassificationInput",\n "ZeroShotImageClassificationOutputElement",\n 
"ZeroShotImageClassificationParameters",\n "ZeroShotObjectDetectionBoundingBox",\n "ZeroShotObjectDetectionInput",\n "ZeroShotObjectDetectionOutputElement",\n "ZeroShotObjectDetectionParameters",\n "_CACHED_NO_EXIST",\n "_save_pretrained_fastai",\n "accept_access_request",\n "add_collection_item",\n "add_space_secret",\n "add_space_variable",\n "attach_huggingface_oauth",\n "auth_check",\n "auth_list",\n "auth_switch",\n "cached_assets_path",\n "cancel_access_request",\n "change_discussion_status",\n "comment_discussion",\n "configure_http_backend",\n "create_branch",\n "create_collection",\n "create_commit",\n "create_discussion",\n "create_inference_endpoint",\n "create_inference_endpoint_from_catalog",\n "create_pull_request",\n "create_repo",\n "create_tag",\n "create_webhook",\n "dataset_info",\n "delete_branch",\n "delete_collection",\n "delete_collection_item",\n "delete_file",\n "delete_folder",\n "delete_inference_endpoint",\n "delete_repo",\n "delete_space_secret",\n "delete_space_storage",\n "delete_space_variable",\n "delete_tag",\n "delete_webhook",\n "disable_webhook",\n "dump_environment_info",\n "duplicate_space",\n "edit_discussion_comment",\n "enable_webhook",\n "export_entries_as_dduf",\n "export_folder_as_dduf",\n "file_exists",\n "from_pretrained_fastai",\n "from_pretrained_keras",\n "get_collection",\n "get_dataset_tags",\n "get_discussion_details",\n "get_full_repo_name",\n "get_hf_file_metadata",\n "get_inference_endpoint",\n "get_model_tags",\n "get_paths_info",\n "get_repo_discussions",\n "get_safetensors_metadata",\n "get_session",\n "get_space_runtime",\n "get_space_variables",\n "get_tf_storage_size",\n "get_token",\n "get_token_permission",\n "get_torch_storage_id",\n "get_torch_storage_size",\n "get_user_overview",\n "get_webhook",\n "grant_access",\n "hf_hub_download",\n "hf_hub_url",\n "interpreter_login",\n "list_accepted_access_requests",\n "list_collections",\n "list_datasets",\n "list_inference_catalog",\n "list_inference_endpoints",\n "list_lfs_files",\n "list_liked_repos",\n "list_models",\n "list_organization_members",\n "list_papers",\n "list_pending_access_requests",\n "list_rejected_access_requests",\n "list_repo_commits",\n "list_repo_files",\n "list_repo_likers",\n "list_repo_refs",\n "list_repo_tree",\n "list_spaces",\n "list_user_followers",\n "list_user_following",\n "list_webhooks",\n "load_state_dict_from_file",\n "load_torch_model",\n "logging",\n "login",\n "logout",\n "merge_pull_request",\n "metadata_eval_result",\n "metadata_load",\n "metadata_save",\n "metadata_update",\n "model_info",\n "move_repo",\n "notebook_login",\n "paper_info",\n "parse_huggingface_oauth",\n "parse_safetensors_file_metadata",\n "pause_inference_endpoint",\n "pause_space",\n "permanently_delete_lfs_files",\n "preupload_lfs_files",\n "push_to_hub_fastai",\n "push_to_hub_keras",\n "read_dduf_file",\n "reject_access_request",\n "rename_discussion",\n "repo_exists",\n "repo_info",\n "repo_type_and_id_from_hf_id",\n "request_space_hardware",\n "request_space_storage",\n "restart_space",\n "resume_inference_endpoint",\n "revision_exists",\n "run_as_future",\n "save_pretrained_keras",\n "save_torch_model",\n "save_torch_state_dict",\n "scale_to_zero_inference_endpoint",\n "scan_cache_dir",\n "set_space_sleep_time",\n "snapshot_download",\n "space_info",\n "split_state_dict_into_shards_factory",\n "split_tf_state_dict_into_shards",\n "split_torch_state_dict_into_shards",\n "super_squash_history",\n "try_to_load_from_cache",\n "unlike",\n "update_collection_item",\n 
"update_collection_metadata",\n "update_inference_endpoint",\n "update_repo_settings",\n "update_repo_visibility",\n "update_webhook",\n "upload_file",\n "upload_folder",\n "upload_large_folder",\n "webhook_endpoint",\n "whoami",\n]\n\n\ndef _attach(package_name, submodules=None, submod_attrs=None):\n """Attach lazily loaded submodules, functions, or other attributes.\n\n Typically, modules import submodules and attributes as follows:\n\n ```py\n import mysubmodule\n import anothersubmodule\n\n from .foo import someattr\n ```\n\n The idea is to replace a package's `__getattr__`, `__dir__`, such that all imports\n work exactly the way they would with normal imports, except that the import occurs\n upon first use.\n\n The typical way to call this function, replacing the above imports, is:\n\n ```python\n __getattr__, __dir__ = lazy.attach(\n __name__,\n ['mysubmodule', 'anothersubmodule'],\n {'foo': ['someattr']}\n )\n ```\n This functionality requires Python 3.7 or higher.\n\n Args:\n package_name (`str`):\n Typically use `__name__`.\n submodules (`set`):\n List of submodules to attach.\n submod_attrs (`dict`):\n Dictionary of submodule -> list of attributes / functions.\n These attributes are imported as they are used.\n\n Returns:\n __getattr__, __dir__, __all__\n\n """\n if submod_attrs is None:\n submod_attrs = {}\n\n if submodules is None:\n submodules = set()\n else:\n submodules = set(submodules)\n\n attr_to_modules = {attr: mod for mod, attrs in submod_attrs.items() for attr in attrs}\n\n def __getattr__(name):\n if name in submodules:\n try:\n return importlib.import_module(f"{package_name}.{name}")\n except Exception as e:\n print(f"Error importing {package_name}.{name}: {e}")\n raise\n elif name in attr_to_modules:\n submod_path = f"{package_name}.{attr_to_modules[name]}"\n try:\n submod = importlib.import_module(submod_path)\n except Exception as e:\n print(f"Error importing {submod_path}: {e}")\n raise\n attr = getattr(submod, name)\n\n # If the attribute lives in a file (module) with the same\n # name as the attribute, ensure that the attribute and *not*\n # the module is accessible on the package.\n if name == attr_to_modules[name]:\n pkg = sys.modules[package_name]\n pkg.__dict__[name] = attr\n\n return attr\n else:\n raise AttributeError(f"No {package_name} attribute {name}")\n\n def __dir__():\n return __all__\n\n return __getattr__, __dir__\n\n\n__getattr__, __dir__ = _attach(__name__, submodules=[], submod_attrs=_SUBMOD_ATTRS)\n\nif os.environ.get("EAGER_IMPORT", ""):\n for attr in __all__:\n __getattr__(attr)\n\n# WARNING: any content below this statement is generated automatically. 
Any manual edit\n# will be lost when re-generating this file !\n#\n# To update the static imports, please run the following command and commit the changes.\n# ```\n# # Use script\n# python utils/check_static_imports.py --update\n#\n# # Or run style on codebase\n# make style\n# ```\nif TYPE_CHECKING: # pragma: no cover\n from ._commit_scheduler import CommitScheduler # noqa: F401\n from ._inference_endpoints import (\n InferenceEndpoint, # noqa: F401\n InferenceEndpointError, # noqa: F401\n InferenceEndpointStatus, # noqa: F401\n InferenceEndpointTimeoutError, # noqa: F401\n InferenceEndpointType, # noqa: F401\n )\n from ._login import (\n auth_list, # noqa: F401\n auth_switch, # noqa: F401\n interpreter_login, # noqa: F401\n login, # noqa: F401\n logout, # noqa: F401\n notebook_login, # noqa: F401\n )\n from ._oauth import (\n OAuthInfo, # noqa: F401\n OAuthOrgInfo, # noqa: F401\n OAuthUserInfo, # noqa: F401\n attach_huggingface_oauth, # noqa: F401\n parse_huggingface_oauth, # noqa: F401\n )\n from ._snapshot_download import snapshot_download # noqa: F401\n from ._space_api import (\n SpaceHardware, # noqa: F401\n SpaceRuntime, # noqa: F401\n SpaceStage, # noqa: F401\n SpaceStorage, # noqa: F401\n SpaceVariable, # noqa: F401\n )\n from ._tensorboard_logger import HFSummaryWriter # noqa: F401\n from ._webhooks_payload import (\n WebhookPayload, # noqa: F401\n WebhookPayloadComment, # noqa: F401\n WebhookPayloadDiscussion, # noqa: F401\n WebhookPayloadDiscussionChanges, # noqa: F401\n WebhookPayloadEvent, # noqa: F401\n WebhookPayloadMovedTo, # noqa: F401\n WebhookPayloadRepo, # noqa: F401\n WebhookPayloadUrl, # noqa: F401\n WebhookPayloadWebhook, # noqa: F401\n )\n from ._webhooks_server import (\n WebhooksServer, # noqa: F401\n webhook_endpoint, # noqa: F401\n )\n from .community import (\n Discussion, # noqa: F401\n DiscussionComment, # noqa: F401\n DiscussionCommit, # noqa: F401\n DiscussionEvent, # noqa: F401\n DiscussionStatusChange, # noqa: F401\n DiscussionTitleChange, # noqa: F401\n DiscussionWithDetails, # noqa: F401\n )\n from .constants import (\n CONFIG_NAME, # noqa: F401\n FLAX_WEIGHTS_NAME, # noqa: F401\n HUGGINGFACE_CO_URL_HOME, # noqa: F401\n HUGGINGFACE_CO_URL_TEMPLATE, # noqa: F401\n PYTORCH_WEIGHTS_NAME, # noqa: F401\n REPO_TYPE_DATASET, # noqa: F401\n REPO_TYPE_MODEL, # noqa: F401\n REPO_TYPE_SPACE, # noqa: F401\n TF2_WEIGHTS_NAME, # noqa: F401\n TF_WEIGHTS_NAME, # noqa: F401\n )\n from .fastai_utils import (\n _save_pretrained_fastai, # noqa: F401\n from_pretrained_fastai, # noqa: F401\n push_to_hub_fastai, # noqa: F401\n )\n from .file_download import (\n _CACHED_NO_EXIST, # noqa: F401\n HfFileMetadata, # noqa: F401\n get_hf_file_metadata, # noqa: F401\n hf_hub_download, # noqa: F401\n hf_hub_url, # noqa: F401\n try_to_load_from_cache, # noqa: F401\n )\n from .hf_api import (\n Collection, # noqa: F401\n CollectionItem, # noqa: F401\n CommitInfo, # noqa: F401\n CommitOperation, # noqa: F401\n CommitOperationAdd, # noqa: F401\n CommitOperationCopy, # noqa: F401\n CommitOperationDelete, # noqa: F401\n DatasetInfo, # noqa: F401\n GitCommitInfo, # noqa: F401\n GitRefInfo, # noqa: F401\n GitRefs, # noqa: F401\n HfApi, # noqa: F401\n ModelInfo, # noqa: F401\n RepoUrl, # noqa: F401\n SpaceInfo, # noqa: F401\n User, # noqa: F401\n UserLikes, # noqa: F401\n WebhookInfo, # noqa: F401\n WebhookWatchedItem, # noqa: F401\n accept_access_request, # noqa: F401\n add_collection_item, # noqa: F401\n add_space_secret, # noqa: F401\n add_space_variable, # noqa: F401\n auth_check, # noqa: 
F401\n cancel_access_request, # noqa: F401\n change_discussion_status, # noqa: F401\n comment_discussion, # noqa: F401\n create_branch, # noqa: F401\n create_collection, # noqa: F401\n create_commit, # noqa: F401\n create_discussion, # noqa: F401\n create_inference_endpoint, # noqa: F401\n create_inference_endpoint_from_catalog, # noqa: F401\n create_pull_request, # noqa: F401\n create_repo, # noqa: F401\n create_tag, # noqa: F401\n create_webhook, # noqa: F401\n dataset_info, # noqa: F401\n delete_branch, # noqa: F401\n delete_collection, # noqa: F401\n delete_collection_item, # noqa: F401\n delete_file, # noqa: F401\n delete_folder, # noqa: F401\n delete_inference_endpoint, # noqa: F401\n delete_repo, # noqa: F401\n delete_space_secret, # noqa: F401\n delete_space_storage, # noqa: F401\n delete_space_variable, # noqa: F401\n delete_tag, # noqa: F401\n delete_webhook, # noqa: F401\n disable_webhook, # noqa: F401\n duplicate_space, # noqa: F401\n edit_discussion_comment, # noqa: F401\n enable_webhook, # noqa: F401\n file_exists, # noqa: F401\n get_collection, # noqa: F401\n get_dataset_tags, # noqa: F401\n get_discussion_details, # noqa: F401\n get_full_repo_name, # noqa: F401\n get_inference_endpoint, # noqa: F401\n get_model_tags, # noqa: F401\n get_paths_info, # noqa: F401\n get_repo_discussions, # noqa: F401\n get_safetensors_metadata, # noqa: F401\n get_space_runtime, # noqa: F401\n get_space_variables, # noqa: F401\n get_token_permission, # noqa: F401\n get_user_overview, # noqa: F401\n get_webhook, # noqa: F401\n grant_access, # noqa: F401\n list_accepted_access_requests, # noqa: F401\n list_collections, # noqa: F401\n list_datasets, # noqa: F401\n list_inference_catalog, # noqa: F401\n list_inference_endpoints, # noqa: F401\n list_lfs_files, # noqa: F401\n list_liked_repos, # noqa: F401\n list_models, # noqa: F401\n list_organization_members, # noqa: F401\n list_papers, # noqa: F401\n list_pending_access_requests, # noqa: F401\n list_rejected_access_requests, # noqa: F401\n list_repo_commits, # noqa: F401\n list_repo_files, # noqa: F401\n list_repo_likers, # noqa: F401\n list_repo_refs, # noqa: F401\n list_repo_tree, # noqa: F401\n list_spaces, # noqa: F401\n list_user_followers, # noqa: F401\n list_user_following, # noqa: F401\n list_webhooks, # noqa: F401\n merge_pull_request, # noqa: F401\n model_info, # noqa: F401\n move_repo, # noqa: F401\n paper_info, # noqa: F401\n parse_safetensors_file_metadata, # noqa: F401\n pause_inference_endpoint, # noqa: F401\n pause_space, # noqa: F401\n permanently_delete_lfs_files, # noqa: F401\n preupload_lfs_files, # noqa: F401\n reject_access_request, # noqa: F401\n rename_discussion, # noqa: F401\n repo_exists, # noqa: F401\n repo_info, # noqa: F401\n repo_type_and_id_from_hf_id, # noqa: F401\n request_space_hardware, # noqa: F401\n request_space_storage, # noqa: F401\n restart_space, # noqa: F401\n resume_inference_endpoint, # noqa: F401\n revision_exists, # noqa: F401\n run_as_future, # noqa: F401\n scale_to_zero_inference_endpoint, # noqa: F401\n set_space_sleep_time, # noqa: F401\n space_info, # noqa: F401\n super_squash_history, # noqa: F401\n unlike, # noqa: F401\n update_collection_item, # noqa: F401\n update_collection_metadata, # noqa: F401\n update_inference_endpoint, # noqa: F401\n update_repo_settings, # noqa: F401\n update_repo_visibility, # noqa: F401\n update_webhook, # noqa: F401\n upload_file, # noqa: F401\n upload_folder, # noqa: F401\n upload_large_folder, # noqa: F401\n whoami, # noqa: F401\n )\n from .hf_file_system import 
(\n HfFileSystem, # noqa: F401\n HfFileSystemFile, # noqa: F401\n HfFileSystemResolvedPath, # noqa: F401\n HfFileSystemStreamFile, # noqa: F401\n )\n from .hub_mixin import (\n ModelHubMixin, # noqa: F401\n PyTorchModelHubMixin, # noqa: F401\n )\n from .inference._client import (\n InferenceClient, # noqa: F401\n InferenceTimeoutError, # noqa: F401\n )\n from .inference._generated._async_client import AsyncInferenceClient # noqa: F401\n from .inference._generated.types import (\n AudioClassificationInput, # noqa: F401\n AudioClassificationOutputElement, # noqa: F401\n AudioClassificationOutputTransform, # noqa: F401\n AudioClassificationParameters, # noqa: F401\n AudioToAudioInput, # noqa: F401\n AudioToAudioOutputElement, # noqa: F401\n AutomaticSpeechRecognitionEarlyStoppingEnum, # noqa: F401\n AutomaticSpeechRecognitionGenerationParameters, # noqa: F401\n AutomaticSpeechRecognitionInput, # noqa: F401\n AutomaticSpeechRecognitionOutput, # noqa: F401\n AutomaticSpeechRecognitionOutputChunk, # noqa: F401\n AutomaticSpeechRecognitionParameters, # noqa: F401\n ChatCompletionInput, # noqa: F401\n ChatCompletionInputFunctionDefinition, # noqa: F401\n ChatCompletionInputFunctionName, # noqa: F401\n ChatCompletionInputGrammarType, # noqa: F401\n ChatCompletionInputJSONSchema, # noqa: F401\n ChatCompletionInputMessage, # noqa: F401\n ChatCompletionInputMessageChunk, # noqa: F401\n ChatCompletionInputMessageChunkType, # noqa: F401\n ChatCompletionInputResponseFormatJSONObject, # noqa: F401\n ChatCompletionInputResponseFormatJSONSchema, # noqa: F401\n ChatCompletionInputResponseFormatText, # noqa: F401\n ChatCompletionInputStreamOptions, # noqa: F401\n ChatCompletionInputTool, # noqa: F401\n ChatCompletionInputToolCall, # noqa: F401\n ChatCompletionInputToolChoiceClass, # noqa: F401\n ChatCompletionInputToolChoiceEnum, # noqa: F401\n ChatCompletionInputURL, # noqa: F401\n ChatCompletionOutput, # noqa: F401\n ChatCompletionOutputComplete, # noqa: F401\n ChatCompletionOutputFunctionDefinition, # noqa: F401\n ChatCompletionOutputLogprob, # noqa: F401\n ChatCompletionOutputLogprobs, # noqa: F401\n ChatCompletionOutputMessage, # noqa: F401\n ChatCompletionOutputToolCall, # noqa: F401\n ChatCompletionOutputTopLogprob, # noqa: F401\n ChatCompletionOutputUsage, # noqa: F401\n ChatCompletionStreamOutput, # noqa: F401\n ChatCompletionStreamOutputChoice, # noqa: F401\n ChatCompletionStreamOutputDelta, # noqa: F401\n ChatCompletionStreamOutputDeltaToolCall, # noqa: F401\n ChatCompletionStreamOutputFunction, # noqa: F401\n ChatCompletionStreamOutputLogprob, # noqa: F401\n ChatCompletionStreamOutputLogprobs, # noqa: F401\n ChatCompletionStreamOutputTopLogprob, # noqa: F401\n ChatCompletionStreamOutputUsage, # noqa: F401\n DepthEstimationInput, # noqa: F401\n DepthEstimationOutput, # noqa: F401\n DocumentQuestionAnsweringInput, # noqa: F401\n DocumentQuestionAnsweringInputData, # noqa: F401\n DocumentQuestionAnsweringOutputElement, # noqa: F401\n DocumentQuestionAnsweringParameters, # noqa: F401\n FeatureExtractionInput, # noqa: F401\n FeatureExtractionInputTruncationDirection, # noqa: F401\n FillMaskInput, # noqa: F401\n FillMaskOutputElement, # noqa: F401\n FillMaskParameters, # noqa: F401\n ImageClassificationInput, # noqa: F401\n ImageClassificationOutputElement, # noqa: F401\n ImageClassificationOutputTransform, # noqa: F401\n ImageClassificationParameters, # noqa: F401\n ImageSegmentationInput, # noqa: F401\n ImageSegmentationOutputElement, # noqa: F401\n ImageSegmentationParameters, # noqa: F401\n 
ImageSegmentationSubtask, # noqa: F401\n ImageToImageInput, # noqa: F401\n ImageToImageOutput, # noqa: F401\n ImageToImageParameters, # noqa: F401\n ImageToImageTargetSize, # noqa: F401\n ImageToTextEarlyStoppingEnum, # noqa: F401\n ImageToTextGenerationParameters, # noqa: F401\n ImageToTextInput, # noqa: F401\n ImageToTextOutput, # noqa: F401\n ImageToTextParameters, # noqa: F401\n ObjectDetectionBoundingBox, # noqa: F401\n ObjectDetectionInput, # noqa: F401\n ObjectDetectionOutputElement, # noqa: F401\n ObjectDetectionParameters, # noqa: F401\n Padding, # noqa: F401\n QuestionAnsweringInput, # noqa: F401\n QuestionAnsweringInputData, # noqa: F401\n QuestionAnsweringOutputElement, # noqa: F401\n QuestionAnsweringParameters, # noqa: F401\n SentenceSimilarityInput, # noqa: F401\n SentenceSimilarityInputData, # noqa: F401\n SummarizationInput, # noqa: F401\n SummarizationOutput, # noqa: F401\n SummarizationParameters, # noqa: F401\n SummarizationTruncationStrategy, # noqa: F401\n TableQuestionAnsweringInput, # noqa: F401\n TableQuestionAnsweringInputData, # noqa: F401\n TableQuestionAnsweringOutputElement, # noqa: F401\n TableQuestionAnsweringParameters, # noqa: F401\n Text2TextGenerationInput, # noqa: F401\n Text2TextGenerationOutput, # noqa: F401\n Text2TextGenerationParameters, # noqa: F401\n Text2TextGenerationTruncationStrategy, # noqa: F401\n TextClassificationInput, # noqa: F401\n TextClassificationOutputElement, # noqa: F401\n TextClassificationOutputTransform, # noqa: F401\n TextClassificationParameters, # noqa: F401\n TextGenerationInput, # noqa: F401\n TextGenerationInputGenerateParameters, # noqa: F401\n TextGenerationInputGrammarType, # noqa: F401\n TextGenerationOutput, # noqa: F401\n TextGenerationOutputBestOfSequence, # noqa: F401\n TextGenerationOutputDetails, # noqa: F401\n TextGenerationOutputFinishReason, # noqa: F401\n TextGenerationOutputPrefillToken, # noqa: F401\n TextGenerationOutputToken, # noqa: F401\n TextGenerationStreamOutput, # noqa: F401\n TextGenerationStreamOutputStreamDetails, # noqa: F401\n TextGenerationStreamOutputToken, # noqa: F401\n TextToAudioEarlyStoppingEnum, # noqa: F401\n TextToAudioGenerationParameters, # noqa: F401\n TextToAudioInput, # noqa: F401\n TextToAudioOutput, # noqa: F401\n TextToAudioParameters, # noqa: F401\n TextToImageInput, # noqa: F401\n TextToImageOutput, # noqa: F401\n TextToImageParameters, # noqa: F401\n TextToSpeechEarlyStoppingEnum, # noqa: F401\n TextToSpeechGenerationParameters, # noqa: F401\n TextToSpeechInput, # noqa: F401\n TextToSpeechOutput, # noqa: F401\n TextToSpeechParameters, # noqa: F401\n TextToVideoInput, # noqa: F401\n TextToVideoOutput, # noqa: F401\n TextToVideoParameters, # noqa: F401\n TokenClassificationAggregationStrategy, # noqa: F401\n TokenClassificationInput, # noqa: F401\n TokenClassificationOutputElement, # noqa: F401\n TokenClassificationParameters, # noqa: F401\n TranslationInput, # noqa: F401\n TranslationOutput, # noqa: F401\n TranslationParameters, # noqa: F401\n TranslationTruncationStrategy, # noqa: F401\n TypeEnum, # noqa: F401\n VideoClassificationInput, # noqa: F401\n VideoClassificationOutputElement, # noqa: F401\n VideoClassificationOutputTransform, # noqa: F401\n VideoClassificationParameters, # noqa: F401\n VisualQuestionAnsweringInput, # noqa: F401\n VisualQuestionAnsweringInputData, # noqa: F401\n VisualQuestionAnsweringOutputElement, # noqa: F401\n VisualQuestionAnsweringParameters, # noqa: F401\n ZeroShotClassificationInput, # noqa: F401\n ZeroShotClassificationOutputElement, # 
noqa: F401\n ZeroShotClassificationParameters, # noqa: F401\n ZeroShotImageClassificationInput, # noqa: F401\n ZeroShotImageClassificationOutputElement, # noqa: F401\n ZeroShotImageClassificationParameters, # noqa: F401\n ZeroShotObjectDetectionBoundingBox, # noqa: F401\n ZeroShotObjectDetectionInput, # noqa: F401\n ZeroShotObjectDetectionOutputElement, # noqa: F401\n ZeroShotObjectDetectionParameters, # noqa: F401\n )\n from .inference._mcp.agent import Agent # noqa: F401\n from .inference._mcp.mcp_client import MCPClient # noqa: F401\n from .inference_api import InferenceApi # noqa: F401\n from .keras_mixin import (\n KerasModelHubMixin, # noqa: F401\n from_pretrained_keras, # noqa: F401\n push_to_hub_keras, # noqa: F401\n save_pretrained_keras, # noqa: F401\n )\n from .repocard import (\n DatasetCard, # noqa: F401\n ModelCard, # noqa: F401\n RepoCard, # noqa: F401\n SpaceCard, # noqa: F401\n metadata_eval_result, # noqa: F401\n metadata_load, # noqa: F401\n metadata_save, # noqa: F401\n metadata_update, # noqa: F401\n )\n from .repocard_data import (\n CardData, # noqa: F401\n DatasetCardData, # noqa: F401\n EvalResult, # noqa: F401\n ModelCardData, # noqa: F401\n SpaceCardData, # noqa: F401\n )\n from .repository import Repository # noqa: F401\n from .serialization import (\n StateDictSplit, # noqa: F401\n get_tf_storage_size, # noqa: F401\n get_torch_storage_id, # noqa: F401\n get_torch_storage_size, # noqa: F401\n load_state_dict_from_file, # noqa: F401\n load_torch_model, # noqa: F401\n save_torch_model, # noqa: F401\n save_torch_state_dict, # noqa: F401\n split_state_dict_into_shards_factory, # noqa: F401\n split_tf_state_dict_into_shards, # noqa: F401\n split_torch_state_dict_into_shards, # noqa: F401\n )\n from .serialization._dduf import (\n DDUFEntry, # noqa: F401\n export_entries_as_dduf, # noqa: F401\n export_folder_as_dduf, # noqa: F401\n read_dduf_file, # noqa: F401\n )\n from .utils import (\n CachedFileInfo, # noqa: F401\n CachedRepoInfo, # noqa: F401\n CachedRevisionInfo, # noqa: F401\n CacheNotFound, # noqa: F401\n CorruptedCacheException, # noqa: F401\n DeleteCacheStrategy, # noqa: F401\n HFCacheInfo, # noqa: F401\n HfFolder, # noqa: F401\n cached_assets_path, # noqa: F401\n configure_http_backend, # noqa: F401\n dump_environment_info, # noqa: F401\n get_session, # noqa: F401\n get_token, # noqa: F401\n logging, # noqa: F401\n scan_cache_dir, # noqa: F401\n )\n
|
.venv\Lib\site-packages\huggingface_hub\__init__.py
|
__init__.py
|
Python
| 50,644 | 0.75 | 0.011456 | 0.046703 |
node-utils
| 892 |
2025-03-26T09:34:51.867140
|
BSD-3-Clause
| false |
7b36434f13ba599de2bd999b1d27ffa2
|
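The `_attach` helper in the `huggingface_hub/__init__.py` record above implements PEP 562 module-level `__getattr__`: importing a name from the package triggers the import of its owning submodule only on first access, and the `EAGER_IMPORT` loop simply forces every name up front to surface import errors early. A minimal sketch of the same pattern for a toy package's `__init__.py` (the `tools` submodule and `greet` attribute are hypothetical, not part of huggingface_hub):

```py
import importlib
import sys

# submodule -> attributes it provides (toy example)
_SUBMOD_ATTRS = {"tools": ["greet"]}
_attr_to_module = {attr: mod for mod, attrs in _SUBMOD_ATTRS.items() for attr in attrs}
__all__ = sorted(_attr_to_module)


def __getattr__(name):
    # PEP 562: only called when `name` is not already a module global.
    if name in _attr_to_module:
        submod = importlib.import_module(f"{__name__}.{_attr_to_module[name]}")
        attr = getattr(submod, name)
        # Cache on the package so __getattr__ is not hit again for this name.
        sys.modules[__name__].__dict__[name] = attr
        return attr
    raise AttributeError(f"No {__name__} attribute {name}")
```

With this in place, `from mypkg import greet` behaves like a normal import but defers loading `mypkg.tools` until `greet` is first requested.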
# coding=utf-8\n# Copyright 2023-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains command to download files from the Hub with the CLI.\n\nUsage:\n huggingface-cli download --help\n\n # Download file\n huggingface-cli download gpt2 config.json\n\n # Download entire repo\n huggingface-cli download fffiloni/zeroscope --repo-type=space --revision=refs/pr/78\n\n # Download repo with filters\n huggingface-cli download gpt2 --include="*.safetensors"\n\n # Download with token\n huggingface-cli download Wauplin/private-model --token=hf_***\n\n # Download quietly (no progress bar, no warnings, only the returned path)\n huggingface-cli download gpt2 config.json --quiet\n\n # Download to local dir\n huggingface-cli download gpt2 --local-dir=./models/gpt2\n"""\n\nimport warnings\nfrom argparse import Namespace, _SubParsersAction\nfrom typing import List, Optional\n\nfrom huggingface_hub import logging\nfrom huggingface_hub._snapshot_download import snapshot_download\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.file_download import hf_hub_download\nfrom huggingface_hub.utils import disable_progress_bars, enable_progress_bars\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass DownloadCommand(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: _SubParsersAction):\n download_parser = parser.add_parser("download", help="Download files from the Hub")\n download_parser.add_argument(\n "repo_id", type=str, help="ID of the repo to download from (e.g. `username/repo-name`)."\n )\n download_parser.add_argument(\n "filenames", type=str, nargs="*", help="Files to download (e.g. `config.json`, `data/metadata.jsonl`)."\n )\n download_parser.add_argument(\n "--repo-type",\n choices=["model", "dataset", "space"],\n default="model",\n help="Type of repo to download from (defaults to 'model').",\n )\n download_parser.add_argument(\n "--revision",\n type=str,\n help="An optional Git revision id which can be a branch name, a tag, or a commit hash.",\n )\n download_parser.add_argument(\n "--include", nargs="*", type=str, help="Glob patterns to match files to download."\n )\n download_parser.add_argument(\n "--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to download."\n )\n download_parser.add_argument(\n "--cache-dir", type=str, help="Path to the directory where to save the downloaded files."\n )\n download_parser.add_argument(\n "--local-dir",\n type=str,\n help=(\n "If set, the downloaded file will be placed under this directory. Check out"\n " https://huggingface.co/docs/huggingface_hub/guides/download#download-files-to-local-folder for more"\n " details."\n ),\n )\n download_parser.add_argument(\n "--local-dir-use-symlinks",\n choices=["auto", "True", "False"],\n help=("Deprecated and ignored. 
Downloading to a local directory does not use symlinks anymore."),\n )\n download_parser.add_argument(\n "--force-download",\n action="store_true",\n help="If True, the files will be downloaded even if they are already cached.",\n )\n download_parser.add_argument(\n "--resume-download",\n action="store_true",\n help="Deprecated and ignored. Downloading a file to local dir always attempts to resume previously interrupted downloads (unless hf-transfer is enabled).",\n )\n download_parser.add_argument(\n "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"\n )\n download_parser.add_argument(\n "--quiet",\n action="store_true",\n help="If True, progress bars are disabled and only the path to the downloaded files is printed.",\n )\n download_parser.add_argument(\n "--max-workers",\n type=int,\n default=8,\n help="Maximum number of workers to use for downloading files. Default is 8.",\n )\n download_parser.set_defaults(func=DownloadCommand)\n\n def __init__(self, args: Namespace) -> None:\n self.token = args.token\n self.repo_id: str = args.repo_id\n self.filenames: List[str] = args.filenames\n self.repo_type: str = args.repo_type\n self.revision: Optional[str] = args.revision\n self.include: Optional[List[str]] = args.include\n self.exclude: Optional[List[str]] = args.exclude\n self.cache_dir: Optional[str] = args.cache_dir\n self.local_dir: Optional[str] = args.local_dir\n self.force_download: bool = args.force_download\n self.resume_download: Optional[bool] = args.resume_download or None\n self.quiet: bool = args.quiet\n self.max_workers: int = args.max_workers\n\n if args.local_dir_use_symlinks is not None:\n warnings.warn(\n "Ignoring --local-dir-use-symlinks. Downloading to a local directory does not use symlinks anymore.",\n FutureWarning,\n )\n\n def run(self) -> None:\n if self.quiet:\n disable_progress_bars()\n with warnings.catch_warnings():\n warnings.simplefilter("ignore")\n print(self._download()) # Print path to downloaded files\n enable_progress_bars()\n else:\n logging.set_verbosity_info()\n print(self._download()) # Print path to downloaded files\n logging.set_verbosity_warning()\n\n def _download(self) -> str:\n # Warn user if patterns are ignored\n if len(self.filenames) > 0:\n if self.include is not None and len(self.include) > 0:\n warnings.warn("Ignoring `--include` since filenames have been explicitly set.")\n if self.exclude is not None and len(self.exclude) > 0:\n warnings.warn("Ignoring `--exclude` since filenames have been explicitly set.")\n\n # Single file to download: use `hf_hub_download`\n if len(self.filenames) == 1:\n return hf_hub_download(\n repo_id=self.repo_id,\n repo_type=self.repo_type,\n revision=self.revision,\n filename=self.filenames[0],\n cache_dir=self.cache_dir,\n resume_download=self.resume_download,\n force_download=self.force_download,\n token=self.token,\n local_dir=self.local_dir,\n library_name="huggingface-cli",\n )\n\n # Otherwise: use `snapshot_download` to ensure all files come from the same revision\n elif len(self.filenames) == 0:\n allow_patterns = self.include\n ignore_patterns = self.exclude\n else:\n allow_patterns = self.filenames\n ignore_patterns = None\n\n return snapshot_download(\n repo_id=self.repo_id,\n repo_type=self.repo_type,\n revision=self.revision,\n allow_patterns=allow_patterns,\n ignore_patterns=ignore_patterns,\n resume_download=self.resume_download,\n force_download=self.force_download,\n cache_dir=self.cache_dir,\n token=self.token,\n local_dir=self.local_dir,\n 
library_name="huggingface-cli",\n max_workers=self.max_workers,\n )\n
|
.venv\Lib\site-packages\huggingface_hub\commands\download.py
|
download.py
|
Python
| 8,183 | 0.95 | 0.08 | 0.127778 |
python-kit
| 984 |
2025-06-29T08:15:40.524559
|
BSD-3-Clause
| false |
234baae7222c61290f9567d16ff2746c
|
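The `_download` routing in the command above boils down to two public API calls: one explicit filename goes through `hf_hub_download`, while zero filenames (optionally with glob filters) go through `snapshot_download` so every file comes from the same revision. A minimal equivalent sketch (the repo id, filename, and pattern are examples):

```py
from huggingface_hub import hf_hub_download, snapshot_download

# Exactly one filename -> single-file path,
# like `huggingface-cli download gpt2 config.json`
config_path = hf_hub_download(repo_id="gpt2", filename="config.json")

# No filenames -> whole-repo snapshot at one revision with glob filters,
# like `huggingface-cli download gpt2 --include="*.json"`
repo_path = snapshot_download(repo_id="gpt2", allow_patterns=["*.json"])

print(config_path, repo_path)
```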
# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains command to print information about the environment.\n\nUsage:\n huggingface-cli env\n"""\n\nfrom argparse import _SubParsersAction\n\nfrom ..utils import dump_environment_info\nfrom . import BaseHuggingfaceCLICommand\n\n\nclass EnvironmentCommand(BaseHuggingfaceCLICommand):\n def __init__(self, args):\n self.args = args\n\n @staticmethod\n def register_subcommand(parser: _SubParsersAction):\n env_parser = parser.add_parser("env", help="Print information about the environment.")\n env_parser.set_defaults(func=EnvironmentCommand)\n\n def run(self) -> None:\n dump_environment_info()\n
|
.venv\Lib\site-packages\huggingface_hub\commands\env.py
|
env.py
|
Python
| 1,226 | 0.95 | 0.138889 | 0.448276 |
awesome-app
| 778 |
2023-08-06T06:44:52.616238
|
Apache-2.0
| false |
112916d995d6009cec517109fa0def19
|
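`EnvironmentCommand` above is a thin wrapper; the same report is available directly from Python (a one-line sketch):

```py
from huggingface_hub import dump_environment_info

# Prints the same report as `huggingface-cli env` and returns the collected values.
info = dump_environment_info()
```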
# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom argparse import ArgumentParser\n\nfrom huggingface_hub.commands.delete_cache import DeleteCacheCommand\nfrom huggingface_hub.commands.download import DownloadCommand\nfrom huggingface_hub.commands.env import EnvironmentCommand\nfrom huggingface_hub.commands.lfs import LfsCommands\nfrom huggingface_hub.commands.repo import RepoCommands\nfrom huggingface_hub.commands.repo_files import RepoFilesCommand\nfrom huggingface_hub.commands.scan_cache import ScanCacheCommand\nfrom huggingface_hub.commands.tag import TagCommands\nfrom huggingface_hub.commands.upload import UploadCommand\nfrom huggingface_hub.commands.upload_large_folder import UploadLargeFolderCommand\nfrom huggingface_hub.commands.user import UserCommands\nfrom huggingface_hub.commands.version import VersionCommand\n\n\ndef main():\n parser = ArgumentParser("huggingface-cli", usage="huggingface-cli <command> [<args>]")\n commands_parser = parser.add_subparsers(help="huggingface-cli command helpers")\n\n # Register commands\n DownloadCommand.register_subcommand(commands_parser)\n UploadCommand.register_subcommand(commands_parser)\n RepoFilesCommand.register_subcommand(commands_parser)\n EnvironmentCommand.register_subcommand(commands_parser)\n UserCommands.register_subcommand(commands_parser)\n RepoCommands.register_subcommand(commands_parser)\n LfsCommands.register_subcommand(commands_parser)\n ScanCacheCommand.register_subcommand(commands_parser)\n DeleteCacheCommand.register_subcommand(commands_parser)\n TagCommands.register_subcommand(commands_parser)\n VersionCommand.register_subcommand(commands_parser)\n\n # Experimental\n UploadLargeFolderCommand.register_subcommand(commands_parser)\n\n # Let's go\n args = parser.parse_args()\n if not hasattr(args, "func"):\n parser.print_help()\n exit(1)\n\n # Run\n service = args.func(args)\n service.run()\n\n\nif __name__ == "__main__":\n main()\n
|
.venv\Lib\site-packages\huggingface_hub\commands\huggingface_cli.py
|
huggingface_cli.py
|
Python
| 2,523 | 0.95 | 0.063492 | 0.320755 |
vue-tools
| 849 |
2024-05-11T01:50:00.418053
|
MIT
| false |
f3ece60f8a55feaedfef921a96f9950e
|
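Every command in `main()` above follows the same contract: `register_subcommand` wires an argparse subparser and stores a callable in `func`, then `main()` runs `args.func(args).run()`. A hedged sketch of a new subcommand plugging into this pattern (the `hello` command is hypothetical, not part of huggingface_hub):

```py
from argparse import Namespace, _SubParsersAction

from huggingface_hub.commands import BaseHuggingfaceCLICommand


class HelloCommand(BaseHuggingfaceCLICommand):
    @staticmethod
    def register_subcommand(parser: _SubParsersAction):
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.add_argument("--name", type=str, default="world")
        # Store the class itself as `func`; main() instantiates it with the
        # parsed args and calls run().
        hello_parser.set_defaults(func=HelloCommand)

    def __init__(self, args: Namespace) -> None:
        self.name = args.name

    def run(self) -> None:
        print(f"Hello, {self.name}!")
```

Registering it would take a single `HelloCommand.register_subcommand(commands_parser)` call alongside the existing ones in `main()`.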
"""\nImplementation of a custom transfer agent for the transfer type "multipart" for\ngit-lfs.\n\nInspired by:\ngithub.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py\n\nSpec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md\n\n\nTo launch debugger while developing:\n\n``` [lfs "customtransfer.multipart"]\npath = /path/to/huggingface_hub/.env/bin/python args = -m debugpy --listen 5678\n--wait-for-client\n/path/to/huggingface_hub/src/huggingface_hub/commands/huggingface_cli.py\nlfs-multipart-upload ```"""\n\nimport json\nimport os\nimport subprocess\nimport sys\nfrom argparse import _SubParsersAction\nfrom typing import Dict, List, Optional\n\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.lfs import LFS_MULTIPART_UPLOAD_COMMAND\n\nfrom ..utils import get_session, hf_raise_for_status, logging\nfrom ..utils._lfs import SliceFileObj\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass LfsCommands(BaseHuggingfaceCLICommand):\n """\n Implementation of a custom transfer agent for the transfer type "multipart"\n for git-lfs. This lets users upload large files >5GB 🔥. Spec for LFS custom\n transfer agent is:\n https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md\n\n This introduces two commands to the CLI:\n\n 1. $ huggingface-cli lfs-enable-largefiles\n\n This should be executed once for each model repo that contains a model file\n >5GB. It's documented in the error message you get if you just try to git\n push a 5GB file without having enabled it before.\n\n 2. $ huggingface-cli lfs-multipart-upload\n\n This command is called by lfs directly and is not meant to be called by the\n user.\n """\n\n @staticmethod\n def register_subcommand(parser: _SubParsersAction):\n enable_parser = parser.add_parser(\n "lfs-enable-largefiles", help="Configure your repository to enable upload of files > 5GB."\n )\n enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.")\n enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args))\n\n # Command will get called by git-lfs, do not call it directly.\n upload_parser = parser.add_parser(LFS_MULTIPART_UPLOAD_COMMAND, add_help=False)\n upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args))\n\n\nclass LfsEnableCommand:\n def __init__(self, args):\n self.args = args\n\n def run(self):\n local_path = os.path.abspath(self.args.path)\n if not os.path.isdir(local_path):\n print("This does not look like a valid git repo.")\n exit(1)\n subprocess.run(\n "git config lfs.customtransfer.multipart.path huggingface-cli".split(),\n check=True,\n cwd=local_path,\n )\n subprocess.run(\n f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(),\n check=True,\n cwd=local_path,\n )\n print("Local repo set up for largefiles")\n\n\ndef write_msg(msg: Dict):\n """Write out the message in Line delimited JSON."""\n msg_str = json.dumps(msg) + "\n"\n sys.stdout.write(msg_str)\n sys.stdout.flush()\n\n\ndef read_msg() -> Optional[Dict]:\n """Read Line delimited JSON from stdin."""\n msg = json.loads(sys.stdin.readline().strip())\n\n if "terminate" in (msg.get("type"), msg.get("event")):\n # terminate message received\n return None\n\n if msg.get("event") not in ("download", "upload"):\n logger.critical("Received unexpected message")\n sys.exit(1)\n\n return msg\n\n\nclass LfsUploadCommand:\n def __init__(self, args) -> None:\n self.args = args\n\n def run(self) -> None:\n # Immediately after 
invoking a custom transfer process, git-lfs\n # sends initiation data to the process over stdin.\n # This tells the process useful information about the configuration.\n init_msg = json.loads(sys.stdin.readline().strip())\n if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"):\n write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}})\n sys.exit(1)\n\n # The transfer process should use the information it needs from the\n # initiation structure, and also perform any one-off setup tasks it\n # needs to do. It should then respond on stdout with a simple empty\n # confirmation structure, as follows:\n write_msg({})\n\n # After the initiation exchange, git-lfs will send any number of\n # transfer requests to the stdin of the transfer process, in a serial sequence.\n while True:\n msg = read_msg()\n if msg is None:\n # When all transfers have been processed, git-lfs will send\n # a terminate event to the stdin of the transfer process.\n # On receiving this message the transfer process should\n # clean up and terminate. No response is expected.\n sys.exit(0)\n\n oid = msg["oid"]\n filepath = msg["path"]\n completion_url = msg["action"]["href"]\n header = msg["action"]["header"]\n chunk_size = int(header.pop("chunk_size"))\n presigned_urls: List[str] = list(header.values())\n\n # Send a "started" progress event to allow other workers to start.\n # Otherwise they're delayed until first "progress" event is reported,\n # i.e. after the first 5GB by default (!)\n write_msg(\n {\n "event": "progress",\n "oid": oid,\n "bytesSoFar": 1,\n "bytesSinceLast": 0,\n }\n )\n\n parts = []\n with open(filepath, "rb") as file:\n for i, presigned_url in enumerate(presigned_urls):\n with SliceFileObj(\n file,\n seek_from=i * chunk_size,\n read_limit=chunk_size,\n ) as data:\n r = get_session().put(presigned_url, data=data)\n hf_raise_for_status(r)\n parts.append(\n {\n "etag": r.headers.get("etag"),\n "partNumber": i + 1,\n }\n )\n # In order to support progress reporting while data is uploading / downloading,\n # the transfer process should post messages to stdout\n write_msg(\n {\n "event": "progress",\n "oid": oid,\n "bytesSoFar": (i + 1) * chunk_size,\n "bytesSinceLast": chunk_size,\n }\n )\n # Not precise but that's ok.\n\n r = get_session().post(\n completion_url,\n json={\n "oid": oid,\n "parts": parts,\n },\n )\n hf_raise_for_status(r)\n\n write_msg({"event": "complete", "oid": oid})\n
|
.venv\Lib\site-packages\huggingface_hub\commands\lfs.py
|
lfs.py
|
Python
| 7,342 | 0.95 | 0.145 | 0.130435 |
react-lib
| 313 |
2024-07-25T13:40:00.875812
|
BSD-3-Clause
| false |
b8bcf310983b8b03349b77e6de959bd7
|
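The upload agent above speaks git-lfs's custom-transfer protocol: every message is a single JSON object per line over stdin/stdout, exactly what `write_msg`/`read_msg` serialize. A sketch of the message sequence it handles during one upload (the oid and byte counts are made-up example values):

```py
import json

messages = [
    # git-lfs opens the session with an init handshake...
    {"event": "init", "operation": "upload", "concurrent": True},
    # ...which the agent acknowledges with an empty object.
    {},
    # The agent then reports one progress line per uploaded chunk,
    {"event": "progress", "oid": "abc123", "bytesSoFar": 1, "bytesSinceLast": 0},
    # signals completion of the transfer,
    {"event": "complete", "oid": "abc123"},
    # and git-lfs finally ends the session with a terminate message.
    {"type": "terminate"},
]

for msg in messages:
    print(json.dumps(msg))  # line-delimited JSON, as in write_msg() above
```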
# Copyright 2025 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains commands to interact with repositories on the Hugging Face Hub.\n\nUsage:\n # create a new dataset repo on the Hub\n huggingface-cli repo create my-cool-dataset --repo-type=dataset\n\n # create a private model repo on the Hub\n huggingface-cli repo create my-cool-model --private\n"""\n\nimport argparse\nfrom argparse import _SubParsersAction\nfrom typing import Optional\n\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.commands._cli_utils import ANSI\nfrom huggingface_hub.constants import SPACES_SDK_TYPES\nfrom huggingface_hub.hf_api import HfApi\nfrom huggingface_hub.utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass RepoCommands(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: _SubParsersAction):\n repo_parser = parser.add_parser("repo", help="{create} Commands to interact with your huggingface.co repos.")\n repo_subparsers = repo_parser.add_subparsers(help="huggingface.co repos related commands")\n repo_create_parser = repo_subparsers.add_parser("create", help="Create a new repo on huggingface.co")\n repo_create_parser.add_argument(\n "repo_id",\n type=str,\n help="The ID of the repo to create to (e.g. `username/repo-name`). The username is optional and will be set to your username if not provided.",\n )\n repo_create_parser.add_argument(\n "--repo-type",\n type=str,\n help='Optional: set to "dataset" or "space" if creating a dataset or space, default is model.',\n )\n repo_create_parser.add_argument(\n "--space_sdk",\n type=str,\n help='Optional: Hugging Face Spaces SDK type. Required when --type is set to "space".',\n choices=SPACES_SDK_TYPES,\n )\n repo_create_parser.add_argument(\n "--private",\n action="store_true",\n help="Whether to create a private repository. Defaults to public unless the organization's default is private.",\n )\n repo_create_parser.add_argument(\n "--token",\n type=str,\n help="Hugging Face token. Will default to the locally saved token if not provided.",\n )\n repo_create_parser.add_argument(\n "--exist-ok",\n action="store_true",\n help="Do not raise an error if repo already exists.",\n )\n repo_create_parser.add_argument(\n "--resource-group-id",\n type=str,\n help="Resource group in which to create the repo. 
Resource groups are only available for Enterprise Hub organizations.",\n        )\n        repo_create_parser.add_argument(\n            "--type",\n            type=str,\n            help="[Deprecated]: use --repo-type instead.",\n        )\n        repo_create_parser.add_argument(\n            "-y",\n            "--yes",\n            action="store_true",\n            help="[Deprecated] no effect.",\n        )\n        repo_create_parser.add_argument(\n            "--organization", type=str, help="[Deprecated] Pass the organization namespace directly in the repo_id."\n        )\n        repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))\n\n\nclass RepoCreateCommand:\n    def __init__(self, args: argparse.Namespace):\n        self.repo_id: str = args.repo_id\n        self.repo_type: Optional[str] = args.repo_type or args.type\n        self.space_sdk: Optional[str] = args.space_sdk\n        self.organization: Optional[str] = args.organization\n        self.yes: bool = args.yes\n        self.private: bool = args.private\n        self.token: Optional[str] = args.token\n        self.exist_ok: bool = args.exist_ok\n        self.resource_group_id: Optional[str] = args.resource_group_id\n\n        if args.type is not None:\n            print(\n                ANSI.yellow(\n                    "The --type argument is deprecated and will be removed in a future version. Use --repo-type instead."\n                )\n            )\n        if self.organization is not None:\n            print(\n                ANSI.yellow(\n                    "The --organization argument is deprecated and will be removed in a future version. Pass the organization namespace directly in the repo_id."\n                )\n            )\n        if self.yes:\n            print(\n                ANSI.yellow(\n                    "The --yes argument is deprecated and will be removed in a future version. It does not have any effect."\n                )\n            )\n\n        self._api = HfApi()\n\n    def run(self):\n        if self.organization is not None:\n            if "/" in self.repo_id:\n                print(ANSI.red("You cannot pass both --organization and a repo_id with a namespace."))\n                exit(1)\n            self.repo_id = f"{self.organization}/{self.repo_id}"\n\n        repo_url = self._api.create_repo(\n            repo_id=self.repo_id,\n            repo_type=self.repo_type,\n            private=self.private,\n            token=self.token,\n            exist_ok=self.exist_ok,\n            resource_group_id=self.resource_group_id,\n            space_sdk=self.space_sdk,\n        )\n        print(f"Successfully created {ANSI.bold(repo_url.repo_id)} on the Hub.")\n        print(f"Your repo is now available at {ANSI.bold(repo_url)}")\n
|
.venv\Lib\site-packages\huggingface_hub\commands\repo.py
|
repo.py
|
Python
| 5,923 | 0.95 | 0.108844 | 0.112782 |
vue-tools
| 439 |
2025-05-20T01:41:57.908988
|
GPL-3.0
| false |
3011ab212f7f289794c68d84b05ecd5b
|
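`RepoCreateCommand.run` above is a thin shell around `HfApi.create_repo`; an equivalent Python sketch (the repo id is an example, and a valid token must be available locally):

```py
from huggingface_hub import HfApi

api = HfApi()  # picks up the locally saved token by default
repo_url = api.create_repo(
    repo_id="username/my-cool-dataset",  # example id
    repo_type="dataset",
    private=True,
    exist_ok=True,  # do not fail if the repo already exists
)
print(f"Created {repo_url.repo_id} at {repo_url}")
```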
# coding=utf-8\n# Copyright 2023-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains command to update or delete files in a repository using the CLI.\n\nUsage:\n    # delete all\n    huggingface-cli repo-files <repo_id> delete "*"\n\n    # delete single file\n    huggingface-cli repo-files <repo_id> delete file.txt\n\n    # delete single folder\n    huggingface-cli repo-files <repo_id> delete folder/\n\n    # delete multiple\n    huggingface-cli repo-files <repo_id> delete file.txt folder/ file2.txt\n\n    # delete multiple patterns\n    huggingface-cli repo-files <repo_id> delete file.txt "*.json" "folder/*.parquet"\n\n    # delete from different revision / repo-type\n    huggingface-cli repo-files <repo_id> delete file.txt --revision=refs/pr/1 --repo-type=dataset\n"""\n\nfrom argparse import _SubParsersAction\nfrom typing import List, Optional\n\nfrom huggingface_hub import logging\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.hf_api import HfApi\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass DeleteFilesSubCommand:\n    def __init__(self, args) -> None:\n        self.args = args\n        self.repo_id: str = args.repo_id\n        self.repo_type: Optional[str] = args.repo_type\n        self.revision: Optional[str] = args.revision\n        self.api: HfApi = HfApi(token=args.token, library_name="huggingface-cli")\n        self.patterns: List[str] = args.patterns\n        self.commit_message: Optional[str] = args.commit_message\n        self.commit_description: Optional[str] = args.commit_description\n        self.create_pr: bool = args.create_pr\n        self.token: Optional[str] = args.token\n\n    def run(self) -> None:\n        logging.set_verbosity_info()\n        url = self.api.delete_files(\n            delete_patterns=self.patterns,\n            repo_id=self.repo_id,\n            repo_type=self.repo_type,\n            revision=self.revision,\n            commit_message=self.commit_message,\n            commit_description=self.commit_description,\n            create_pr=self.create_pr,\n        )\n        print(f"Files successfully deleted from repo. Commit: {url}.")\n        logging.set_verbosity_warning()\n\n\nclass RepoFilesCommand(BaseHuggingfaceCLICommand):\n    @staticmethod\n    def register_subcommand(parser: _SubParsersAction):\n        repo_files_parser = parser.add_parser("repo-files", help="Manage files in a repo on the Hub")\n        repo_files_parser.add_argument(\n            "repo_id", type=str, help="The ID of the repo to manage (e.g. `username/repo-name`)."\n        )\n        repo_files_subparsers = repo_files_parser.add_subparsers(\n            help="Action to execute against the files.",\n            required=True,\n        )\n        delete_subparser = repo_files_subparsers.add_parser(\n            "delete",\n            help="Delete files from a repo on the Hub",\n        )\n        delete_subparser.set_defaults(func=lambda args: DeleteFilesSubCommand(args))\n        delete_subparser.add_argument(\n            "patterns",\n            nargs="+",\n            type=str,\n            help="Glob patterns to match files to delete.",\n        )\n        delete_subparser.add_argument(\n            "--repo-type",\n            choices=["model", "dataset", "space"],\n            default="model",\n            help="Type of the repo to manage (e.g. 
`dataset`).",\n )\n delete_subparser.add_argument(\n "--revision",\n type=str,\n help=(\n "An optional Git revision to push to. It can be a branch name "\n "or a PR reference. If revision does not"\n " exist and `--create-pr` is not set, a branch will be automatically created."\n ),\n )\n delete_subparser.add_argument(\n "--commit-message", type=str, help="The summary / title / first line of the generated commit."\n )\n delete_subparser.add_argument(\n "--commit-description", type=str, help="The description of the generated commit."\n )\n delete_subparser.add_argument(\n "--create-pr", action="store_true", help="Whether to create a new Pull Request for these changes."\n )\n repo_files_parser.add_argument(\n "--token",\n type=str,\n help="A User Access Token generated from https://huggingface.co/settings/tokens",\n )\n\n repo_files_parser.set_defaults(func=RepoFilesCommand)\n
|
.venv\Lib\site-packages\huggingface_hub\commands\repo_files.py
|
repo_files.py
|
Python
| 4,923 | 0.95 | 0.054688 | 0.178571 |
python-kit
| 763 |
2025-04-22T00:23:10.125933
|
MIT
| false |
b93fa6f2cb8983e2c004e5bd30c0f67f
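The `repo-files delete` subcommand above is a thin wrapper around `HfApi.delete_files`. A minimal programmatic sketch of the same operation — `username/my-model` is a hypothetical repo id, and a token is assumed to be stored locally:

```py
from huggingface_hub import HfApi

api = HfApi()  # picks up the locally stored token, as the CLI does

# Equivalent of: huggingface-cli repo-files username/my-model delete "*.json" logs/
url = api.delete_files(
    repo_id="username/my-model",          # hypothetical repo id
    delete_patterns=["*.json", "logs/"],  # same glob patterns as the CLI's `patterns` argument
    repo_type="model",
    commit_message="Remove JSON files and logs",
)
print(f"Files correctly deleted from repo. Commit: {url}.")
```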
|
# coding=utf-8\n# Copyright 2022-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains command to scan the HF cache directory.\n\nUsage:\n huggingface-cli scan-cache\n huggingface-cli scan-cache -v\n huggingface-cli scan-cache -vvv\n huggingface-cli scan-cache --dir ~/.cache/huggingface/hub\n"""\n\nimport time\nfrom argparse import Namespace, _SubParsersAction\nfrom typing import Optional\n\nfrom ..utils import CacheNotFound, HFCacheInfo, scan_cache_dir\nfrom . import BaseHuggingfaceCLICommand\nfrom ._cli_utils import ANSI, tabulate\n\n\nclass ScanCacheCommand(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: _SubParsersAction):\n scan_cache_parser = parser.add_parser("scan-cache", help="Scan cache directory.")\n\n scan_cache_parser.add_argument(\n "--dir",\n type=str,\n default=None,\n help="cache directory to scan (optional). Default to the default HuggingFace cache.",\n )\n scan_cache_parser.add_argument(\n "-v",\n "--verbose",\n action="count",\n default=0,\n help="show a more verbose output",\n )\n scan_cache_parser.set_defaults(func=ScanCacheCommand)\n\n def __init__(self, args: Namespace) -> None:\n self.verbosity: int = args.verbose\n self.cache_dir: Optional[str] = args.dir\n\n def run(self):\n try:\n t0 = time.time()\n hf_cache_info = scan_cache_dir(self.cache_dir)\n t1 = time.time()\n except CacheNotFound as exc:\n cache_dir = exc.cache_dir\n print(f"Cache directory not found: {cache_dir}")\n return\n\n self._print_hf_cache_info_as_table(hf_cache_info)\n\n print(\n f"\nDone in {round(t1 - t0, 1)}s. 
Scanned {len(hf_cache_info.repos)} repo(s)"\n f" for a total of {ANSI.red(hf_cache_info.size_on_disk_str)}."\n )\n if len(hf_cache_info.warnings) > 0:\n message = f"Got {len(hf_cache_info.warnings)} warning(s) while scanning."\n if self.verbosity >= 3:\n print(ANSI.gray(message))\n for warning in hf_cache_info.warnings:\n print(ANSI.gray(warning))\n else:\n print(ANSI.gray(message + " Use -vvv to print details."))\n\n def _print_hf_cache_info_as_table(self, hf_cache_info: HFCacheInfo) -> None:\n print(get_table(hf_cache_info, verbosity=self.verbosity))\n\n\ndef get_table(hf_cache_info: HFCacheInfo, *, verbosity: int = 0) -> str:\n """Generate a table from the [`HFCacheInfo`] object.\n\n Pass `verbosity=0` to get a table with a single row per repo, with columns\n "repo_id", "repo_type", "size_on_disk", "nb_files", "last_accessed", "last_modified", "refs", "local_path".\n\n Pass `verbosity=1` to get a table with a row per repo and revision (thus multiple rows can appear for a single repo), with columns\n "repo_id", "repo_type", "revision", "size_on_disk", "nb_files", "last_modified", "refs", "local_path".\n\n Example:\n ```py\n >>> from huggingface_hub.utils import scan_cache_dir\n >>> from huggingface_hub.commands.scan_cache import get_table\n\n >>> hf_cache_info = scan_cache_dir()\n HFCacheInfo(...)\n\n >>> print(get_table(hf_cache_info, verbosity=0))\n REPO ID REPO TYPE SIZE ON DISK NB FILES LAST_ACCESSED LAST_MODIFIED REFS LOCAL PATH\n --------------------------------------------------- --------- ------------ -------- ------------- ------------- ---- --------------------------------------------------------------------------------------------------\n roberta-base model 2.7M 5 1 day ago 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--roberta-base\n suno/bark model 8.8K 1 1 week ago 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--suno--bark\n t5-base model 893.8M 4 4 days ago 7 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-base\n t5-large model 3.0G 4 5 weeks ago 5 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-large\n\n >>> print(get_table(hf_cache_info, verbosity=1))\n REPO ID REPO TYPE REVISION SIZE ON DISK NB FILES LAST_MODIFIED REFS LOCAL PATH\n --------------------------------------------------- --------- ---------------------------------------- ------------ -------- ------------- ---- -----------------------------------------------------------------------------------------------------------------------------------------------------\n roberta-base model e2da8e2f811d1448a5b465c236feacd80ffbac7b 2.7M 5 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--roberta-base\\snapshots\\e2da8e2f811d1448a5b465c236feacd80ffbac7b\n suno/bark model 70a8a7d34168586dc5d028fa9666aceade177992 8.8K 1 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--suno--bark\\snapshots\\70a8a7d34168586dc5d028fa9666aceade177992\n t5-base model a9723ea7f1b39c1eae772870f3b547bf6ef7e6c1 893.8M 4 7 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-base\\snapshots\\a9723ea7f1b39c1eae772870f3b547bf6ef7e6c1\n t5-large model 150ebc2c4b72291e770f58e6057481c8d2ed331a 3.0G 4 5 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-large\\snapshots\\150ebc2c4b72291e770f58e6057481c8d2ed331a ```\n ```\n\n Args:\n hf_cache_info ([`HFCacheInfo`]):\n The HFCacheInfo object to print.\n verbosity (`int`, *optional*):\n The verbosity level. 
Defaults to 0.\n\n Returns:\n `str`: The table as a string.\n """\n if verbosity == 0:\n return tabulate(\n rows=[\n [\n repo.repo_id,\n repo.repo_type,\n "{:>12}".format(repo.size_on_disk_str),\n repo.nb_files,\n repo.last_accessed_str,\n repo.last_modified_str,\n ", ".join(sorted(repo.refs)),\n str(repo.repo_path),\n ]\n for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path)\n ],\n headers=[\n "REPO ID",\n "REPO TYPE",\n "SIZE ON DISK",\n "NB FILES",\n "LAST_ACCESSED",\n "LAST_MODIFIED",\n "REFS",\n "LOCAL PATH",\n ],\n )\n else:\n return tabulate(\n rows=[\n [\n repo.repo_id,\n repo.repo_type,\n revision.commit_hash,\n "{:>12}".format(revision.size_on_disk_str),\n revision.nb_files,\n revision.last_modified_str,\n ", ".join(sorted(revision.refs)),\n str(revision.snapshot_path),\n ]\n for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path)\n for revision in sorted(repo.revisions, key=lambda revision: revision.commit_hash)\n ],\n headers=[\n "REPO ID",\n "REPO TYPE",\n "REVISION",\n "SIZE ON DISK",\n "NB FILES",\n "LAST_MODIFIED",\n "REFS",\n "LOCAL PATH",\n ],\n )\n
|
.venv\Lib\site-packages\huggingface_hub\commands\scan_cache.py
|
scan_cache.py
|
Python
| 8,563 | 0.95 | 0.099448 | 0.0875 |
awesome-app
| 976 |
2025-02-03T17:20:50.032138
|
BSD-3-Clause
| false |
ee770b5f0f4dc7af717b82ae7e1ad225
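`get_table` above is reusable outside the CLI. A small sketch that scans the default cache and prints the one-row-per-revision table, exactly as `huggingface-cli scan-cache -v` would:

```py
from huggingface_hub.utils import scan_cache_dir
from huggingface_hub.commands.scan_cache import get_table

# Scan the default HF cache; pass a path to scan_cache_dir() to target another directory
hf_cache_info = scan_cache_dir()

# verbosity=1 prints one row per (repo, revision) instead of one row per repo
print(get_table(hf_cache_info, verbosity=1))
print(f"Total size on disk: {hf_cache_info.size_on_disk_str}")
```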
|
# coding=utf-8\n# Copyright 2024-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n"""Contains commands to perform tag management with the CLI.\n\nUsage Examples:\n - Create a tag:\n $ huggingface-cli tag user/my-model 1.0 --message "First release"\n $ huggingface-cli tag user/my-model 1.0 -m "First release" --revision develop\n $ huggingface-cli tag user/my-dataset 1.0 -m "First release" --repo-type dataset\n $ huggingface-cli tag user/my-space 1.0\n - List all tags:\n $ huggingface-cli tag -l user/my-model\n $ huggingface-cli tag --list user/my-dataset --repo-type dataset\n - Delete a tag:\n $ huggingface-cli tag -d user/my-model 1.0\n $ huggingface-cli tag --delete user/my-dataset 1.0 --repo-type dataset\n $ huggingface-cli tag -d user/my-space 1.0 -y\n"""\n\nfrom argparse import Namespace, _SubParsersAction\n\nfrom requests.exceptions import HTTPError\n\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.constants import (\n REPO_TYPES,\n)\nfrom huggingface_hub.hf_api import HfApi\n\nfrom ..errors import HfHubHTTPError, RepositoryNotFoundError, RevisionNotFoundError\nfrom ._cli_utils import ANSI\n\n\nclass TagCommands(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: _SubParsersAction):\n tag_parser = parser.add_parser("tag", help="(create, list, delete) tags for a repo in the hub")\n\n tag_parser.add_argument("repo_id", type=str, help="The ID of the repo to tag (e.g. 
`username/repo-name`).")\n tag_parser.add_argument("tag", nargs="?", type=str, help="The name of the tag for creation or deletion.")\n tag_parser.add_argument("-m", "--message", type=str, help="The description of the tag to create.")\n tag_parser.add_argument("--revision", type=str, help="The git revision to tag.")\n tag_parser.add_argument(\n "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens."\n )\n tag_parser.add_argument(\n "--repo-type",\n choices=["model", "dataset", "space"],\n default="model",\n help="Set the type of repository (model, dataset, or space).",\n )\n tag_parser.add_argument("-y", "--yes", action="store_true", help="Answer Yes to prompts automatically.")\n\n tag_parser.add_argument("-l", "--list", action="store_true", help="List tags for a repository.")\n tag_parser.add_argument("-d", "--delete", action="store_true", help="Delete a tag for a repository.")\n\n tag_parser.set_defaults(func=lambda args: handle_commands(args))\n\n\ndef handle_commands(args: Namespace):\n if args.list:\n return TagListCommand(args)\n elif args.delete:\n return TagDeleteCommand(args)\n else:\n return TagCreateCommand(args)\n\n\nclass TagCommand:\n def __init__(self, args: Namespace):\n self.args = args\n self.api = HfApi(token=self.args.token)\n self.repo_id = self.args.repo_id\n self.repo_type = self.args.repo_type\n if self.repo_type not in REPO_TYPES:\n print("Invalid repo --repo-type")\n exit(1)\n\n\nclass TagCreateCommand(TagCommand):\n def run(self):\n print(f"You are about to create tag {ANSI.bold(self.args.tag)} on {self.repo_type} {ANSI.bold(self.repo_id)}")\n\n try:\n self.api.create_tag(\n repo_id=self.repo_id,\n tag=self.args.tag,\n tag_message=self.args.message,\n revision=self.args.revision,\n repo_type=self.repo_type,\n )\n except RepositoryNotFoundError:\n print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.")\n exit(1)\n except RevisionNotFoundError:\n print(f"Revision {ANSI.bold(self.args.revision)} not found.")\n exit(1)\n except HfHubHTTPError as e:\n if e.response.status_code == 409:\n print(f"Tag {ANSI.bold(self.args.tag)} already exists on {ANSI.bold(self.repo_id)}")\n exit(1)\n raise e\n\n print(f"Tag {ANSI.bold(self.args.tag)} created on {ANSI.bold(self.repo_id)}")\n\n\nclass TagListCommand(TagCommand):\n def run(self):\n try:\n refs = self.api.list_repo_refs(\n repo_id=self.repo_id,\n repo_type=self.repo_type,\n )\n except RepositoryNotFoundError:\n print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.")\n exit(1)\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n if len(refs.tags) == 0:\n print("No tags found")\n exit(0)\n print(f"Tags for {self.repo_type} {ANSI.bold(self.repo_id)}:")\n for tag in refs.tags:\n print(tag.name)\n\n\nclass TagDeleteCommand(TagCommand):\n def run(self):\n print(f"You are about to delete tag {ANSI.bold(self.args.tag)} on {self.repo_type} {ANSI.bold(self.repo_id)}")\n\n if not self.args.yes:\n choice = input("Proceed? [Y/n] ").lower()\n if choice not in ("", "y", "yes"):\n print("Abort")\n exit()\n try:\n self.api.delete_tag(repo_id=self.repo_id, tag=self.args.tag, repo_type=self.repo_type)\n except RepositoryNotFoundError:\n print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.")\n exit(1)\n except RevisionNotFoundError:\n print(f"Tag {ANSI.bold(self.args.tag)} not found on {ANSI.bold(self.repo_id)}")\n exit(1)\n print(f"Tag {ANSI.bold(self.args.tag)} deleted on {ANSI.bold(self.repo_id)}")\n
|
.venv\Lib\site-packages\huggingface_hub\commands\tag.py
|
tag.py
|
Python
| 6,288 | 0.95 | 0.169811 | 0.103704 |
node-utils
| 676 |
2025-03-30T18:48:41.665317
|
MIT
| false |
bc2d55b4a2506ffcd97abf6fdf654f3e
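The three tag subcommands above each map to one `HfApi` call. A sketch of the programmatic equivalents, with `username/my-model` as a hypothetical repo id:

```py
from huggingface_hub import HfApi

api = HfApi()
repo_id = "username/my-model"  # hypothetical repo id

# huggingface-cli tag username/my-model 1.0 -m "First release"
api.create_tag(repo_id=repo_id, tag="1.0", tag_message="First release", repo_type="model")

# huggingface-cli tag -l username/my-model
refs = api.list_repo_refs(repo_id=repo_id, repo_type="model")
for tag in refs.tags:
    print(tag.name)

# huggingface-cli tag -d username/my-model 1.0 -y
api.delete_tag(repo_id=repo_id, tag="1.0", repo_type="model")
```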
|
# coding=utf-8\n# Copyright 2023-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains command to upload a repo or file with the CLI.\n\nUsage:\n # Upload file (implicit)\n huggingface-cli upload my-cool-model ./my-cool-model.safetensors\n\n # Upload file (explicit)\n huggingface-cli upload my-cool-model ./my-cool-model.safetensors model.safetensors\n\n # Upload directory (implicit). If `my-cool-model/` is a directory it will be uploaded, otherwise an exception is raised.\n huggingface-cli upload my-cool-model\n\n # Upload directory (explicit)\n huggingface-cli upload my-cool-model ./models/my-cool-model .\n\n # Upload filtered directory (example: tensorboard logs except for the last run)\n huggingface-cli upload my-cool-model ./model/training /logs --include "*.tfevents.*" --exclude "*20230905*"\n\n # Upload with wildcard\n huggingface-cli upload my-cool-model "./model/training/*.safetensors"\n\n # Upload private dataset\n huggingface-cli upload Wauplin/my-cool-dataset ./data . --repo-type=dataset --private\n\n # Upload with token\n huggingface-cli upload Wauplin/my-cool-model --token=hf_****\n\n # Sync local Space with Hub (upload new files, delete removed files)\n huggingface-cli upload Wauplin/space-example --repo-type=space --exclude="/logs/*" --delete="*" --commit-message="Sync local Space with Hub"\n\n # Schedule commits every 30 minutes\n huggingface-cli upload Wauplin/my-cool-model --every=30\n"""\n\nimport os\nimport time\nimport warnings\nfrom argparse import Namespace, _SubParsersAction\nfrom typing import List, Optional\n\nfrom huggingface_hub import logging\nfrom huggingface_hub._commit_scheduler import CommitScheduler\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.constants import HF_HUB_ENABLE_HF_TRANSFER\nfrom huggingface_hub.errors import RevisionNotFoundError\nfrom huggingface_hub.hf_api import HfApi\nfrom huggingface_hub.utils import disable_progress_bars, enable_progress_bars\nfrom huggingface_hub.utils._runtime import is_xet_available\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass UploadCommand(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: _SubParsersAction):\n upload_parser = parser.add_parser("upload", help="Upload a file or a folder to a repo on the Hub")\n upload_parser.add_argument(\n "repo_id", type=str, help="The ID of the repo to upload to (e.g. `username/repo-name`)."\n )\n upload_parser.add_argument(\n "local_path",\n nargs="?",\n help="Local path to the file or folder to upload. Wildcard patterns are supported. Defaults to current directory.",\n )\n upload_parser.add_argument(\n "path_in_repo",\n nargs="?",\n help="Path of the file or folder in the repo. Defaults to the relative path of the file or folder.",\n )\n upload_parser.add_argument(\n "--repo-type",\n choices=["model", "dataset", "space"],\n default="model",\n help="Type of the repo to upload to (e.g. 
`dataset`).",\n )\n upload_parser.add_argument(\n "--revision",\n type=str,\n help=(\n "An optional Git revision to push to. It can be a branch name or a PR reference. If revision does not"\n " exist and `--create-pr` is not set, a branch will be automatically created."\n ),\n )\n upload_parser.add_argument(\n "--private",\n action="store_true",\n help=(\n "Whether to create a private repo if repo doesn't exist on the Hub. Ignored if the repo already"\n " exists."\n ),\n )\n upload_parser.add_argument("--include", nargs="*", type=str, help="Glob patterns to match files to upload.")\n upload_parser.add_argument(\n "--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to upload."\n )\n upload_parser.add_argument(\n "--delete",\n nargs="*",\n type=str,\n help="Glob patterns for file to be deleted from the repo while committing.",\n )\n upload_parser.add_argument(\n "--commit-message", type=str, help="The summary / title / first line of the generated commit."\n )\n upload_parser.add_argument("--commit-description", type=str, help="The description of the generated commit.")\n upload_parser.add_argument(\n "--create-pr", action="store_true", help="Whether to upload content as a new Pull Request."\n )\n upload_parser.add_argument(\n "--every",\n type=float,\n help="If set, a background job is scheduled to create commits every `every` minutes.",\n )\n upload_parser.add_argument(\n "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"\n )\n upload_parser.add_argument(\n "--quiet",\n action="store_true",\n help="If True, progress bars are disabled and only the path to the uploaded files is printed.",\n )\n upload_parser.set_defaults(func=UploadCommand)\n\n def __init__(self, args: Namespace) -> None:\n self.repo_id: str = args.repo_id\n self.repo_type: Optional[str] = args.repo_type\n self.revision: Optional[str] = args.revision\n self.private: bool = args.private\n\n self.include: Optional[List[str]] = args.include\n self.exclude: Optional[List[str]] = args.exclude\n self.delete: Optional[List[str]] = args.delete\n\n self.commit_message: Optional[str] = args.commit_message\n self.commit_description: Optional[str] = args.commit_description\n self.create_pr: bool = args.create_pr\n self.api: HfApi = HfApi(token=args.token, library_name="huggingface-cli")\n self.quiet: bool = args.quiet # disable warnings and progress bars\n\n # Check `--every` is valid\n if args.every is not None and args.every <= 0:\n raise ValueError(f"`every` must be a positive value (got '{args.every}')")\n self.every: Optional[float] = args.every\n\n # Resolve `local_path` and `path_in_repo`\n repo_name: str = args.repo_id.split("/")[-1] # e.g. 
"Wauplin/my-cool-model" => "my-cool-model"\n self.local_path: str\n self.path_in_repo: str\n\n if args.local_path is not None and any(c in args.local_path for c in ["*", "?", "["]):\n if args.include is not None:\n raise ValueError("Cannot set `--include` when passing a `local_path` containing a wildcard.")\n if args.path_in_repo is not None and args.path_in_repo != ".":\n raise ValueError("Cannot set `path_in_repo` when passing a `local_path` containing a wildcard.")\n self.local_path = "."\n self.include = args.local_path\n self.path_in_repo = "."\n elif args.local_path is None and os.path.isfile(repo_name):\n # Implicit case 1: user provided only a repo_id which happen to be a local file as well => upload it with same name\n self.local_path = repo_name\n self.path_in_repo = repo_name\n elif args.local_path is None and os.path.isdir(repo_name):\n # Implicit case 2: user provided only a repo_id which happen to be a local folder as well => upload it at root\n self.local_path = repo_name\n self.path_in_repo = "."\n elif args.local_path is None:\n # Implicit case 3: user provided only a repo_id that does not match a local file or folder\n # => the user must explicitly provide a local_path => raise exception\n raise ValueError(f"'{repo_name}' is not a local file or folder. Please set `local_path` explicitly.")\n elif args.path_in_repo is None and os.path.isfile(args.local_path):\n # Explicit local path to file, no path in repo => upload it at root with same name\n self.local_path = args.local_path\n self.path_in_repo = os.path.basename(args.local_path)\n elif args.path_in_repo is None:\n # Explicit local path to folder, no path in repo => upload at root\n self.local_path = args.local_path\n self.path_in_repo = "."\n else:\n # Finally, if both paths are explicit\n self.local_path = args.local_path\n self.path_in_repo = args.path_in_repo\n\n def run(self) -> None:\n if self.quiet:\n disable_progress_bars()\n with warnings.catch_warnings():\n warnings.simplefilter("ignore")\n print(self._upload())\n enable_progress_bars()\n else:\n logging.set_verbosity_info()\n print(self._upload())\n logging.set_verbosity_warning()\n\n def _upload(self) -> str:\n if os.path.isfile(self.local_path):\n if self.include is not None and len(self.include) > 0:\n warnings.warn("Ignoring `--include` since a single file is uploaded.")\n if self.exclude is not None and len(self.exclude) > 0:\n warnings.warn("Ignoring `--exclude` since a single file is uploaded.")\n if self.delete is not None and len(self.delete) > 0:\n warnings.warn("Ignoring `--delete` since a single file is uploaded.")\n\n if not is_xet_available() and not HF_HUB_ENABLE_HF_TRANSFER:\n logger.info(\n "Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. 
See"\n " https://huggingface.co/docs/huggingface_hub/hf_transfer for more details."\n )\n\n # Schedule commits if `every` is set\n if self.every is not None:\n if os.path.isfile(self.local_path):\n # If file => watch entire folder + use allow_patterns\n folder_path = os.path.dirname(self.local_path)\n path_in_repo = (\n self.path_in_repo[: -len(self.local_path)] # remove filename from path_in_repo\n if self.path_in_repo.endswith(self.local_path)\n else self.path_in_repo\n )\n allow_patterns = [self.local_path]\n ignore_patterns = []\n else:\n folder_path = self.local_path\n path_in_repo = self.path_in_repo\n allow_patterns = self.include or []\n ignore_patterns = self.exclude or []\n if self.delete is not None and len(self.delete) > 0:\n warnings.warn("Ignoring `--delete` when uploading with scheduled commits.")\n\n scheduler = CommitScheduler(\n folder_path=folder_path,\n repo_id=self.repo_id,\n repo_type=self.repo_type,\n revision=self.revision,\n allow_patterns=allow_patterns,\n ignore_patterns=ignore_patterns,\n path_in_repo=path_in_repo,\n private=self.private,\n every=self.every,\n hf_api=self.api,\n )\n print(f"Scheduling commits every {self.every} minutes to {scheduler.repo_id}.")\n try: # Block main thread until KeyboardInterrupt\n while True:\n time.sleep(100)\n except KeyboardInterrupt:\n scheduler.stop()\n return "Stopped scheduled commits."\n\n # Otherwise, create repo and proceed with the upload\n if not os.path.isfile(self.local_path) and not os.path.isdir(self.local_path):\n raise FileNotFoundError(f"No such file or directory: '{self.local_path}'.")\n repo_id = self.api.create_repo(\n repo_id=self.repo_id,\n repo_type=self.repo_type,\n exist_ok=True,\n private=self.private,\n space_sdk="gradio" if self.repo_type == "space" else None,\n # ^ We don't want it to fail when uploading to a Space => let's set Gradio by default.\n # ^ I'd rather not add CLI args to set it explicitly as we already have `huggingface-cli repo create` for that.\n ).repo_id\n\n # Check if branch already exists and if not, create it\n if self.revision is not None and not self.create_pr:\n try:\n self.api.repo_info(repo_id=repo_id, repo_type=self.repo_type, revision=self.revision)\n except RevisionNotFoundError:\n logger.info(f"Branch '{self.revision}' not found. Creating it...")\n self.api.create_branch(repo_id=repo_id, repo_type=self.repo_type, branch=self.revision, exist_ok=True)\n # ^ `exist_ok=True` to avoid race concurrency issues\n\n # File-based upload\n if os.path.isfile(self.local_path):\n return self.api.upload_file(\n path_or_fileobj=self.local_path,\n path_in_repo=self.path_in_repo,\n repo_id=repo_id,\n repo_type=self.repo_type,\n revision=self.revision,\n commit_message=self.commit_message,\n commit_description=self.commit_description,\n create_pr=self.create_pr,\n )\n\n # Folder-based upload\n else:\n return self.api.upload_folder(\n folder_path=self.local_path,\n path_in_repo=self.path_in_repo,\n repo_id=repo_id,\n repo_type=self.repo_type,\n revision=self.revision,\n commit_message=self.commit_message,\n commit_description=self.commit_description,\n create_pr=self.create_pr,\n allow_patterns=self.include,\n ignore_patterns=self.exclude,\n delete_patterns=self.delete,\n )\n
|
.venv\Lib\site-packages\huggingface_hub\commands\upload.py
|
upload.py
|
Python
| 14,453 | 0.95 | 0.127389 | 0.14841 |
awesome-app
| 599 |
2024-03-31T23:22:23.440449
|
GPL-3.0
| false |
1c6778cfd8b42946091e7301813cadf8
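After resolving `local_path` and `path_in_repo`, the upload command above dispatches to either `HfApi.upload_file` or `HfApi.upload_folder`. A minimal sketch of those two calls — repo id and file names are hypothetical:

```py
from huggingface_hub import HfApi

api = HfApi()

# huggingface-cli upload my-cool-model ./my-cool-model.safetensors model.safetensors
api.upload_file(
    path_or_fileobj="./my-cool-model.safetensors",
    path_in_repo="model.safetensors",
    repo_id="username/my-cool-model",  # hypothetical repo id
)

# huggingface-cli upload my-cool-model ./models/my-cool-model . --include "*.safetensors"
api.upload_folder(
    folder_path="./models/my-cool-model",
    path_in_repo=".",
    repo_id="username/my-cool-model",
    allow_patterns=["*.safetensors"],  # maps to --include
)
```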
|
# coding=utf-8\n# Copyright 2023-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains command to upload a large folder with the CLI."""\n\nimport os\nfrom argparse import Namespace, _SubParsersAction\nfrom typing import List, Optional\n\nfrom huggingface_hub import logging\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.hf_api import HfApi\nfrom huggingface_hub.utils import disable_progress_bars\n\nfrom ._cli_utils import ANSI\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass UploadLargeFolderCommand(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: _SubParsersAction):\n subparser = parser.add_parser("upload-large-folder", help="Upload a large folder to a repo on the Hub")\n subparser.add_argument(\n "repo_id", type=str, help="The ID of the repo to upload to (e.g. `username/repo-name`)."\n )\n subparser.add_argument("local_path", type=str, help="Local path to the file or folder to upload.")\n subparser.add_argument(\n "--repo-type",\n choices=["model", "dataset", "space"],\n help="Type of the repo to upload to (e.g. `dataset`).",\n )\n subparser.add_argument(\n "--revision",\n type=str,\n help=("An optional Git revision to push to. It can be a branch name or a PR reference."),\n )\n subparser.add_argument(\n "--private",\n action="store_true",\n help=(\n "Whether to create a private repo if repo doesn't exist on the Hub. 
Ignored if the repo already exists."\n ),\n )\n subparser.add_argument("--include", nargs="*", type=str, help="Glob patterns to match files to upload.")\n subparser.add_argument("--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to upload.")\n subparser.add_argument(\n "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"\n )\n subparser.add_argument(\n "--num-workers", type=int, help="Number of workers to use to hash, upload and commit files."\n )\n subparser.add_argument("--no-report", action="store_true", help="Whether to disable regular status report.")\n subparser.add_argument("--no-bars", action="store_true", help="Whether to disable progress bars.")\n subparser.set_defaults(func=UploadLargeFolderCommand)\n\n def __init__(self, args: Namespace) -> None:\n self.repo_id: str = args.repo_id\n self.local_path: str = args.local_path\n self.repo_type: str = args.repo_type\n self.revision: Optional[str] = args.revision\n self.private: bool = args.private\n\n self.include: Optional[List[str]] = args.include\n self.exclude: Optional[List[str]] = args.exclude\n\n self.api: HfApi = HfApi(token=args.token, library_name="huggingface-cli")\n\n self.num_workers: Optional[int] = args.num_workers\n self.no_report: bool = args.no_report\n self.no_bars: bool = args.no_bars\n\n if not os.path.isdir(self.local_path):\n raise ValueError("Large upload is only supported for folders.")\n\n def run(self) -> None:\n logging.set_verbosity_info()\n\n print(\n ANSI.yellow(\n "You are about to upload a large folder to the Hub using `huggingface-cli upload-large-folder`. "\n "This is a new feature so feedback is very welcome!\n"\n "\n"\n "A few things to keep in mind:\n"\n " - Repository limits still apply: https://huggingface.co/docs/hub/repositories-recommendations\n"\n " - Do not start several processes in parallel.\n"\n " - You can interrupt and resume the process at any time. "\n "The script will pick up where it left off except for partially uploaded files that would have to be entirely reuploaded.\n"\n " - Do not upload the same folder to several repositories. If you need to do so, you must delete the `./.cache/huggingface/` folder first.\n"\n "\n"\n f"Some temporary metadata will be stored under `{self.local_path}/.cache/huggingface`.\n"\n " - You must not modify those files manually.\n"\n " - You must not delete the `./.cache/huggingface/` folder while a process is running.\n"\n " - You can delete the `./.cache/huggingface/` folder to reinitialize the upload state when process is not running. Files will have to be hashed and preuploaded again, except for already committed files.\n"\n "\n"\n "If the process output is too verbose, you can disable the progress bars with `--no-bars`. "\n "You can also entirely disable the status report with `--no-report`.\n"\n "\n"\n "For more details, run `huggingface-cli upload-large-folder --help` or check the documentation at "\n "https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-large-folder."\n )\n )\n\n if self.no_bars:\n disable_progress_bars()\n\n self.api.upload_large_folder(\n repo_id=self.repo_id,\n folder_path=self.local_path,\n repo_type=self.repo_type,\n revision=self.revision,\n private=self.private,\n allow_patterns=self.include,\n ignore_patterns=self.exclude,\n num_workers=self.num_workers,\n print_report=not self.no_report,\n )\n
|
.venv\Lib\site-packages\huggingface_hub\commands\upload_large_folder.py
|
upload_large_folder.py
|
Python
| 6,129 | 0.95 | 0.100775 | 0.123894 |
react-lib
| 648 |
2023-11-12T19:45:42.706334
|
GPL-3.0
| false |
a58380e36a891a04167744099f478849
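The command above delegates to `HfApi.upload_large_folder`, which is resumable and multi-worker. A sketch of the direct call, with a hypothetical dataset repo:

```py
from huggingface_hub import HfApi

api = HfApi()

# huggingface-cli upload-large-folder username/my-dataset ./data --repo-type=dataset --num-workers=8
api.upload_large_folder(
    repo_id="username/my-dataset",  # hypothetical repo id
    folder_path="./data",
    repo_type="dataset",
    num_workers=8,       # workers to hash, upload and commit files
    print_report=True,   # the periodic status report disabled by --no-report
)
```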
|
# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains commands to authenticate to the Hugging Face Hub and interact with your repositories.\n\nUsage:\n # login and save token locally.\n huggingface-cli login --token=hf_*** --add-to-git-credential\n\n # switch between tokens\n huggingface-cli auth switch\n\n # list all tokens\n huggingface-cli auth list\n\n # logout from a specific token, if no token-name is provided, all tokens will be deleted from your machine.\n huggingface-cli logout --token-name=your_token_name\n\n # find out which huggingface.co account you are logged in as\n huggingface-cli whoami\n"""\n\nfrom argparse import _SubParsersAction\nfrom typing import List, Optional\n\nfrom requests.exceptions import HTTPError\n\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.constants import ENDPOINT\nfrom huggingface_hub.hf_api import HfApi\n\nfrom .._login import auth_list, auth_switch, login, logout\nfrom ..utils import get_stored_tokens, get_token, logging\nfrom ._cli_utils import ANSI\n\n\nlogger = logging.get_logger(__name__)\n\ntry:\n from InquirerPy import inquirer\n from InquirerPy.base.control import Choice\n\n _inquirer_py_available = True\nexcept ImportError:\n _inquirer_py_available = False\n\n\nclass UserCommands(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: _SubParsersAction):\n login_parser = parser.add_parser("login", help="Log in using a token from huggingface.co/settings/tokens")\n login_parser.add_argument(\n "--token",\n type=str,\n help="Token generated from https://huggingface.co/settings/tokens",\n )\n login_parser.add_argument(\n "--add-to-git-credential",\n action="store_true",\n help="Optional: Save token to git credential helper.",\n )\n login_parser.set_defaults(func=lambda args: LoginCommand(args))\n whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")\n whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))\n\n logout_parser = parser.add_parser("logout", help="Log out")\n logout_parser.add_argument(\n "--token-name",\n type=str,\n help="Optional: Name of the access token to log out from.",\n )\n logout_parser.set_defaults(func=lambda args: LogoutCommand(args))\n\n auth_parser = parser.add_parser("auth", help="Other authentication related commands")\n auth_subparsers = auth_parser.add_subparsers(help="Authentication subcommands")\n auth_switch_parser = auth_subparsers.add_parser("switch", help="Switch between access tokens")\n auth_switch_parser.add_argument(\n "--token-name",\n type=str,\n help="Optional: Name of the access token to switch to.",\n )\n auth_switch_parser.add_argument(\n "--add-to-git-credential",\n action="store_true",\n help="Optional: Save token to git credential helper.",\n )\n auth_switch_parser.set_defaults(func=lambda args: AuthSwitchCommand(args))\n auth_list_parser = auth_subparsers.add_parser("list", help="List all stored access 
tokens")\n auth_list_parser.set_defaults(func=lambda args: AuthListCommand(args))\n\n\nclass BaseUserCommand:\n def __init__(self, args):\n self.args = args\n self._api = HfApi()\n\n\nclass LoginCommand(BaseUserCommand):\n def run(self):\n logging.set_verbosity_info()\n login(\n token=self.args.token,\n add_to_git_credential=self.args.add_to_git_credential,\n )\n\n\nclass LogoutCommand(BaseUserCommand):\n def run(self):\n logging.set_verbosity_info()\n logout(token_name=self.args.token_name)\n\n\nclass AuthSwitchCommand(BaseUserCommand):\n def run(self):\n logging.set_verbosity_info()\n token_name = self.args.token_name\n if token_name is None:\n token_name = self._select_token_name()\n\n if token_name is None:\n print("No token name provided. Aborting.")\n exit()\n auth_switch(token_name, add_to_git_credential=self.args.add_to_git_credential)\n\n def _select_token_name(self) -> Optional[str]:\n token_names = list(get_stored_tokens().keys())\n\n if not token_names:\n logger.error("No stored tokens found. Please login first.")\n return None\n\n if _inquirer_py_available:\n return self._select_token_name_tui(token_names)\n # if inquirer is not available, use a simpler terminal UI\n print("Available stored tokens:")\n for i, token_name in enumerate(token_names, 1):\n print(f"{i}. {token_name}")\n while True:\n try:\n choice = input("Enter the number of the token to switch to (or 'q' to quit): ")\n if choice.lower() == "q":\n return None\n index = int(choice) - 1\n if 0 <= index < len(token_names):\n return token_names[index]\n else:\n print("Invalid selection. Please try again.")\n except ValueError:\n print("Invalid input. Please enter a number or 'q' to quit.")\n\n def _select_token_name_tui(self, token_names: List[str]) -> Optional[str]:\n choices = [Choice(token_name, name=token_name) for token_name in token_names]\n try:\n return inquirer.select(\n message="Select a token to switch to:",\n choices=choices,\n default=None,\n ).execute()\n except KeyboardInterrupt:\n logger.info("Token selection cancelled.")\n return None\n\n\nclass AuthListCommand(BaseUserCommand):\n def run(self):\n logging.set_verbosity_info()\n auth_list()\n\n\nclass WhoamiCommand(BaseUserCommand):\n def run(self):\n token = get_token()\n if token is None:\n print("Not logged in")\n exit()\n try:\n info = self._api.whoami(token)\n print(info["name"])\n orgs = [org["name"] for org in info["orgs"]]\n if orgs:\n print(ANSI.bold("orgs: "), ",".join(orgs))\n\n if ENDPOINT != "https://huggingface.co":\n print(f"Authenticated through private endpoint: {ENDPOINT}")\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n
|
.venv\Lib\site-packages\huggingface_hub\commands\user.py
|
user.py
|
Python
| 7,096 | 0.95 | 0.217172 | 0.116564 |
react-lib
| 552 |
2025-04-05T06:33:15.114414
|
BSD-3-Clause
| false |
1adf4e56b310a19dd65fb54951b0add3
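The `auth list` and `whoami` code paths above rest on two small token helpers. A read-only sketch of the same checks, safe to run without any network call:

```py
from huggingface_hub.utils import get_stored_tokens, get_token

# huggingface-cli auth list: names of all access tokens stored on this machine
for token_name in get_stored_tokens():
    print(token_name)

# huggingface-cli whoami first checks that a token is active at all
print("Not logged in" if get_token() is None else "A token is configured")
```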
|
# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains command to print information about the version.\n\nUsage:\n huggingface-cli version\n"""\n\nfrom argparse import _SubParsersAction\n\nfrom huggingface_hub import __version__\n\nfrom . import BaseHuggingfaceCLICommand\n\n\nclass VersionCommand(BaseHuggingfaceCLICommand):\n def __init__(self, args):\n self.args = args\n\n @staticmethod\n def register_subcommand(parser: _SubParsersAction):\n version_parser = parser.add_parser("version", help="Print information about the huggingface-cli version.")\n version_parser.set_defaults(func=VersionCommand)\n\n def run(self) -> None:\n print(f"huggingface_hub version: {__version__}")\n
|
.venv\Lib\site-packages\huggingface_hub\commands\version.py
|
version.py
|
Python
| 1,266 | 0.95 | 0.135135 | 0.448276 |
react-lib
| 504 |
2025-04-05T23:14:30.171937
|
MIT
| false |
b57e0c7939986770f952cbcf80f49457
|
# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains a utility for good-looking prints."""\n\nimport os\nfrom typing import List, Union\n\n\nclass ANSI:\n """\n Helper for en.wikipedia.org/wiki/ANSI_escape_code\n """\n\n _bold = "\u001b[1m"\n _gray = "\u001b[90m"\n _red = "\u001b[31m"\n _reset = "\u001b[0m"\n _yellow = "\u001b[33m"\n\n @classmethod\n def bold(cls, s: str) -> str:\n return cls._format(s, cls._bold)\n\n @classmethod\n def gray(cls, s: str) -> str:\n return cls._format(s, cls._gray)\n\n @classmethod\n def red(cls, s: str) -> str:\n return cls._format(s, cls._bold + cls._red)\n\n @classmethod\n def yellow(cls, s: str) -> str:\n return cls._format(s, cls._yellow)\n\n @classmethod\n def _format(cls, s: str, code: str) -> str:\n if os.environ.get("NO_COLOR"):\n # See https://no-color.org/\n return s\n return f"{code}{s}{cls._reset}"\n\n\ndef tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:\n """\n Inspired by:\n\n - stackoverflow.com/a/8356620/593036\n - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data\n """\n col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]\n row_format = ("{{:{}}} " * len(headers)).format(*col_widths)\n lines = []\n lines.append(row_format.format(*headers))\n lines.append(row_format.format(*["-" * w for w in col_widths]))\n for row in rows:\n lines.append(row_format.format(*row))\n return "\n".join(lines)\n
|
.venv\Lib\site-packages\huggingface_hub\commands\_cli_utils.py
|
_cli_utils.py
|
Python
| 2,095 | 0.95 | 0.217391 | 0.245614 |
react-lib
| 168 |
2023-11-19T23:06:57.275478
|
Apache-2.0
| false |
cc880734d667623c96034ed6f7f5da7b
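`tabulate` above computes one column width per header and pads every cell to it; `ANSI` honors the `NO_COLOR` convention. A tiny usage sketch with made-up rows:

```py
from huggingface_hub.commands._cli_utils import ANSI, tabulate

rows = [
    ["roberta-base", "model", "2.7M"],
    ["t5-base", "model", "893.8M"],
]
print(tabulate(rows, headers=["REPO ID", "REPO TYPE", "SIZE"]))
print(ANSI.red("2 repo(s) scanned"))  # plain text if NO_COLOR is set
```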
|
# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom argparse import _SubParsersAction\n\n\nclass BaseHuggingfaceCLICommand(ABC):\n @staticmethod\n @abstractmethod\n def register_subcommand(parser: _SubParsersAction):\n raise NotImplementedError()\n\n @abstractmethod\n def run(self):\n raise NotImplementedError()\n
|
.venv\Lib\site-packages\huggingface_hub\commands\__init__.py
|
__init__.py
|
Python
| 928 | 0.95 | 0.148148 | 0.565217 |
awesome-app
| 266 |
2024-02-08T06:16:41.784220
|
GPL-3.0
| false |
7079d395b7ff910df67aa9d2626ef500
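Every command in the modules above follows the same contract from this ABC: a static `register_subcommand` that wires a subparser and `set_defaults(func=...)`, then a `run` method. A self-contained sketch of that pattern with a hypothetical `hello` command — the argparse wiring here only approximates how the real `huggingface-cli` entry point assembles its parser:

```py
from argparse import ArgumentParser, Namespace, _SubParsersAction

from huggingface_hub.commands import BaseHuggingfaceCLICommand


class HelloCommand(BaseHuggingfaceCLICommand):
    """Hypothetical command, for illustration only."""

    @staticmethod
    def register_subcommand(parser: _SubParsersAction):
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.add_argument("name", type=str, help="Who to greet.")
        hello_parser.set_defaults(func=HelloCommand)

    def __init__(self, args: Namespace) -> None:
        self.name = args.name

    def run(self) -> None:
        print(f"Hello, {self.name}!")


parser = ArgumentParser()
commands_parser = parser.add_subparsers(help="command helpers")
HelloCommand.register_subcommand(commands_parser)

args = parser.parse_args(["hello", "world"])
service = args.func(args)  # set_defaults(func=...) picks the command class
service.run()
```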
|
\n\n
|
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\delete_cache.cpython-313.pyc
|
delete_cache.cpython-313.pyc
|
Other
| 19,537 | 0.95 | 0.053061 | 0.143498 |
react-lib
| 511 |
2024-03-25T00:03:58.308313
|
BSD-3-Clause
| false |
5ffa6ff7ded2e0e542d710f7a3e48679
|
\n\n
|
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\download.cpython-313.pyc
|
download.cpython-313.pyc
|
Other
| 8,545 | 0.8 | 0.030928 | 0.074074 |
awesome-app
| 887 |
2024-04-30T20:25:06.412452
|
Apache-2.0
| false |
15aff08245ab00acddf39078c9875dc8
|
\n\n
|
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\env.cpython-313.pyc
|
env.cpython-313.pyc
|
Other
| 1,463 | 0.8 | 0 | 0 |
awesome-app
| 656 |
2023-10-07T15:21:38.210608
|
MIT
| false |
e55254e5a0cdbde310859bb59c3f5a18
|