Dataset schema (one entry per column; ranges are min to max as reported by the dataset viewer):

column               dtype          range / classes
content              stringlengths  1 to 103k
path                 stringlengths  8 to 216
filename             stringlengths  2 to 179
language             stringclasses  15 values
size_bytes           int64          2 to 189k
quality_score        float64        0.5 to 0.95
complexity           float64        0 to 1
documentation_ratio  float64        0 to 1
repository           stringclasses  5 values
stars                int64          0 to 1k
created_date         stringdate     2023-07-10 19:21:08 to 2025-07-09 19:11:45
license              stringclasses  4 values
is_test              bool           2 classes
file_hash            stringlengths  32 to 32 (fixed length)
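For orientation, below is a minimal sketch of how rows with the schema above could be loaded and filtered using the `datasets` library. The repo id "example/code-corpus" is a placeholder for illustration only, not a published dataset.

```python
# Hypothetical usage sketch, assuming the rows below are published as a
# Hugging Face dataset; "example/code-corpus" is a placeholder repo id.
from datasets import load_dataset

ds = load_dataset("example/code-corpus", split="train")

# Keep high-quality, non-test Python sources.
high_quality = ds.filter(
    lambda row: row["language"] == "Python"
    and row["quality_score"] >= 0.9
    and not row["is_test"]
)

print(high_quality[0]["path"], high_quality[0]["size_bytes"])
```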
from typing import Any, Dict, Optional\n\nfrom huggingface_hub.hf_api import InferenceProviderMapping\n\nfrom ._common import BaseConversationalTask\n\n\nclass FireworksAIConversationalTask(BaseConversationalTask):\n def __init__(self):\n super().__init__(provider="fireworks-ai", base_url="https://api.fireworks.ai")\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n return "/inference/v1/chat/completions"\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)\n response_format = parameters.get("response_format")\n if isinstance(response_format, dict) and response_format.get("type") == "json_schema":\n json_schema_details = response_format.get("json_schema")\n if isinstance(json_schema_details, dict) and "schema" in json_schema_details:\n payload["response_format"] = { # type: ignore [index]\n "type": "json_object",\n "schema": json_schema_details["schema"],\n }\n return payload\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\fireworks_ai.py
fireworks_ai.py
Python
1,215
0.95
0.222222
0
react-lib
669
2024-11-29T18:21:44.349023
MIT
false
c8c15a149910cf54d1eee1ff636ce79a
from ._common import BaseConversationalTask\n\n\nclass GroqConversationalTask(BaseConversationalTask):\n def __init__(self):\n super().__init__(provider="groq", base_url="https://api.groq.com")\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n return "/openai/v1/chat/completions"\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\groq.py
groq.py
Python
315
0.95
0.333333
0
awesome-app
917
2023-11-21T18:15:37.658453
Apache-2.0
false
73d4582466ae392e8d3f6073cb7a2c47
import json\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Union\n\nfrom huggingface_hub import constants\nfrom huggingface_hub.hf_api import InferenceProviderMapping\nfrom huggingface_hub.inference._common import RequestParameters, _b64_encode, _bytes_to_dict, _open_as_binary\nfrom huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none\nfrom huggingface_hub.utils import build_hf_headers, get_session, get_token, hf_raise_for_status\n\n\nclass HFInferenceTask(TaskProviderHelper):\n """Base class for HF Inference API tasks."""\n\n def __init__(self, task: str):\n super().__init__(\n provider="hf-inference",\n base_url=constants.INFERENCE_PROXY_TEMPLATE.format(provider="hf-inference"),\n task=task,\n )\n\n def _prepare_api_key(self, api_key: Optional[str]) -> str:\n # special case: for HF Inference we allow not providing an API key\n return api_key or get_token() # type: ignore[return-value]\n\n def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping:\n if model is not None and model.startswith(("http://", "https://")):\n return InferenceProviderMapping(\n provider="hf-inference", providerId=model, hf_model_id=model, task=self.task, status="live"\n )\n model_id = model if model is not None else _fetch_recommended_models().get(self.task)\n if model_id is None:\n raise ValueError(\n f"Task {self.task} has no recommended model for HF Inference. Please specify a model"\n " explicitly. Visit https://huggingface.co/tasks for more info."\n )\n _check_supported_task(model_id, self.task)\n return InferenceProviderMapping(\n provider="hf-inference", providerId=model_id, hf_model_id=model_id, task=self.task, status="live"\n )\n\n def _prepare_url(self, api_key: str, mapped_model: str) -> str:\n # hf-inference provider can handle URLs (e.g. Inference Endpoints or TGI deployment)\n if mapped_model.startswith(("http://", "https://")):\n return mapped_model\n return (\n # Feature-extraction and sentence-similarity are the only cases where we handle models with several tasks.\n f"{self.base_url}/models/{mapped_model}/pipeline/{self.task}"\n if self.task in ("feature-extraction", "sentence-similarity")\n # Otherwise, we use the default endpoint\n else f"{self.base_url}/models/{mapped_model}"\n )\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n if isinstance(inputs, bytes):\n raise ValueError(f"Unexpected binary input for task {self.task}.")\n if isinstance(inputs, Path):\n raise ValueError(f"Unexpected path input for task {self.task} (got {inputs})")\n return {"inputs": inputs, "parameters": filter_none(parameters)}\n\n\nclass HFInferenceBinaryInputTask(HFInferenceTask):\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n return None\n\n def _prepare_payload_as_bytes(\n self,\n inputs: Any,\n parameters: Dict,\n provider_mapping_info: InferenceProviderMapping,\n extra_payload: Optional[Dict],\n ) -> Optional[bytes]:\n parameters = filter_none(parameters)\n extra_payload = extra_payload or {}\n has_parameters = len(parameters) > 0 or len(extra_payload) > 0\n\n # Raise if not a binary object or a local path or a URL.\n if not isinstance(inputs, (bytes, Path)) and not isinstance(inputs, str):\n raise ValueError(f"Expected binary inputs or a local path or a URL. Got {inputs}")\n\n # Send inputs as raw content when no parameters are provided\n if not has_parameters:\n with _open_as_binary(inputs) as data:\n data_as_bytes = data if isinstance(data, bytes) else data.read()\n return data_as_bytes\n\n # Otherwise encode as b64\n return json.dumps({"inputs": _b64_encode(inputs), "parameters": parameters, **extra_payload}).encode("utf-8")\n\n\nclass HFInferenceConversational(HFInferenceTask):\n def __init__(self):\n super().__init__("conversational")\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n payload = filter_none(parameters)\n mapped_model = provider_mapping_info.provider_id\n payload_model = parameters.get("model") or mapped_model\n\n if payload_model is None or payload_model.startswith(("http://", "https://")):\n payload_model = "dummy"\n\n response_format = parameters.get("response_format")\n if isinstance(response_format, dict) and response_format.get("type") == "json_schema":\n payload["response_format"] = {\n "type": "json_object",\n "value": response_format["json_schema"]["schema"],\n }\n return {**payload, "model": payload_model, "messages": inputs}\n\n def _prepare_url(self, api_key: str, mapped_model: str) -> str:\n base_url = (\n mapped_model\n if mapped_model.startswith(("http://", "https://"))\n else f"{constants.INFERENCE_PROXY_TEMPLATE.format(provider='hf-inference')}/models/{mapped_model}"\n )\n return _build_chat_completion_url(base_url)\n\n\ndef _build_chat_completion_url(model_url: str) -> str:\n # Strip trailing /\n model_url = model_url.rstrip("/")\n\n # Append /chat/completions if not already present\n if model_url.endswith("/v1"):\n model_url += "/chat/completions"\n\n # Append /v1/chat/completions if not already present\n if not model_url.endswith("/chat/completions"):\n model_url += "/v1/chat/completions"\n\n return model_url\n\n\n@lru_cache(maxsize=1)\ndef _fetch_recommended_models() -> Dict[str, Optional[str]]:\n response = get_session().get(f"{constants.ENDPOINT}/api/tasks", headers=build_hf_headers())\n hf_raise_for_status(response)\n return {task: next(iter(details["widgetModels"]), None) for task, details in response.json().items()}\n\n\n@lru_cache(maxsize=None)\ndef _check_supported_task(model: str, task: str) -> None:\n from huggingface_hub.hf_api import HfApi\n\n model_info = HfApi().model_info(model)\n pipeline_tag = model_info.pipeline_tag\n tags = model_info.tags or []\n is_conversational = "conversational" in tags\n if task in ("text-generation", "conversational"):\n if pipeline_tag == "text-generation":\n # text-generation + conversational tag -> both tasks allowed\n if is_conversational:\n return\n # text-generation without conversational tag -> only text-generation allowed\n if task == "text-generation":\n return\n raise ValueError(f"Model '{model}' doesn't support task '{task}'.")\n\n if pipeline_tag == "text2text-generation":\n if task == "text-generation":\n return\n raise ValueError(f"Model '{model}' doesn't support task '{task}'.")\n\n if pipeline_tag == "image-text-to-text":\n if is_conversational and task == "conversational":\n return # Only conversational allowed if tagged as conversational\n raise ValueError("Non-conversational image-text-to-text task is not supported.")\n\n if (\n task in ("feature-extraction", "sentence-similarity")\n and pipeline_tag in ("feature-extraction", "sentence-similarity")\n and task in tags\n ):\n # feature-extraction and sentence-similarity are interchangeable for HF Inference\n return\n\n # For all other tasks, just check pipeline tag\n if pipeline_tag != task:\n raise ValueError(\n f"Model '{model}' doesn't support task '{task}'. Supported tasks: '{pipeline_tag}', got: '{task}'"\n )\n return\n\n\nclass HFInferenceFeatureExtractionTask(HFInferenceTask):\n def __init__(self):\n super().__init__("feature-extraction")\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n if isinstance(inputs, bytes):\n raise ValueError(f"Unexpected binary input for task {self.task}.")\n if isinstance(inputs, Path):\n raise ValueError(f"Unexpected path input for task {self.task} (got {inputs})")\n\n # Parameters are sent at root-level for feature-extraction task\n # See specs: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/tasks/feature-extraction/spec/input.json\n return {"inputs": inputs, **filter_none(parameters)}\n\n def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:\n if isinstance(response, bytes):\n return _bytes_to_dict(response)\n return response\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\hf_inference.py
hf_inference.py
Python
9,133
0.95
0.301887
0.092486
awesome-app
87
2025-06-07T16:02:27.884712
BSD-3-Clause
false
cb9fb828583cb20424cfa1a9c2386725
import base64\nfrom typing import Any, Dict, Optional, Union\n\nfrom huggingface_hub.hf_api import InferenceProviderMapping\nfrom huggingface_hub.inference._common import RequestParameters, _as_dict\nfrom huggingface_hub.inference._providers._common import BaseConversationalTask, TaskProviderHelper, filter_none\n\n\nclass HyperbolicTextToImageTask(TaskProviderHelper):\n def __init__(self):\n super().__init__(provider="hyperbolic", base_url="https://api.hyperbolic.xyz", task="text-to-image")\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n return "/v1/images/generations"\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n mapped_model = provider_mapping_info.provider_id\n parameters = filter_none(parameters)\n if "num_inference_steps" in parameters:\n parameters["steps"] = parameters.pop("num_inference_steps")\n if "guidance_scale" in parameters:\n parameters["cfg_scale"] = parameters.pop("guidance_scale")\n # For Hyperbolic, the width and height are required parameters\n if "width" not in parameters:\n parameters["width"] = 512\n if "height" not in parameters:\n parameters["height"] = 512\n return {"prompt": inputs, "model_name": mapped_model, **parameters}\n\n def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:\n response_dict = _as_dict(response)\n return base64.b64decode(response_dict["images"][0]["image"])\n\n\nclass HyperbolicTextGenerationTask(BaseConversationalTask):\n """\n Special case for Hyperbolic, where text-generation task is handled as a conversational task.\n """\n\n def __init__(self, task: str):\n super().__init__(\n provider="hyperbolic",\n base_url="https://api.hyperbolic.xyz",\n )\n self.task = task\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\hyperbolic.py
hyperbolic.py
Python
1,985
0.95
0.255319
0.026316
python-kit
904
2025-02-13T18:26:54.480442
GPL-3.0
false
351304a60cfacdad80b25184a322826d
from typing import Any, Dict, Optional, Union\n\nfrom huggingface_hub.hf_api import InferenceProviderMapping\nfrom huggingface_hub.inference._common import RequestParameters, _as_dict\nfrom huggingface_hub.inference._providers._common import (\n BaseConversationalTask,\n BaseTextGenerationTask,\n TaskProviderHelper,\n filter_none,\n)\nfrom huggingface_hub.utils import get_session\n\n\n_PROVIDER = "novita"\n_BASE_URL = "https://api.novita.ai"\n\n\nclass NovitaTextGenerationTask(BaseTextGenerationTask):\n def __init__(self):\n super().__init__(provider=_PROVIDER, base_url=_BASE_URL)\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n # there is no v1/ route for novita\n return "/v3/openai/completions"\n\n def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:\n output = _as_dict(response)["choices"][0]\n return {\n "generated_text": output["text"],\n "details": {\n "finish_reason": output.get("finish_reason"),\n "seed": output.get("seed"),\n },\n }\n\n\nclass NovitaConversationalTask(BaseConversationalTask):\n def __init__(self):\n super().__init__(provider=_PROVIDER, base_url=_BASE_URL)\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n # there is no v1/ route for novita\n return "/v3/openai/chat/completions"\n\n\nclass NovitaTextToVideoTask(TaskProviderHelper):\n def __init__(self):\n super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task="text-to-video")\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n return f"/v3/hf/{mapped_model}"\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n return {"prompt": inputs, **filter_none(parameters)}\n\n def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:\n response_dict = _as_dict(response)\n if not (\n isinstance(response_dict, dict)\n and "video" in response_dict\n and isinstance(response_dict["video"], dict)\n and "video_url" in response_dict["video"]\n ):\n raise ValueError("Expected response format: { 'video': { 'video_url': string } }")\n\n video_url = response_dict["video"]["video_url"]\n return get_session().get(video_url).content\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\novita.py
novita.py
Python
2,514
0.95
0.217391
0.037736
node-utils
354
2024-05-09T22:58:04.936185
MIT
false
62e7cd1cea7f731fd952da9498975002
import base64\nfrom typing import Any, Dict, Optional, Union\n\nfrom huggingface_hub.hf_api import InferenceProviderMapping\nfrom huggingface_hub.inference._common import RequestParameters, _as_dict\n\nfrom ._common import BaseConversationalTask, TaskProviderHelper, filter_none\n\n\nclass NscaleConversationalTask(BaseConversationalTask):\n def __init__(self):\n super().__init__(provider="nscale", base_url="https://inference.api.nscale.com")\n\n\nclass NscaleTextToImageTask(TaskProviderHelper):\n def __init__(self):\n super().__init__(provider="nscale", base_url="https://inference.api.nscale.com", task="text-to-image")\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n return "/v1/images/generations"\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n mapped_model = provider_mapping_info.provider_id\n # Combine all parameters except inputs and parameters\n parameters = filter_none(parameters)\n if "width" in parameters and "height" in parameters:\n parameters["size"] = f"{parameters.pop('width')}x{parameters.pop('height')}"\n if "num_inference_steps" in parameters:\n parameters.pop("num_inference_steps")\n if "cfg_scale" in parameters:\n parameters.pop("cfg_scale")\n payload = {\n "response_format": "b64_json",\n "prompt": inputs,\n "model": mapped_model,\n **parameters,\n }\n return payload\n\n def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:\n response_dict = _as_dict(response)\n return base64.b64decode(response_dict["data"][0]["b64_json"])\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\nscale.py
nscale.py
Python
1,802
0.95
0.227273
0.057143
vue-tools
949
2023-10-21T21:46:56.212947
BSD-3-Clause
false
28bf161171d0c2f58fe42d62e2c3f808
from typing import Optional\n\nfrom huggingface_hub.hf_api import InferenceProviderMapping\nfrom huggingface_hub.inference._providers._common import BaseConversationalTask\n\n\nclass OpenAIConversationalTask(BaseConversationalTask):\n def __init__(self):\n super().__init__(provider="openai", base_url="https://api.openai.com")\n\n def _prepare_api_key(self, api_key: Optional[str]) -> str:\n if api_key is None:\n raise ValueError("You must provide an api_key to work with OpenAI API.")\n if api_key.startswith("hf_"):\n raise ValueError(\n "OpenAI provider is not available through Hugging Face routing, please use your own OpenAI API key."\n )\n return api_key\n\n def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping:\n if model is None:\n raise ValueError("Please provide an OpenAI model ID, e.g. `gpt-4o` or `o1`.")\n return InferenceProviderMapping(\n provider="openai", providerId=model, task="conversational", status="live", hf_model_id=model\n )\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\openai.py
openai.py
Python
1,089
0.95
0.28
0
awesome-app
622
2024-11-23T19:50:26.104514
GPL-3.0
false
47dca962f8cb3b1a39ca06b8bfd7d7e7
from typing import Any, Dict, Optional, Union\n\nfrom huggingface_hub.hf_api import InferenceProviderMapping\nfrom huggingface_hub.inference._common import RequestParameters, _as_dict\nfrom huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none\nfrom huggingface_hub.utils import get_session\n\n\n_PROVIDER = "replicate"\n_BASE_URL = "https://api.replicate.com"\n\n\nclass ReplicateTask(TaskProviderHelper):\n def __init__(self, task: str):\n super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task=task)\n\n def _prepare_headers(self, headers: Dict, api_key: str) -> Dict:\n headers = super()._prepare_headers(headers, api_key)\n headers["Prefer"] = "wait"\n return headers\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n if ":" in mapped_model:\n return "/v1/predictions"\n return f"/v1/models/{mapped_model}/predictions"\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n mapped_model = provider_mapping_info.provider_id\n payload: Dict[str, Any] = {"input": {"prompt": inputs, **filter_none(parameters)}}\n if ":" in mapped_model:\n version = mapped_model.split(":", 1)[1]\n payload["version"] = version\n return payload\n\n def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:\n response_dict = _as_dict(response)\n if response_dict.get("output") is None:\n raise TimeoutError(\n f"Inference request timed out after 60 seconds. No output generated for model {response_dict.get('model')}"\n "The model might be in cold state or starting up. Please try again later."\n )\n output_url = (\n response_dict["output"] if isinstance(response_dict["output"], str) else response_dict["output"][0]\n )\n return get_session().get(output_url).content\n\n\nclass ReplicateTextToImageTask(ReplicateTask):\n def __init__(self):\n super().__init__("text-to-image")\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n payload: Dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) # type: ignore[assignment]\n if provider_mapping_info.adapter_weights_path is not None:\n payload["input"]["lora_weights"] = f"https://huggingface.co/{provider_mapping_info.hf_model_id}"\n return payload\n\n\nclass ReplicateTextToSpeechTask(ReplicateTask):\n def __init__(self):\n super().__init__("text-to-speech")\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n payload: Dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) # type: ignore[assignment]\n payload["input"]["text"] = payload["input"].pop("prompt") # rename "prompt" to "text" for TTS\n return payload\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\replicate.py
replicate.py
Python
3,157
0.95
0.277778
0
python-kit
451
2023-11-15T23:27:42.500687
Apache-2.0
false
2af9c48b61f3860d97fe860974fe0a25
import base64\nfrom abc import ABC\nfrom typing import Any, Dict, Optional, Union\n\nfrom huggingface_hub.hf_api import InferenceProviderMapping\nfrom huggingface_hub.inference._common import RequestParameters, _as_dict\nfrom huggingface_hub.inference._providers._common import (\n BaseConversationalTask,\n BaseTextGenerationTask,\n TaskProviderHelper,\n filter_none,\n)\n\n\n_PROVIDER = "together"\n_BASE_URL = "https://api.together.xyz"\n\n\nclass TogetherTask(TaskProviderHelper, ABC):\n """Base class for Together API tasks."""\n\n def __init__(self, task: str):\n super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task=task)\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n if self.task == "text-to-image":\n return "/v1/images/generations"\n elif self.task == "conversational":\n return "/v1/chat/completions"\n elif self.task == "text-generation":\n return "/v1/completions"\n raise ValueError(f"Unsupported task '{self.task}' for Together API.")\n\n\nclass TogetherTextGenerationTask(BaseTextGenerationTask):\n def __init__(self):\n super().__init__(provider=_PROVIDER, base_url=_BASE_URL)\n\n def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:\n output = _as_dict(response)["choices"][0]\n return {\n "generated_text": output["text"],\n "details": {\n "finish_reason": output.get("finish_reason"),\n "seed": output.get("seed"),\n },\n }\n\n\nclass TogetherConversationalTask(BaseConversationalTask):\n def __init__(self):\n super().__init__(provider=_PROVIDER, base_url=_BASE_URL)\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)\n response_format = parameters.get("response_format")\n if isinstance(response_format, dict) and response_format.get("type") == "json_schema":\n json_schema_details = response_format.get("json_schema")\n if isinstance(json_schema_details, dict) and "schema" in json_schema_details:\n payload["response_format"] = { # type: ignore [index]\n "type": "json_object",\n "schema": json_schema_details["schema"],\n }\n\n return payload\n\n\nclass TogetherTextToImageTask(TogetherTask):\n def __init__(self):\n super().__init__("text-to-image")\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n mapped_model = provider_mapping_info.provider_id\n parameters = filter_none(parameters)\n if "num_inference_steps" in parameters:\n parameters["steps"] = parameters.pop("num_inference_steps")\n if "guidance_scale" in parameters:\n parameters["guidance"] = parameters.pop("guidance_scale")\n\n return {"prompt": inputs, "response_format": "base64", **parameters, "model": mapped_model}\n\n def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:\n response_dict = _as_dict(response)\n return base64.b64decode(response_dict["data"][0]["b64_json"])\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\together.py
together.py
Python
3,439
0.95
0.238636
0
vue-tools
585
2023-08-23T12:39:18.525136
MIT
false
b88ca87747220a217be481adbb93974f
from functools import lru_cache\nfrom typing import Any, Dict, List, Optional, Union, overload\n\nfrom huggingface_hub import constants\nfrom huggingface_hub.hf_api import InferenceProviderMapping\nfrom huggingface_hub.inference._common import RequestParameters\nfrom huggingface_hub.inference._generated.types.chat_completion import ChatCompletionInputMessage\nfrom huggingface_hub.utils import build_hf_headers, get_token, logging\n\n\nlogger = logging.get_logger(__name__)\n\n\n# Dev purposes only.\n# If you want to try to run inference for a new model locally before it's registered on huggingface.co\n# for a given Inference Provider, you can add it to the following dictionary.\nHARDCODED_MODEL_INFERENCE_MAPPING: Dict[str, Dict[str, InferenceProviderMapping]] = {\n # "HF model ID" => InferenceProviderMapping object initialized with "Model ID on Inference Provider's side"\n #\n # Example:\n # "Qwen/Qwen2.5-Coder-32B-Instruct": InferenceProviderMapping(hf_model_id="Qwen/Qwen2.5-Coder-32B-Instruct",\n # provider_id="Qwen2.5-Coder-32B-Instruct",\n # task="conversational",\n # status="live")\n "cerebras": {},\n "cohere": {},\n "fal-ai": {},\n "fireworks-ai": {},\n "groq": {},\n "hf-inference": {},\n "hyperbolic": {},\n "nebius": {},\n "nscale": {},\n "replicate": {},\n "sambanova": {},\n "together": {},\n}\n\n\n@overload\ndef filter_none(obj: Dict[str, Any]) -> Dict[str, Any]: ...\n@overload\ndef filter_none(obj: List[Any]) -> List[Any]: ...\n\n\ndef filter_none(obj: Union[Dict[str, Any], List[Any]]) -> Union[Dict[str, Any], List[Any]]:\n if isinstance(obj, dict):\n cleaned: Dict[str, Any] = {}\n for k, v in obj.items():\n if v is None:\n continue\n if isinstance(v, (dict, list)):\n v = filter_none(v)\n # remove empty nested dicts\n if isinstance(v, dict) and not v:\n continue\n cleaned[k] = v\n return cleaned\n\n if isinstance(obj, list):\n return [filter_none(v) if isinstance(v, (dict, list)) else v for v in obj]\n\n raise ValueError(f"Expected dict or list, got {type(obj)}")\n\n\nclass TaskProviderHelper:\n """Base class for task-specific provider helpers."""\n\n def __init__(self, provider: str, base_url: str, task: str) -> None:\n self.provider = provider\n self.task = task\n self.base_url = base_url\n\n def prepare_request(\n self,\n *,\n inputs: Any,\n parameters: Dict[str, Any],\n headers: Dict,\n model: Optional[str],\n api_key: Optional[str],\n extra_payload: Optional[Dict[str, Any]] = None,\n ) -> RequestParameters:\n """\n Prepare the request to be sent to the provider.\n\n Each step (api_key, model, headers, url, payload) can be customized in subclasses.\n """\n # api_key from user, or local token, or raise error\n api_key = self._prepare_api_key(api_key)\n\n # mapped model from HF model ID\n provider_mapping_info = self._prepare_mapping_info(model)\n\n # default HF headers + user headers (to customize in subclasses)\n headers = self._prepare_headers(headers, api_key)\n\n # routed URL if HF token, or direct URL (to customize in '_prepare_route' in subclasses)\n url = self._prepare_url(api_key, provider_mapping_info.provider_id)\n\n # prepare payload (to customize in subclasses)\n payload = self._prepare_payload_as_dict(inputs, parameters, provider_mapping_info=provider_mapping_info)\n if payload is not None:\n payload = recursive_merge(payload, extra_payload or {})\n\n # body data (to customize in subclasses)\n data = self._prepare_payload_as_bytes(inputs, parameters, provider_mapping_info, extra_payload)\n\n # check if both payload and data are set and return\n if payload is not None and data is not None:\n raise ValueError("Both payload and data cannot be set in the same request.")\n if payload is None and data is None:\n raise ValueError("Either payload or data must be set in the request.")\n return RequestParameters(\n url=url, task=self.task, model=provider_mapping_info.provider_id, json=payload, data=data, headers=headers\n )\n\n def get_response(\n self,\n response: Union[bytes, Dict],\n request_params: Optional[RequestParameters] = None,\n ) -> Any:\n """\n Return the response in the expected format.\n\n Override this method in subclasses for customized response handling."""\n return response\n\n def _prepare_api_key(self, api_key: Optional[str]) -> str:\n """Return the API key to use for the request.\n\n Usually not overwritten in subclasses."""\n if api_key is None:\n api_key = get_token()\n if api_key is None:\n raise ValueError(\n f"You must provide an api_key to work with {self.provider} API or log in with `huggingface-cli login`."\n )\n return api_key\n\n def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping:\n """Return the mapped model ID to use for the request.\n\n Usually not overwritten in subclasses."""\n if model is None:\n raise ValueError(f"Please provide an HF model ID supported by {self.provider}.")\n\n # hardcoded mapping for local testing\n if HARDCODED_MODEL_INFERENCE_MAPPING.get(self.provider, {}).get(model):\n return HARDCODED_MODEL_INFERENCE_MAPPING[self.provider][model]\n\n provider_mapping = None\n for mapping in _fetch_inference_provider_mapping(model):\n if mapping.provider == self.provider:\n provider_mapping = mapping\n break\n\n if provider_mapping is None:\n raise ValueError(f"Model {model} is not supported by provider {self.provider}.")\n\n if provider_mapping.task != self.task:\n raise ValueError(\n f"Model {model} is not supported for task {self.task} and provider {self.provider}. "\n f"Supported task: {provider_mapping.task}."\n )\n\n if provider_mapping.status == "staging":\n logger.warning(\n f"Model {model} is in staging mode for provider {self.provider}. Meant for test purposes only."\n )\n if provider_mapping.status == "error":\n logger.warning(\n f"Our latest automated health check on model '{model}' for provider '{self.provider}' did not complete successfully. "\n "Inference call might fail."\n )\n return provider_mapping\n\n def _prepare_headers(self, headers: Dict, api_key: str) -> Dict:\n """Return the headers to use for the request.\n\n Override this method in subclasses for customized headers.\n """\n return {**build_hf_headers(token=api_key), **headers}\n\n def _prepare_url(self, api_key: str, mapped_model: str) -> str:\n """Return the URL to use for the request.\n\n Usually not overwritten in subclasses."""\n base_url = self._prepare_base_url(api_key)\n route = self._prepare_route(mapped_model, api_key)\n return f"{base_url.rstrip('/')}/{route.lstrip('/')}"\n\n def _prepare_base_url(self, api_key: str) -> str:\n """Return the base URL to use for the request.\n\n Usually not overwritten in subclasses."""\n # Route to the proxy if the api_key is a HF TOKEN\n if api_key.startswith("hf_"):\n logger.info(f"Calling '{self.provider}' provider through Hugging Face router.")\n return constants.INFERENCE_PROXY_TEMPLATE.format(provider=self.provider)\n else:\n logger.info(f"Calling '{self.provider}' provider directly.")\n return self.base_url\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n """Return the route to use for the request.\n\n Override this method in subclasses for customized routes.\n """\n return ""\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n """Return the payload to use for the request, as a dict.\n\n Override this method in subclasses for customized payloads.\n Only one of `_prepare_payload_as_dict` and `_prepare_payload_as_bytes` should return a value.\n """\n return None\n\n def _prepare_payload_as_bytes(\n self,\n inputs: Any,\n parameters: Dict,\n provider_mapping_info: InferenceProviderMapping,\n extra_payload: Optional[Dict],\n ) -> Optional[bytes]:\n """Return the body to use for the request, as bytes.\n\n Override this method in subclasses for customized body data.\n Only one of `_prepare_payload_as_dict` and `_prepare_payload_as_bytes` should return a value.\n """\n return None\n\n\nclass BaseConversationalTask(TaskProviderHelper):\n """\n Base class for conversational (chat completion) tasks.\n The schema follows the OpenAI API format defined here: https://platform.openai.com/docs/api-reference/chat\n """\n\n def __init__(self, provider: str, base_url: str):\n super().__init__(provider=provider, base_url=base_url, task="conversational")\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n return "/v1/chat/completions"\n\n def _prepare_payload_as_dict(\n self,\n inputs: List[Union[Dict, ChatCompletionInputMessage]],\n parameters: Dict,\n provider_mapping_info: InferenceProviderMapping,\n ) -> Optional[Dict]:\n return filter_none({"messages": inputs, **parameters, "model": provider_mapping_info.provider_id})\n\n\nclass BaseTextGenerationTask(TaskProviderHelper):\n """\n Base class for text-generation (completion) tasks.\n The schema follows the OpenAI API format defined here: https://platform.openai.com/docs/api-reference/completions\n """\n\n def __init__(self, provider: str, base_url: str):\n super().__init__(provider=provider, base_url=base_url, task="text-generation")\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n return "/v1/completions"\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n return {"prompt": inputs, **filter_none(parameters), "model": provider_mapping_info.provider_id}\n\n\n@lru_cache(maxsize=None)\ndef _fetch_inference_provider_mapping(model: str) -> List["InferenceProviderMapping"]:\n """\n Fetch provider mappings for a model from the Hub.\n """\n from huggingface_hub.hf_api import HfApi\n\n info = HfApi().model_info(model, expand=["inferenceProviderMapping"])\n provider_mapping = info.inference_provider_mapping\n if provider_mapping is None:\n raise ValueError(f"No provider mapping found for model {model}")\n return provider_mapping\n\n\ndef recursive_merge(dict1: Dict, dict2: Dict) -> Dict:\n return {\n **dict1,\n **{\n key: recursive_merge(dict1[key], value)\n if (key in dict1 and isinstance(dict1[key], dict) and isinstance(value, dict))\n else value\n for key, value in dict2.items()\n },\n }\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\_common.py
_common.py
Python
11,448
0.95
0.274247
0.096234
awesome-app
633
2023-10-14T07:50:02.299775
BSD-3-Clause
false
058a4906d14686f675671a254d4ecb26
from typing import Dict, Literal, Optional, Union\n\nfrom huggingface_hub.inference._providers.featherless_ai import (\n FeatherlessConversationalTask,\n FeatherlessTextGenerationTask,\n)\nfrom huggingface_hub.utils import logging\n\nfrom ._common import TaskProviderHelper, _fetch_inference_provider_mapping\nfrom .black_forest_labs import BlackForestLabsTextToImageTask\nfrom .cerebras import CerebrasConversationalTask\nfrom .cohere import CohereConversationalTask\nfrom .fal_ai import (\n FalAIAutomaticSpeechRecognitionTask,\n FalAITextToImageTask,\n FalAITextToSpeechTask,\n FalAITextToVideoTask,\n)\nfrom .fireworks_ai import FireworksAIConversationalTask\nfrom .groq import GroqConversationalTask\nfrom .hf_inference import (\n HFInferenceBinaryInputTask,\n HFInferenceConversational,\n HFInferenceFeatureExtractionTask,\n HFInferenceTask,\n)\nfrom .hyperbolic import HyperbolicTextGenerationTask, HyperbolicTextToImageTask\nfrom .nebius import (\n NebiusConversationalTask,\n NebiusFeatureExtractionTask,\n NebiusTextGenerationTask,\n NebiusTextToImageTask,\n)\nfrom .novita import NovitaConversationalTask, NovitaTextGenerationTask, NovitaTextToVideoTask\nfrom .nscale import NscaleConversationalTask, NscaleTextToImageTask\nfrom .openai import OpenAIConversationalTask\nfrom .replicate import ReplicateTask, ReplicateTextToImageTask, ReplicateTextToSpeechTask\nfrom .sambanova import SambanovaConversationalTask, SambanovaFeatureExtractionTask\nfrom .together import TogetherConversationalTask, TogetherTextGenerationTask, TogetherTextToImageTask\n\n\nlogger = logging.get_logger(__name__)\n\n\nPROVIDER_T = Literal[\n "black-forest-labs",\n "cerebras",\n "cohere",\n "fal-ai",\n "featherless-ai",\n "fireworks-ai",\n "groq",\n "hf-inference",\n "hyperbolic",\n "nebius",\n "novita",\n "nscale",\n "openai",\n "replicate",\n "sambanova",\n "together",\n]\n\nPROVIDER_OR_POLICY_T = Union[PROVIDER_T, Literal["auto"]]\n\nPROVIDERS: Dict[PROVIDER_T, Dict[str, TaskProviderHelper]] = {\n "black-forest-labs": {\n "text-to-image": BlackForestLabsTextToImageTask(),\n },\n "cerebras": {\n "conversational": CerebrasConversationalTask(),\n },\n "cohere": {\n "conversational": CohereConversationalTask(),\n },\n "fal-ai": {\n "automatic-speech-recognition": FalAIAutomaticSpeechRecognitionTask(),\n "text-to-image": FalAITextToImageTask(),\n "text-to-speech": FalAITextToSpeechTask(),\n "text-to-video": FalAITextToVideoTask(),\n },\n "featherless-ai": {\n "conversational": FeatherlessConversationalTask(),\n "text-generation": FeatherlessTextGenerationTask(),\n },\n "fireworks-ai": {\n "conversational": FireworksAIConversationalTask(),\n },\n "groq": {\n "conversational": GroqConversationalTask(),\n },\n "hf-inference": {\n "text-to-image": HFInferenceTask("text-to-image"),\n "conversational": HFInferenceConversational(),\n "text-generation": HFInferenceTask("text-generation"),\n "text-classification": HFInferenceTask("text-classification"),\n "question-answering": HFInferenceTask("question-answering"),\n "audio-classification": HFInferenceBinaryInputTask("audio-classification"),\n "automatic-speech-recognition": HFInferenceBinaryInputTask("automatic-speech-recognition"),\n "fill-mask": HFInferenceTask("fill-mask"),\n "feature-extraction": HFInferenceFeatureExtractionTask(),\n "image-classification": HFInferenceBinaryInputTask("image-classification"),\n "image-segmentation": HFInferenceBinaryInputTask("image-segmentation"),\n "document-question-answering": HFInferenceTask("document-question-answering"),\n "image-to-text": HFInferenceBinaryInputTask("image-to-text"),\n "object-detection": HFInferenceBinaryInputTask("object-detection"),\n "audio-to-audio": HFInferenceBinaryInputTask("audio-to-audio"),\n "zero-shot-image-classification": HFInferenceBinaryInputTask("zero-shot-image-classification"),\n "zero-shot-classification": HFInferenceTask("zero-shot-classification"),\n "image-to-image": HFInferenceBinaryInputTask("image-to-image"),\n "sentence-similarity": HFInferenceTask("sentence-similarity"),\n "table-question-answering": HFInferenceTask("table-question-answering"),\n "tabular-classification": HFInferenceTask("tabular-classification"),\n "text-to-speech": HFInferenceTask("text-to-speech"),\n "token-classification": HFInferenceTask("token-classification"),\n "translation": HFInferenceTask("translation"),\n "summarization": HFInferenceTask("summarization"),\n "visual-question-answering": HFInferenceBinaryInputTask("visual-question-answering"),\n },\n "hyperbolic": {\n "text-to-image": HyperbolicTextToImageTask(),\n "conversational": HyperbolicTextGenerationTask("conversational"),\n "text-generation": HyperbolicTextGenerationTask("text-generation"),\n },\n "nebius": {\n "text-to-image": NebiusTextToImageTask(),\n "conversational": NebiusConversationalTask(),\n "text-generation": NebiusTextGenerationTask(),\n "feature-extraction": NebiusFeatureExtractionTask(),\n },\n "novita": {\n "text-generation": NovitaTextGenerationTask(),\n "conversational": NovitaConversationalTask(),\n "text-to-video": NovitaTextToVideoTask(),\n },\n "nscale": {\n "conversational": NscaleConversationalTask(),\n "text-to-image": NscaleTextToImageTask(),\n },\n "openai": {\n "conversational": OpenAIConversationalTask(),\n },\n "replicate": {\n "text-to-image": ReplicateTextToImageTask(),\n "text-to-speech": ReplicateTextToSpeechTask(),\n "text-to-video": ReplicateTask("text-to-video"),\n },\n "sambanova": {\n "conversational": SambanovaConversationalTask(),\n "feature-extraction": SambanovaFeatureExtractionTask(),\n },\n "together": {\n "text-to-image": TogetherTextToImageTask(),\n "conversational": TogetherConversationalTask(),\n "text-generation": TogetherTextGenerationTask(),\n },\n}\n\n\ndef get_provider_helper(\n provider: Optional[PROVIDER_OR_POLICY_T], task: str, model: Optional[str]\n) -> TaskProviderHelper:\n """Get provider helper instance by name and task.\n\n Args:\n provider (`str`, *optional*): name of the provider, or "auto" to automatically select the provider for the model.\n task (`str`): Name of the task\n model (`str`, *optional*): Name of the model\n Returns:\n TaskProviderHelper: Helper instance for the specified provider and task\n\n Raises:\n ValueError: If provider or task is not supported\n """\n\n if (model is None and provider in (None, "auto")) or (\n model is not None and model.startswith(("http://", "https://"))\n ):\n provider = "hf-inference"\n\n if provider is None:\n logger.info(\n "Defaulting to 'auto' which will select the first provider available for the model, sorted by the user's order in https://hf.co/settings/inference-providers."\n )\n provider = "auto"\n\n if provider == "auto":\n if model is None:\n raise ValueError("Specifying a model is required when provider is 'auto'")\n provider_mapping = _fetch_inference_provider_mapping(model)\n provider = next(iter(provider_mapping)).provider\n\n provider_tasks = PROVIDERS.get(provider) # type: ignore\n if provider_tasks is None:\n raise ValueError(\n f"Provider '{provider}' not supported. Available values: 'auto' or any provider from {list(PROVIDERS.keys())}."\n "Passing 'auto' (default value) will automatically select the first provider available for the model, sorted "\n "by the user's order in https://hf.co/settings/inference-providers."\n )\n\n if task not in provider_tasks:\n raise ValueError(\n f"Task '{task}' not supported for provider '{provider}'. Available tasks: {list(provider_tasks.keys())}"\n )\n return provider_tasks[task]\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__init__.py
__init__.py
Python
8,116
0.95
0.058537
0
awesome-app
757
2024-10-28T18:24:21.807721
MIT
false
cba40e9c622301abd26dd57386556270
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\black_forest_labs.cpython-313.pyc
black_forest_labs.cpython-313.pyc
Other
4,513
0.8
0.051282
0
node-utils
270
2025-05-14T22:57:47.732803
Apache-2.0
false
6c826f0b4f0a29ab27219f098a10898b
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\cerebras.cpython-313.pyc
cerebras.cpython-313.pyc
Other
824
0.8
0
0
awesome-app
251
2025-05-30T16:30:30.366101
Apache-2.0
false
98b8a6f469d28e1afbad6587dea9ceb4
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\cohere.cpython-313.pyc
cohere.cpython-313.pyc
Other
2,156
0.8
0
0
react-lib
558
2025-05-03T09:56:14.429545
BSD-3-Clause
false
2005f9be81fc715f4a2b4751c28a80b6
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\fal_ai.cpython-313.pyc
fal_ai.cpython-313.pyc
Other
10,808
0.8
0
0.012346
awesome-app
706
2024-01-15T10:47:01.697269
GPL-3.0
false
f19de317523286152b68268fb8e15766
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\featherless_ai.cpython-313.pyc
featherless_ai.cpython-313.pyc
Other
2,795
0.8
0
0
node-utils
871
2025-06-12T04:37:46.981900
MIT
false
237d73e8c398b88e8b6711a24351a81d
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\fireworks_ai.cpython-313.pyc
fireworks_ai.cpython-313.pyc
Other
2,119
0.8
0
0
node-utils
892
2024-05-02T07:13:53.458607
BSD-3-Clause
false
64b896b5704de64203e28422e985f0a8
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\groq.cpython-313.pyc
groq.cpython-313.pyc
Other
1,062
0.8
0
0
react-lib
159
2025-03-15T21:59:41.403784
Apache-2.0
false
81c243d8923ccc99fbed7f496d67fecb
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\hf_inference.cpython-313.pyc
hf_inference.cpython-313.pyc
Other
11,539
0.95
0.055046
0
awesome-app
621
2025-03-25T06:58:31.854642
Apache-2.0
false
639eda3dcae8c8630578eade00f72933
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\hyperbolic.cpython-313.pyc
hyperbolic.cpython-313.pyc
Other
3,323
0.8
0.028571
0
awesome-app
707
2025-03-28T18:25:35.558204
BSD-3-Clause
false
b3b454c4c43adc3a9590010a57fa08dc
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\nebius.cpython-313.pyc
nebius.cpython-313.pyc
Other
6,051
0.8
0
0
awesome-app
332
2023-10-19T08:19:31.838611
Apache-2.0
false
cc4616df774176ab584c98b7a5fb535c
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\novita.cpython-313.pyc
novita.cpython-313.pyc
Other
4,553
0.8
0
0
python-kit
966
2023-08-03T15:55:30.993259
BSD-3-Clause
false
1da8168801b7803209ab62e0f41fba60
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\nscale.cpython-313.pyc
nscale.cpython-313.pyc
Other
3,159
0.8
0
0
react-lib
414
2024-01-21T08:18:34.096259
BSD-3-Clause
false
a20369e7b9f84ce98817b0362ca808c8
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\openai.cpython-313.pyc
openai.cpython-313.pyc
Other
1,960
0.8
0
0
python-kit
601
2023-12-27T10:59:25.650050
GPL-3.0
false
7d81175d11508fe209dee3a68799064e
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\replicate.cpython-313.pyc
replicate.cpython-313.pyc
Other
5,139
0.8
0.05
0
awesome-app
244
2024-05-13T12:32:25.236802
GPL-3.0
false
2aa1d5bd96598f6f6303f3fcf8b67962
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\sambanova.cpython-313.pyc
sambanova.cpython-313.pyc
Other
3,628
0.8
0
0
react-lib
995
2025-04-23T05:00:10.553488
MIT
false
0b479572d161eb93d9ba59edba241227
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\together.cpython-313.pyc
together.cpython-313.pyc
Other
5,763
0.95
0.046154
0.015873
vue-tools
360
2024-11-24T16:12:08.515922
BSD-3-Clause
false
3d61b7784e578e53b2c0780027cf688a
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\_common.cpython-313.pyc
_common.cpython-313.pyc
Other
14,008
0.95
0.158228
0.006897
react-lib
250
2025-05-15T07:01:32.144090
MIT
false
5e3070a6bce4141e213f58e67be75a8c
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
7,315
0.95
0.089286
0
vue-tools
114
2024-01-16T19:51:44.939228
BSD-3-Clause
false
4b7706f7e87e95793befa581f0748f0e
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\__pycache__\_common.cpython-313.pyc
_common.cpython-313.pyc
Other
16,189
0.95
0.030675
0.006623
react-lib
763
2023-07-20T11:37:54.251953
MIT
false
bc2fae22c5c8ff8a8c52216419b4fa99
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
200
0.7
0
0
node-utils
779
2024-10-26T21:41:14.598014
MIT
false
97aef6cbe4feafcd14c5c274873780b1
# Copyright 2024 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains helpers to split tensors into shards."""\n\nfrom dataclasses import dataclass, field\nfrom typing import Any, Callable, Dict, List, Optional, TypeVar, Union\n\nfrom .. import logging\n\n\nTensorT = TypeVar("TensorT")\nTensorSizeFn_T = Callable[[TensorT], int]\nStorageIDFn_T = Callable[[TensorT], Optional[Any]]\n\nMAX_SHARD_SIZE = "5GB"\nSIZE_UNITS = {\n "TB": 10**12,\n "GB": 10**9,\n "MB": 10**6,\n "KB": 10**3,\n}\n\n\nlogger = logging.get_logger(__file__)\n\n\n@dataclass\nclass StateDictSplit:\n is_sharded: bool = field(init=False)\n metadata: Dict[str, Any]\n filename_to_tensors: Dict[str, List[str]]\n tensor_to_filename: Dict[str, str]\n\n def __post_init__(self):\n self.is_sharded = len(self.filename_to_tensors) > 1\n\n\ndef split_state_dict_into_shards_factory(\n state_dict: Dict[str, TensorT],\n *,\n get_storage_size: TensorSizeFn_T,\n filename_pattern: str,\n get_storage_id: StorageIDFn_T = lambda tensor: None,\n max_shard_size: Union[int, str] = MAX_SHARD_SIZE,\n) -> StateDictSplit:\n """\n Split a model state dictionary in shards so that each shard is smaller than a given size.\n\n The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization\n made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we\n have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not\n [6+2+2GB], [6+2GB], [6GB].\n\n <Tip warning={true}>\n\n If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a\n size greater than `max_shard_size`.\n\n </Tip>\n\n Args:\n state_dict (`Dict[str, Tensor]`):\n The state dictionary to save.\n get_storage_size (`Callable[[Tensor], int]`):\n A function that returns the size of a tensor when saved on disk in bytes.\n get_storage_id (`Callable[[Tensor], Optional[Any]]`, *optional*):\n A function that returns a unique identifier to a tensor storage. Multiple different tensors can share the\n same underlying storage. This identifier is guaranteed to be unique and constant for this tensor's storage\n during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id.\n filename_pattern (`str`, *optional*):\n The pattern to generate the files names in which the model will be saved. Pattern must be a string that\n can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`\n max_shard_size (`int` or `str`, *optional*):\n The maximum size of each shard, in bytes. Defaults to 5GB.\n\n Returns:\n [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.\n """\n storage_id_to_tensors: Dict[Any, List[str]] = {}\n\n shard_list: List[Dict[str, TensorT]] = []\n current_shard: Dict[str, TensorT] = {}\n current_shard_size = 0\n total_size = 0\n\n if isinstance(max_shard_size, str):\n max_shard_size = parse_size_to_int(max_shard_size)\n\n for key, tensor in state_dict.items():\n # when bnb serialization is used the weights in the state dict can be strings\n # check: https://github.com/huggingface/transformers/pull/24416 for more details\n if isinstance(tensor, str):\n logger.info("Skipping tensor %s as it is a string (bnb serialization)", key)\n continue\n\n # If a `tensor` shares the same underlying storage as another tensor, we put `tensor` in the same `block`\n storage_id = get_storage_id(tensor)\n if storage_id is not None:\n if storage_id in storage_id_to_tensors:\n # We skip this tensor for now and will reassign to correct shard later\n storage_id_to_tensors[storage_id].append(key)\n continue\n else:\n # This is the first tensor with this storage_id, we create a new entry\n # in the storage_id_to_tensors dict => we will assign the shard id later\n storage_id_to_tensors[storage_id] = [key]\n\n # Compute tensor size\n tensor_size = get_storage_size(tensor)\n\n # If this tensor is bigger than the maximal size, we put it in its own shard\n if tensor_size > max_shard_size:\n total_size += tensor_size\n shard_list.append({key: tensor})\n continue\n\n # If this tensor is going to tip up over the maximal size, we split.\n # Current shard already has some tensors, we add it to the list of shards and create a new one.\n if current_shard_size + tensor_size > max_shard_size:\n shard_list.append(current_shard)\n current_shard = {}\n current_shard_size = 0\n\n # Add the tensor to the current shard\n current_shard[key] = tensor\n current_shard_size += tensor_size\n total_size += tensor_size\n\n # Add the last shard\n if len(current_shard) > 0:\n shard_list.append(current_shard)\n nb_shards = len(shard_list)\n\n # Loop over the tensors that share the same storage and assign them together\n for storage_id, keys in storage_id_to_tensors.items():\n # Let's try to find the shard where the first tensor of this storage is and put all tensors in the same shard\n for shard in shard_list:\n if keys[0] in shard:\n for key in keys:\n shard[key] = state_dict[key]\n break\n\n # If we only have one shard, we return it => no need to build the index\n if nb_shards == 1:\n filename = filename_pattern.format(suffix="")\n return StateDictSplit(\n metadata={"total_size": total_size},\n filename_to_tensors={filename: list(state_dict.keys())},\n tensor_to_filename={key: filename for key in state_dict.keys()},\n )\n\n # Now that each tensor is assigned to a shard, let's assign a filename to each shard\n tensor_name_to_filename = {}\n filename_to_tensors = {}\n for idx, shard in enumerate(shard_list):\n filename = filename_pattern.format(suffix=f"-{idx + 1:05d}-of-{nb_shards:05d}")\n for key in shard:\n tensor_name_to_filename[key] = filename\n filename_to_tensors[filename] = list(shard.keys())\n\n # Build the index and return\n return StateDictSplit(\n metadata={"total_size": total_size},\n filename_to_tensors=filename_to_tensors,\n tensor_to_filename=tensor_name_to_filename,\n )\n\n\ndef parse_size_to_int(size_as_str: str) -> int:\n """\n Parse a size expressed as a string with digits and unit (like `"5MB"`) to an integer (in bytes).\n\n Supported units are "TB", "GB", "MB", "KB".\n\n Args:\n size_as_str (`str`): The size to convert. Will be directly returned if an `int`.\n\n Example:\n\n ```py\n >>> parse_size_to_int("5MB")\n 5000000\n ```\n """\n size_as_str = size_as_str.strip()\n\n # Parse unit\n unit = size_as_str[-2:].upper()\n if unit not in SIZE_UNITS:\n raise ValueError(f"Unit '{unit}' not supported. Supported units are TB, GB, MB, KB. Got '{size_as_str}'.")\n multiplier = SIZE_UNITS[unit]\n\n # Parse value\n try:\n value = float(size_as_str[:-2].strip())\n except ValueError as e:\n raise ValueError(f"Could not parse the size value from '{size_as_str}': {e}") from e\n\n return int(value * multiplier)\n
.venv\Lib\site-packages\huggingface_hub\serialization\_base.py
_base.py
Python
8,126
0.95
0.147619
0.194118
python-kit
837
2024-04-23T22:53:36.826901
GPL-3.0
false
37af78c1f439f354dcd7ce4ef13c8cd5
# Copyright 2024 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains tensorflow-specific helpers."""\n\nimport math\nimport re\nfrom typing import TYPE_CHECKING, Dict, Union\n\nfrom .. import constants\nfrom ._base import MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory\n\n\nif TYPE_CHECKING:\n import tensorflow as tf\n\n\ndef split_tf_state_dict_into_shards(\n state_dict: Dict[str, "tf.Tensor"],\n *,\n filename_pattern: str = constants.TF2_WEIGHTS_FILE_PATTERN,\n max_shard_size: Union[int, str] = MAX_SHARD_SIZE,\n) -> StateDictSplit:\n """\n Split a model state dictionary in shards so that each shard is smaller than a given size.\n\n The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization\n made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we\n have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not\n [6+2+2GB], [6+2GB], [6GB].\n\n <Tip warning={true}>\n\n If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a\n size greater than `max_shard_size`.\n\n </Tip>\n\n Args:\n state_dict (`Dict[str, Tensor]`):\n The state dictionary to save.\n filename_pattern (`str`, *optional*):\n The pattern to generate the files names in which the model will be saved. Pattern must be a string that\n can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`\n Defaults to `"tf_model{suffix}.h5"`.\n max_shard_size (`int` or `str`, *optional*):\n The maximum size of each shard, in bytes. Defaults to 5GB.\n\n Returns:\n [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.\n """\n return split_state_dict_into_shards_factory(\n state_dict,\n max_shard_size=max_shard_size,\n filename_pattern=filename_pattern,\n get_storage_size=get_tf_storage_size,\n )\n\n\ndef get_tf_storage_size(tensor: "tf.Tensor") -> int:\n # Return `math.ceil` since dtype byte size can be a float (e.g., 0.125 for tf.bool).\n # Better to overestimate than underestimate.\n return math.ceil(tensor.numpy().size * _dtype_byte_size_tf(tensor.dtype))\n\n\ndef _dtype_byte_size_tf(dtype) -> float:\n """\n Returns the size (in bytes) occupied by one parameter of type `dtype`.\n Taken from https://github.com/huggingface/transformers/blob/74d9d0cebb0263a3f8ab9c280569170cc74651d0/src/transformers/modeling_tf_utils.py#L608.\n NOTE: why not `tensor.numpy().nbytes`?\n Example:\n ```py\n >>> _dtype_byte_size(tf.float32)\n 4\n ```\n """\n import tensorflow as tf\n\n if dtype == tf.bool:\n return 1 / 8\n bit_search = re.search(r"[^\d](\d+)$", dtype.name)\n if bit_search is None:\n raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")\n bit_size = int(bit_search.groups()[0])\n return bit_size // 8\n
.venv\Lib\site-packages\huggingface_hub\serialization\_tensorflow.py
_tensorflow.py
Python
3,625
0.95
0.094737
0.205128
react-lib
79
2024-08-26T09:51:09.130603
BSD-3-Clause
false
95fcfd6a0367692c1de01451d6ceccdb
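The greedy, order-preserving sharding described in the docstring above can be checked without TensorFlow by calling the generic factory directly. A minimal sketch, assuming `split_state_dict_into_shards_factory` accepts the keyword arguments used in the file and that `StateDictSplit` exposes a `filename_to_tensors` mapping; the fake "tensors" are plain integers standing in for byte sizes.

```python
# Sketch only: exercises the greedy sharding from `_base` without TensorFlow.
# Assumption: the factory signature matches the call in `_tensorflow.py` above.
from huggingface_hub.serialization._base import split_state_dict_into_shards_factory

GB = 1024**3
# Sizes from the docstring example: [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] with a 10GB limit.
state_dict = {"a": 6 * GB, "b": 6 * GB, "c": 2 * GB, "d": 6 * GB, "e": 2 * GB, "f": 2 * GB}

split = split_state_dict_into_shards_factory(
    state_dict,
    get_storage_size=lambda fake_tensor: fake_tensor,  # our "tensors" are their own sizes
    filename_pattern="tf_model{suffix}.h5",
    max_shard_size="10GB",
)
print(split.filename_to_tensors)
# Keys are sharded in order: [a], [b, c], [d, e, f] -- no bin-packing is attempted.
```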
\n\n
.venv\Lib\site-packages\huggingface_hub\serialization\__pycache__\_base.cpython-313.pyc
_base.cpython-313.pyc
Other
7,162
0.95
0.04065
0
react-lib
619
2023-08-06T15:07:53.940315
BSD-3-Clause
false
977f7fb6f977ea05cacb09753eaff747
\n\n
.venv\Lib\site-packages\huggingface_hub\serialization\__pycache__\_dduf.cpython-313.pyc
_dduf.cpython-313.pyc
Other
19,209
0.95
0.024055
0.023622
python-kit
730
2025-05-13T09:13:54.672773
GPL-3.0
false
9ed024e4165c4a3e1d6d3a27ca3963e9
\n\n
.venv\Lib\site-packages\huggingface_hub\serialization\__pycache__\_tensorflow.cpython-313.pyc
_tensorflow.cpython-313.pyc
Other
3,603
0.8
0.016949
0
awesome-app
403
2024-02-23T08:50:08.757956
GPL-3.0
false
940f70bcc82fba5fe9487bbe20037be3
\n\n
.venv\Lib\site-packages\huggingface_hub\serialization\__pycache__\_torch.cpython-313.pyc
_torch.cpython-313.pyc
Other
45,695
0.95
0.0592
0.016453
vue-tools
792
2024-11-02T05:48:53.020755
Apache-2.0
false
382a8500e5cab169e5834ad15501cd23
\n\n
.venv\Lib\site-packages\huggingface_hub\serialization\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
718
0.7
0
0
vue-tools
910
2023-11-19T17:20:04.446771
Apache-2.0
false
b6a24fae927f6b5ff0e8a493ca71ccd0
---\n# For reference on dataset card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1\n# Doc / guide: https://huggingface.co/docs/hub/datasets-cards\n{{ card_data }}\n---\n\n# Dataset Card for {{ pretty_name | default("Dataset Name", true) }}\n\n<!-- Provide a quick summary of the dataset. -->\n\n{{ dataset_summary | default("", true) }}\n\n## Dataset Details\n\n### Dataset Description\n\n<!-- Provide a longer summary of what this dataset is. -->\n\n{{ dataset_description | default("", true) }}\n\n- **Curated by:** {{ curators | default("[More Information Needed]", true)}}\n- **Funded by [optional]:** {{ funded_by | default("[More Information Needed]", true)}}\n- **Shared by [optional]:** {{ shared_by | default("[More Information Needed]", true)}}\n- **Language(s) (NLP):** {{ language | default("[More Information Needed]", true)}}\n- **License:** {{ license | default("[More Information Needed]", true)}}\n\n### Dataset Sources [optional]\n\n<!-- Provide the basic links for the dataset. -->\n\n- **Repository:** {{ repo | default("[More Information Needed]", true)}}\n- **Paper [optional]:** {{ paper | default("[More Information Needed]", true)}}\n- **Demo [optional]:** {{ demo | default("[More Information Needed]", true)}}\n\n## Uses\n\n<!-- Address questions around how the dataset is intended to be used. -->\n\n### Direct Use\n\n<!-- This section describes suitable use cases for the dataset. -->\n\n{{ direct_use | default("[More Information Needed]", true)}}\n\n### Out-of-Scope Use\n\n<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->\n\n{{ out_of_scope_use | default("[More Information Needed]", true)}}\n\n## Dataset Structure\n\n<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->\n\n{{ dataset_structure | default("[More Information Needed]", true)}}\n\n## Dataset Creation\n\n### Curation Rationale\n\n<!-- Motivation for the creation of this dataset. -->\n\n{{ curation_rationale_section | default("[More Information Needed]", true)}}\n\n### Source Data\n\n<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->\n\n#### Data Collection and Processing\n\n<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->\n\n{{ data_collection_and_processing_section | default("[More Information Needed]", true)}}\n\n#### Who are the source data producers?\n\n<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->\n\n{{ source_data_producers_section | default("[More Information Needed]", true)}}\n\n### Annotations [optional]\n\n<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->\n\n#### Annotation process\n\n<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. 
-->\n\n{{ annotation_process_section | default("[More Information Needed]", true)}}\n\n#### Who are the annotators?\n\n<!-- This section describes the people or systems who created the annotations. -->\n\n{{ who_are_annotators_section | default("[More Information Needed]", true)}}\n\n#### Personal and Sensitive Information\n\n<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->\n\n{{ personal_and_sensitive_information | default("[More Information Needed]", true)}}\n\n## Bias, Risks, and Limitations\n\n<!-- This section is meant to convey both technical and sociotechnical limitations. -->\n\n{{ bias_risks_limitations | default("[More Information Needed]", true)}}\n\n### Recommendations\n\n<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->\n\n{{ bias_recommendations | default("Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.", true)}}\n\n## Citation [optional]\n\n<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->\n\n**BibTeX:**\n\n{{ citation_bibtex | default("[More Information Needed]", true)}}\n\n**APA:**\n\n{{ citation_apa | default("[More Information Needed]", true)}}\n\n## Glossary [optional]\n\n<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->\n\n{{ glossary | default("[More Information Needed]", true)}}\n\n## More Information [optional]\n\n{{ more_information | default("[More Information Needed]", true)}}\n\n## Dataset Card Authors [optional]\n\n{{ dataset_card_authors | default("[More Information Needed]", true)}}\n\n## Dataset Card Contact\n\n{{ dataset_card_contact | default("[More Information Needed]", true)}}\n
.venv\Lib\site-packages\huggingface_hub\templates\datasetcard_template.md
datasetcard_template.md
Markdown
5,503
0.8
0.062937
0.363636
vue-tools
725
2025-05-29T19:45:42.510848
BSD-3-Clause
false
12c6a5ef189e67f5be69bc1bbe9549d5
# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""\nHelpful utility functions and classes in relation to exploring API endpoints\nwith the aim of a user-friendly interface.\n"""\n\nimport math\nimport re\nfrom typing import TYPE_CHECKING\n\nfrom ..repocard_data import ModelCardData\n\n\nif TYPE_CHECKING:\n from ..hf_api import ModelInfo\n\n\ndef _is_emission_within_threshold(model_info: "ModelInfo", minimum_threshold: float, maximum_threshold: float) -> bool:\n """Checks if a model's emission is within a given threshold.\n\n Args:\n model_info (`ModelInfo`):\n A model info object containing the model's emission information.\n minimum_threshold (`float`):\n A minimum carbon threshold to filter by, such as 1.\n maximum_threshold (`float`):\n A maximum carbon threshold to filter by, such as 10.\n\n Returns:\n `bool`: Whether the model's emission is within the given threshold.\n """\n if minimum_threshold is None and maximum_threshold is None:\n raise ValueError("`minimum_threshold` and `maximum_threshold` cannot both be `None`")\n if minimum_threshold is None:\n minimum_threshold = -1\n if maximum_threshold is None:\n maximum_threshold = math.inf\n\n card_data = getattr(model_info, "card_data", None)\n if card_data is None or not isinstance(card_data, (dict, ModelCardData)):\n return False\n\n # Get CO2 emission metadata\n emission = card_data.get("co2_eq_emissions", None)\n if isinstance(emission, dict):\n emission = emission["emissions"]\n if not emission:\n return False\n\n # Filter out if value is missing or out of range\n matched = re.search(r"\d+\.\d+|\d+", str(emission))\n if matched is None:\n return False\n\n emission_value = float(matched.group(0))\n return minimum_threshold <= emission_value <= maximum_threshold\n
.venv\Lib\site-packages\huggingface_hub\utils\endpoint_helpers.py
endpoint_helpers.py
Python
2,366
0.95
0.19697
0.240741
node-utils
243
2025-01-02T08:45:51.914348
BSD-3-Clause
false
5c16ef75b4cb7bcb73826a6cc3c9a6e6
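The regex step above is the whole trick: the first number found in the `co2_eq_emissions` value is treated as the emission value. A standalone illustration with made-up inputs:

```python
# Illustration of the value-extraction step in `_is_emission_within_threshold`.
import re

for emission in ("11.3", 11.3, "around 42 kg", "n/a"):
    matched = re.search(r"\d+\.\d+|\d+", str(emission))
    value = float(matched.group(0)) if matched else None
    print(f"{emission!r:>16} -> {value}")
# '11.3' -> 11.3, 11.3 -> 11.3, 'around 42 kg' -> 42.0, 'n/a' -> None
```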
# Taken from https://github.com/mlflow/mlflow/pull/10119\n#\n# DO NOT use this function for security purposes (e.g., password hashing).\n#\n# In Python >= 3.9, insecure hashing algorithms such as MD5 fail in FIPS-compliant\n# environments unless `usedforsecurity=False` is explicitly passed.\n#\n# References:\n# - https://github.com/mlflow/mlflow/issues/9905\n# - https://github.com/mlflow/mlflow/pull/10119\n# - https://docs.python.org/3/library/hashlib.html\n# - https://github.com/huggingface/transformers/pull/27038\n#\n# Usage:\n# ```python\n# # Use\n# from huggingface_hub.utils.insecure_hashlib import sha256\n# # instead of\n# from hashlib import sha256\n#\n# # Use\n# from huggingface_hub.utils import insecure_hashlib\n# # instead of\n# import hashlib\n# ```\nimport functools\nimport hashlib\nimport sys\n\n\nif sys.version_info >= (3, 9):\n md5 = functools.partial(hashlib.md5, usedforsecurity=False)\n sha1 = functools.partial(hashlib.sha1, usedforsecurity=False)\n sha256 = functools.partial(hashlib.sha256, usedforsecurity=False)\nelse:\n md5 = hashlib.md5\n sha1 = hashlib.sha1\n sha256 = hashlib.sha256\n
.venv\Lib\site-packages\huggingface_hub\utils\insecure_hashlib.py
insecure_hashlib.py
Python
1,142
0.95
0.078947
0.694444
vue-tools
561
2023-12-15T18:36:14.454670
BSD-3-Clause
false
fccce6d9804020c745555f84037bde82
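Usage mirrors the module's own comment block; the digests are identical to `hashlib`'s, only the `usedforsecurity` flag differs:

```python
from huggingface_hub.utils.insecure_hashlib import md5, sha256

# Same digests as plain hashlib, but safe on FIPS-enabled Python >= 3.9 builds.
print(sha256(b"hello").hexdigest())
# 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
print(md5(b"hello").hexdigest())
# 5d41402abc4b2a76b9719d911017c592
```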
# coding=utf-8\n# Copyright 2020 Optuna, Hugging Face\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Logging utilities."""\n\nimport logging\nimport os\nfrom logging import (\n CRITICAL, # NOQA\n DEBUG, # NOQA\n ERROR, # NOQA\n FATAL, # NOQA\n INFO, # NOQA\n NOTSET, # NOQA\n WARN, # NOQA\n WARNING, # NOQA\n)\nfrom typing import Optional\n\nfrom .. import constants\n\n\nlog_levels = {\n "debug": logging.DEBUG,\n "info": logging.INFO,\n "warning": logging.WARNING,\n "error": logging.ERROR,\n "critical": logging.CRITICAL,\n}\n\n_default_log_level = logging.WARNING\n\n\ndef _get_library_name() -> str:\n return __name__.split(".")[0]\n\n\ndef _get_library_root_logger() -> logging.Logger:\n return logging.getLogger(_get_library_name())\n\n\ndef _get_default_logging_level():\n """\n If `HF_HUB_VERBOSITY` env var is set to one of the valid choices, return it as the new default level. If it is not,\n fall back to `_default_log_level`.\n """\n env_level_str = os.getenv("HF_HUB_VERBOSITY", None)\n if env_level_str:\n if env_level_str in log_levels:\n return log_levels[env_level_str]\n else:\n logging.getLogger().warning(\n f"Unknown option HF_HUB_VERBOSITY={env_level_str}, has to be one of: {', '.join(log_levels.keys())}"\n )\n return _default_log_level\n\n\ndef _configure_library_root_logger() -> None:\n library_root_logger = _get_library_root_logger()\n library_root_logger.addHandler(logging.StreamHandler())\n library_root_logger.setLevel(_get_default_logging_level())\n\n\ndef _reset_library_root_logger() -> None:\n library_root_logger = _get_library_root_logger()\n library_root_logger.setLevel(logging.NOTSET)\n\n\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\n """\n Returns a logger with the specified name. 
This function is not supposed\n to be directly accessed by library users.\n\n Args:\n name (`str`, *optional*):\n The name of the logger to get, usually the filename.\n\n Example:\n\n ```python\n >>> from huggingface_hub.utils import logging\n\n >>> logger = logging.get_logger(__file__)\n >>> logging.set_verbosity_info()\n ```\n """\n\n if name is None:\n name = _get_library_name()\n\n return logging.getLogger(name)\n\n\ndef get_verbosity() -> int:\n """Return the current level for the HuggingFace Hub's root logger.\n\n Returns:\n Logging level, e.g., `huggingface_hub.logging.DEBUG` and\n `huggingface_hub.logging.INFO`.\n\n <Tip>\n\n HuggingFace Hub has the following logging levels:\n\n - `huggingface_hub.logging.CRITICAL`, `huggingface_hub.logging.FATAL`\n - `huggingface_hub.logging.ERROR`\n - `huggingface_hub.logging.WARNING`, `huggingface_hub.logging.WARN`\n - `huggingface_hub.logging.INFO`\n - `huggingface_hub.logging.DEBUG`\n\n </Tip>\n """\n return _get_library_root_logger().getEffectiveLevel()\n\n\ndef set_verbosity(verbosity: int) -> None:\n """\n Sets the level for the HuggingFace Hub's root logger.\n\n Args:\n verbosity (`int`):\n Logging level, e.g., `huggingface_hub.logging.DEBUG` and\n `huggingface_hub.logging.INFO`.\n """\n _get_library_root_logger().setLevel(verbosity)\n\n\ndef set_verbosity_info():\n """\n Sets the verbosity to `logging.INFO`.\n """\n return set_verbosity(INFO)\n\n\ndef set_verbosity_warning():\n """\n Sets the verbosity to `logging.WARNING`.\n """\n return set_verbosity(WARNING)\n\n\ndef set_verbosity_debug():\n """\n Sets the verbosity to `logging.DEBUG`.\n """\n return set_verbosity(DEBUG)\n\n\ndef set_verbosity_error():\n """\n Sets the verbosity to `logging.ERROR`.\n """\n return set_verbosity(ERROR)\n\n\ndef disable_propagation() -> None:\n """\n Disable propagation of the library log outputs. Note that log propagation is\n disabled by default.\n """\n _get_library_root_logger().propagate = False\n\n\ndef enable_propagation() -> None:\n """\n Enable propagation of the library log outputs. Please disable the\n HuggingFace Hub's default handler to prevent double logging if the root\n logger has been configured.\n """\n _get_library_root_logger().propagate = True\n\n\n_configure_library_root_logger()\n\nif constants.HF_DEBUG:\n # If `HF_DEBUG` environment variable is set, set the verbosity of `huggingface_hub` logger to `DEBUG`.\n set_verbosity_debug()\n
.venv\Lib\site-packages\huggingface_hub\utils\logging.py
logging.py
Python
4,909
0.95
0.12234
0.107143
vue-tools
82
2025-02-19T20:57:03.647683
BSD-3-Clause
false
01961cb73ca785a61e8ea68078089271
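A runnable version of the intended workflow (verbosity is controlled on the library root logger via the module-level helpers, not on individual `Logger` objects, which is why the docstring example above is adjusted to call `logging.set_verbosity_info()`):

```python
from huggingface_hub.utils import logging

logger = logging.get_logger()  # no name -> the "huggingface_hub" root logger

logging.set_verbosity_info()
logger.info("visible: root logger level is now INFO")

logging.set_verbosity_error()
logger.info("suppressed: level is back above INFO")
```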
"""Utilities to efficiently compute the SHA 256 hash of a bunch of bytes."""\n\nfrom typing import BinaryIO, Optional\n\nfrom .insecure_hashlib import sha1, sha256\n\n\ndef sha_fileobj(fileobj: BinaryIO, chunk_size: Optional[int] = None) -> bytes:\n """\n Computes the sha256 hash of the given file object, by chunks of size `chunk_size`.\n\n Args:\n fileobj (file-like object):\n The File object to compute sha256 for, typically obtained with `open(path, "rb")`\n chunk_size (`int`, *optional*):\n The number of bytes to read from `fileobj` at once, defaults to 1MB.\n\n Returns:\n `bytes`: `fileobj`'s sha256 hash as bytes\n """\n chunk_size = chunk_size if chunk_size is not None else 1024 * 1024\n\n sha = sha256()\n while True:\n chunk = fileobj.read(chunk_size)\n sha.update(chunk)\n if not chunk:\n break\n return sha.digest()\n\n\ndef git_hash(data: bytes) -> str:\n """\n Computes the git-sha1 hash of the given bytes, using the same algorithm as git.\n\n This is equivalent to running `git hash-object`. See https://git-scm.com/docs/git-hash-object\n for more details.\n\n Note: this method is valid for regular files. For LFS files, the proper git hash is supposed to be computed on the\n pointer file content, not the actual file content. However, for simplicity, we directly compare the sha256 of\n the LFS file content when we want to compare LFS files.\n\n Args:\n data (`bytes`):\n The data to compute the git-hash for.\n\n Returns:\n `str`: the git-hash of `data` as an hexadecimal string.\n\n Example:\n ```python\n >>> from huggingface_hub.utils.sha import git_hash\n >>> git_hash(b"Hello, World!")\n 'b45ef6fec89518d314f546fd6c3025367b721684'\n ```\n """\n # Taken from https://gist.github.com/msabramo/763200\n # Note: no need to optimize by reading the file in chunks as we're not supposed to hash huge files (5MB maximum).\n sha = sha1()\n sha.update(b"blob ")\n sha.update(str(len(data)).encode())\n sha.update(b"\0")\n sha.update(data)\n return sha.hexdigest()\n
.venv\Lib\site-packages\huggingface_hub\utils\sha.py
sha.py
Python
2,134
0.95
0.15625
0.04
node-utils
420
2025-01-16T15:51:18.384081
GPL-3.0
false
f5c5e1ba1524c7aaba725a1cb9a6bcc1
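Both helpers are directly checkable against their docstrings; `git_hash` reproduces `git hash-object` by hashing the blob header plus the content:

```python
import io

from huggingface_hub.utils.sha import git_hash, sha_fileobj

# Matches `printf 'Hello, World!' | git hash-object --stdin`
print(git_hash(b"Hello, World!"))
# b45ef6fec89518d314f546fd6c3025367b721684

# sha_fileobj reads by chunks; an in-memory stream works like an open file
print(sha_fileobj(io.BytesIO(b"Hello, World!"), chunk_size=4).hex())
```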
# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Utility helpers to handle progress bars in `huggingface_hub`.\n\nExample:\n 1. Use `huggingface_hub.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`.\n 2. To disable progress bars, either use `disable_progress_bars()` helper or set the\n environment variable `HF_HUB_DISABLE_PROGRESS_BARS` to 1.\n 3. To re-enable progress bars, use `enable_progress_bars()`.\n 4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`.\n\nNOTE: Environment variable `HF_HUB_DISABLE_PROGRESS_BARS` has priority.\n\nExample:\n ```py\n >>> from huggingface_hub.utils import are_progress_bars_disabled, disable_progress_bars, enable_progress_bars, tqdm\n\n # Disable progress bars globally\n >>> disable_progress_bars()\n\n # Use as normal `tqdm`\n >>> for _ in tqdm(range(5)):\n ... pass\n\n # Still not showing progress bars, as `disable=False` is overwritten to `True`.\n >>> for _ in tqdm(range(5), disable=False):\n ... pass\n\n >>> are_progress_bars_disabled()\n True\n\n # Re-enable progress bars globally\n >>> enable_progress_bars()\n\n # Progress bar will be shown!\n >>> for _ in tqdm(range(5)):\n ... pass\n 100%|███████████████████████████████████████| 5/5 [00:00<00:00, 117817.53it/s]\n ```\n\nGroup-based control:\n ```python\n # Disable progress bars for a specific group\n >>> disable_progress_bars("peft.foo")\n\n # Check state of different groups\n >>> assert not are_progress_bars_disabled("peft")\n >>> assert not are_progress_bars_disabled("peft.something")\n >>> assert are_progress_bars_disabled("peft.foo")\n >>> assert are_progress_bars_disabled("peft.foo.bar")\n\n # Enable progress bars for a subgroup\n >>> enable_progress_bars("peft.foo.bar")\n\n # Check if enabling a subgroup affects the parent group\n >>> assert are_progress_bars_disabled("peft.foo")\n >>> assert not are_progress_bars_disabled("peft.foo.bar")\n\n # No progress bar for `name="peft.foo"`\n >>> for _ in tqdm(range(5), name="peft.foo"):\n ... pass\n\n # Progress bar will be shown for `name="peft.foo.bar"`\n >>> for _ in tqdm(range(5), name="peft.foo.bar"):\n ... pass\n 100%|███████████████████████████████████████| 5/5 [00:00<00:00, 117817.53it/s]\n\n ```\n"""\n\nimport io\nimport logging\nimport os\nimport warnings\nfrom contextlib import contextmanager, nullcontext\nfrom pathlib import Path\nfrom typing import ContextManager, Dict, Iterator, Optional, Union\n\nfrom tqdm.auto import tqdm as old_tqdm\n\nfrom ..constants import HF_HUB_DISABLE_PROGRESS_BARS\n\n\n# The `HF_HUB_DISABLE_PROGRESS_BARS` environment variable can be True, False, or not set (None),\n# allowing for control over progress bar visibility. 
When set, this variable takes precedence\n# over programmatic settings, dictating whether progress bars should be shown or hidden globally.\n# Essentially, the environment variable's setting overrides any code-based configurations.\n#\n# If `HF_HUB_DISABLE_PROGRESS_BARS` is not defined (None), it implies that users can manage\n# progress bar visibility through code. By default, progress bars are turned on.\n\n\nprogress_bar_states: Dict[str, bool] = {}\n\n\ndef disable_progress_bars(name: Optional[str] = None) -> None:\n """\n Disable progress bars either globally or for a specified group.\n\n This function updates the state of progress bars based on a group name.\n If no group name is provided, all progress bars are disabled. The operation\n respects the `HF_HUB_DISABLE_PROGRESS_BARS` environment variable's setting.\n\n Args:\n name (`str`, *optional*):\n The name of the group for which to disable the progress bars. If None,\n progress bars are disabled globally.\n\n Raises:\n Warning: If the environment variable precludes changes.\n """\n if HF_HUB_DISABLE_PROGRESS_BARS is False:\n warnings.warn(\n "Cannot disable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=0` is set and has priority."\n )\n return\n\n if name is None:\n progress_bar_states.clear()\n progress_bar_states["_global"] = False\n else:\n keys_to_remove = [key for key in progress_bar_states if key.startswith(f"{name}.")]\n for key in keys_to_remove:\n del progress_bar_states[key]\n progress_bar_states[name] = False\n\n\ndef enable_progress_bars(name: Optional[str] = None) -> None:\n """\n Enable progress bars either globally or for a specified group.\n\n This function sets the progress bars to enabled for the specified group or globally\n if no group is specified. The operation is subject to the `HF_HUB_DISABLE_PROGRESS_BARS`\n environment setting.\n\n Args:\n name (`str`, *optional*):\n The name of the group for which to enable the progress bars. 
If None,\n progress bars are enabled globally.\n\n Raises:\n Warning: If the environment variable precludes changes.\n """\n if HF_HUB_DISABLE_PROGRESS_BARS is True:\n warnings.warn(\n "Cannot enable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=1` is set and has priority."\n )\n return\n\n if name is None:\n progress_bar_states.clear()\n progress_bar_states["_global"] = True\n else:\n keys_to_remove = [key for key in progress_bar_states if key.startswith(f"{name}.")]\n for key in keys_to_remove:\n del progress_bar_states[key]\n progress_bar_states[name] = True\n\n\ndef are_progress_bars_disabled(name: Optional[str] = None) -> bool:\n """\n Check if progress bars are disabled globally or for a specific group.\n\n This function returns whether progress bars are disabled for a given group or globally.\n It checks the `HF_HUB_DISABLE_PROGRESS_BARS` environment variable first, then the programmatic\n settings.\n\n Args:\n name (`str`, *optional*):\n The group name to check; if None, checks the global setting.\n\n Returns:\n `bool`: True if progress bars are disabled, False otherwise.\n """\n if HF_HUB_DISABLE_PROGRESS_BARS is True:\n return True\n\n if name is None:\n return not progress_bar_states.get("_global", True)\n\n while name:\n if name in progress_bar_states:\n return not progress_bar_states[name]\n name = ".".join(name.split(".")[:-1])\n\n return not progress_bar_states.get("_global", True)\n\n\ndef is_tqdm_disabled(log_level: int) -> Optional[bool]:\n """\n Determine if tqdm progress bars should be disabled based on logging level and environment settings.\n\n see https://github.com/huggingface/huggingface_hub/pull/2000 and https://github.com/huggingface/huggingface_hub/pull/2698.\n """\n if log_level == logging.NOTSET:\n return True\n if os.getenv("TQDM_POSITION") == "-1":\n return False\n return None\n\n\nclass tqdm(old_tqdm):\n """\n Class to override `disable` argument in case progress bars are globally disabled.\n\n Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.\n """\n\n def __init__(self, *args, **kwargs):\n name = kwargs.pop("name", None) # do not pass `name` to `tqdm`\n if are_progress_bars_disabled(name):\n kwargs["disable"] = True\n super().__init__(*args, **kwargs)\n\n def __delattr__(self, attr: str) -> None:\n """Fix for https://github.com/huggingface/huggingface_hub/issues/1603"""\n try:\n super().__delattr__(attr)\n except AttributeError:\n if attr != "_lock":\n raise\n\n\n@contextmanager\ndef tqdm_stream_file(path: Union[Path, str]) -> Iterator[io.BufferedReader]:\n """\n Open a file as binary and wrap the `read` method to display a progress bar when it's streamed.\n\n First implemented in `transformers` in 2019 but removed when switched to git-lfs. Used in `huggingface_hub` to show\n progress bar when uploading an LFS file to the Hub. See github.com/huggingface/transformers/pull/2078#discussion_r354739608\n for implementation details.\n\n Note: currently implementation handles only files stored on disk as it is the most common use case. 
Could be\n extended to stream any `BinaryIO` object but we might have to debug some corner cases.\n\n Example:\n ```py\n >>> with tqdm_stream_file("config.json") as f:\n >>> requests.put(url, data=f)\n config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s]\n ```\n """\n if isinstance(path, str):\n path = Path(path)\n\n with path.open("rb") as f:\n total_size = path.stat().st_size\n pbar = tqdm(\n unit="B",\n unit_scale=True,\n total=total_size,\n initial=0,\n desc=path.name,\n )\n\n f_read = f.read\n\n def _inner_read(size: Optional[int] = -1) -> bytes:\n data = f_read(size)\n pbar.update(len(data))\n return data\n\n f.read = _inner_read # type: ignore\n\n yield f\n\n pbar.close()\n\n\ndef _get_progress_bar_context(\n *,\n desc: str,\n log_level: int,\n total: Optional[int] = None,\n initial: int = 0,\n unit: str = "B",\n unit_scale: bool = True,\n name: Optional[str] = None,\n _tqdm_bar: Optional[tqdm] = None,\n) -> ContextManager[tqdm]:\n if _tqdm_bar is not None:\n return nullcontext(_tqdm_bar)\n # ^ `contextlib.nullcontext` mimics a context manager that does nothing\n # Makes it easier to use the same code path for both cases but in the latter\n # case, the progress bar is not closed when exiting the context manager.\n\n return tqdm(\n unit=unit,\n unit_scale=unit_scale,\n total=total,\n initial=initial,\n desc=desc,\n disable=is_tqdm_disabled(log_level=log_level),\n name=name,\n )\n
.venv\Lib\site-packages\huggingface_hub\utils\tqdm.py
tqdm.py
Python
10,671
0.95
0.198697
0.14876
awesome-app
533
2025-01-28T01:03:32.100392
BSD-3-Clause
false
bcfdb372595f820bda03aed4f548aee0
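The group semantics above (children inherit the setting, parents are untouched) in a compact, runnable form, assuming `HF_HUB_DISABLE_PROGRESS_BARS` is unset:

```python
from huggingface_hub.utils import (
    are_progress_bars_disabled,
    disable_progress_bars,
    enable_progress_bars,
    tqdm,
)

disable_progress_bars("peft.foo")
assert are_progress_bars_disabled("peft.foo.bar")  # inherited from "peft.foo"
assert not are_progress_bars_disabled("peft")      # parent group unaffected

enable_progress_bars("peft.foo.bar")               # re-enable just the subgroup
for _ in tqdm(range(5), name="peft.foo"):          # hidden
    pass
for _ in tqdm(range(5), name="peft.foo.bar"):      # shown
    pass
```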
# Copyright 2023 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains a helper to get the token from machine (env variable, secret or config file)."""\n\nimport configparser\nimport logging\nimport os\nimport warnings\nfrom pathlib import Path\nfrom threading import Lock\nfrom typing import Dict, Optional\n\nfrom .. import constants\nfrom ._runtime import is_colab_enterprise, is_google_colab\n\n\n_IS_GOOGLE_COLAB_CHECKED = False\n_GOOGLE_COLAB_SECRET_LOCK = Lock()\n_GOOGLE_COLAB_SECRET: Optional[str] = None\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_token() -> Optional[str]:\n """\n Get token if user is logged in.\n\n Note: in most cases, you should use [`huggingface_hub.utils.build_hf_headers`] instead. This method is only useful\n if you want to retrieve the token for other purposes than sending an HTTP request.\n\n Token is retrieved in priority from the `HF_TOKEN` environment variable. Otherwise, we read the token file located\n in the Hugging Face home folder. Returns None if user is not logged in. To log in, use [`login`] or\n `huggingface-cli login`.\n\n Returns:\n `str` or `None`: The token, `None` if it doesn't exist.\n """\n return _get_token_from_google_colab() or _get_token_from_environment() or _get_token_from_file()\n\n\ndef _get_token_from_google_colab() -> Optional[str]:\n """Get token from Google Colab secrets vault using `google.colab.userdata.get(...)`.\n\n Token is read from the vault only once per session and then stored in a global variable to avoid re-requesting\n access to the vault.\n """\n # If it's not a Google Colab or it's Colab Enterprise, fall back to environment variable or token file authentication\n if not is_google_colab() or is_colab_enterprise():\n return None\n\n # `google.colab.userdata` is not thread-safe\n # This can lead to a deadlock if multiple threads try to access it at the same time\n # (typically when using `snapshot_download`)\n # => use a lock\n # See https://github.com/huggingface/huggingface_hub/issues/1952 for more details.\n with _GOOGLE_COLAB_SECRET_LOCK:\n global _GOOGLE_COLAB_SECRET\n global _IS_GOOGLE_COLAB_CHECKED\n\n if _IS_GOOGLE_COLAB_CHECKED: # request access only once\n return _GOOGLE_COLAB_SECRET\n\n try:\n from google.colab import userdata # type: ignore\n from google.colab.errors import Error as ColabError # type: ignore\n except ImportError:\n return None\n\n try:\n token = userdata.get("HF_TOKEN")\n _GOOGLE_COLAB_SECRET = _clean_token(token)\n except userdata.NotebookAccessError:\n # Means the user has a secret called `HF_TOKEN` and got a popup "please grant access to HF_TOKEN" and refused it\n # => warn user but ignore error => do not re-request access to user\n warnings.warn(\n "\nAccess to the secret `HF_TOKEN` has not been granted on this notebook."\n "\nYou will not be requested again."\n "\nPlease restart the session if you want to be prompted again."\n )\n _GOOGLE_COLAB_SECRET = None\n except userdata.SecretNotFoundError:\n # Means the user did not define a 
`HF_TOKEN` secret => warn\n warnings.warn(\n "\nThe secret `HF_TOKEN` does not exist in your Colab secrets."\n "\nTo authenticate with the Hugging Face Hub, create a token in your settings tab "\n "(https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session."\n "\nYou will be able to reuse this secret in all of your notebooks."\n "\nPlease note that authentication is recommended but still optional to access public models or datasets."\n )\n _GOOGLE_COLAB_SECRET = None\n except ColabError as e:\n # Something happened but we don't know what => recommend opening a GitHub issue\n warnings.warn(\n f"\nError while fetching `HF_TOKEN` secret value from your vault: '{str(e)}'."\n "\nYou are not authenticated with the Hugging Face Hub in this notebook."\n "\nIf the error persists, please let us know by opening an issue on GitHub "\n "(https://github.com/huggingface/huggingface_hub/issues/new)."\n )\n _GOOGLE_COLAB_SECRET = None\n\n _IS_GOOGLE_COLAB_CHECKED = True\n return _GOOGLE_COLAB_SECRET\n\n\ndef _get_token_from_environment() -> Optional[str]:\n # `HF_TOKEN` has priority (keep `HUGGING_FACE_HUB_TOKEN` for backward compatibility)\n return _clean_token(os.environ.get("HF_TOKEN") or os.environ.get("HUGGING_FACE_HUB_TOKEN"))\n\n\ndef _get_token_from_file() -> Optional[str]:\n try:\n return _clean_token(Path(constants.HF_TOKEN_PATH).read_text())\n except FileNotFoundError:\n return None\n\n\ndef get_stored_tokens() -> Dict[str, str]:\n """\n Returns the parsed INI file containing the access tokens.\n The file is located at `HF_STORED_TOKENS_PATH`, defaulting to `~/.cache/huggingface/stored_tokens`.\n If the file does not exist, an empty dictionary is returned.\n\n Returns: `Dict[str, str]`\n Key is the token name and value is the token.\n """\n tokens_path = Path(constants.HF_STORED_TOKENS_PATH)\n if not tokens_path.exists():\n stored_tokens = {}\n config = configparser.ConfigParser()\n try:\n config.read(tokens_path)\n stored_tokens = {token_name: config.get(token_name, "hf_token") for token_name in config.sections()}\n except configparser.Error as e:\n logger.error(f"Error parsing stored tokens file: {e}")\n stored_tokens = {}\n return stored_tokens\n\n\ndef _save_stored_tokens(stored_tokens: Dict[str, str]) -> None:\n """\n Saves the given configuration to the stored tokens file.\n\n Args:\n stored_tokens (`Dict[str, str]`):\n The stored tokens to save. 
Key is the token name and value is the token.\n """\n stored_tokens_path = Path(constants.HF_STORED_TOKENS_PATH)\n\n # Write the stored tokens into an INI file\n config = configparser.ConfigParser()\n for token_name in sorted(stored_tokens.keys()):\n config.add_section(token_name)\n config.set(token_name, "hf_token", stored_tokens[token_name])\n\n stored_tokens_path.parent.mkdir(parents=True, exist_ok=True)\n with stored_tokens_path.open("w") as config_file:\n config.write(config_file)\n\n\ndef _get_token_by_name(token_name: str) -> Optional[str]:\n """\n Get the token by name.\n\n Args:\n token_name (`str`):\n The name of the token to get.\n\n Returns:\n `str` or `None`: The token, `None` if it doesn't exist.\n\n """\n stored_tokens = get_stored_tokens()\n if token_name not in stored_tokens:\n return None\n return _clean_token(stored_tokens[token_name])\n\n\ndef _save_token(token: str, token_name: str) -> None:\n """\n Save the given token.\n\n If the stored tokens file does not exist, it will be created.\n Args:\n token (`str`):\n The token to save.\n token_name (`str`):\n The name of the token.\n """\n tokens_path = Path(constants.HF_STORED_TOKENS_PATH)\n stored_tokens = get_stored_tokens()\n stored_tokens[token_name] = token\n _save_stored_tokens(stored_tokens)\n logger.info(f"The token `{token_name}` has been saved to {tokens_path}")\n\n\ndef _clean_token(token: Optional[str]) -> Optional[str]:\n """Clean token by removing trailing and leading spaces and newlines.\n\n If token is an empty string, return None.\n """\n if token is None:\n return None\n return token.replace("\r", "").replace("\n", "").strip() or None\n
.venv\Lib\site-packages\huggingface_hub\utils\_auth.py
_auth.py
Python
8,294
0.95
0.154206
0.144509
react-lib
806
2024-01-31T17:23:47.122970
Apache-2.0
false
e9e7ce46c80afe3ec370a8268bc6d79c
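Resolution order in practice (outside Colab): environment first, then the token file. A small sketch; the token value is obviously fake:

```python
import os

from huggingface_hub.utils._auth import get_token

os.environ["HF_TOKEN"] = " hf_fake_token_for_demo \n"  # whitespace/newlines are stripped
print(get_token())
# hf_fake_token_for_demo
```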
# coding=utf-8\n# Copyright 2022-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains utilities to handle datetimes in Huggingface Hub."""\n\nfrom datetime import datetime, timezone\n\n\ndef parse_datetime(date_string: str) -> datetime:\n """\n Parses a date_string returned from the server to a datetime object.\n\n This parser is a weak parser in the sense that it handles only a single format of\n date_string. It is expected that the server format will never change. The\n implementation depends only on the standard lib to avoid an external dependency\n (python-dateutil). See full discussion about this decision on PR:\n https://github.com/huggingface/huggingface_hub/pull/999.\n\n Example:\n ```py\n >>> parse_datetime('2022-08-19T07:19:38.123Z')\n datetime.datetime(2022, 8, 19, 7, 19, 38, 123000, tzinfo=timezone.utc)\n ```\n\n Args:\n date_string (`str`):\n A string representing a datetime returned by the Hub server.\n String is expected to follow '%Y-%m-%dT%H:%M:%S.%fZ' pattern.\n\n Returns:\n A python datetime object.\n\n Raises:\n :class:`ValueError`:\n If `date_string` cannot be parsed.\n """\n try:\n # Normalize the string to always have 6 digits of fractional seconds\n if date_string.endswith("Z"):\n # Case 1: No decimal point (e.g., "2024-11-16T00:27:02Z")\n if "." not in date_string:\n # No fractional seconds - insert .000000\n date_string = date_string[:-1] + ".000000Z"\n # Case 2: Has decimal point (e.g., "2022-08-19T07:19:38.123456789Z")\n else:\n # Get the fractional and base parts\n base, fraction = date_string[:-1].split(".")\n # fraction[:6] takes first 6 digits and :0<6 pads with zeros if less than 6 digits\n date_string = f"{base}.{fraction[:6]:0<6}Z"\n\n return datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=timezone.utc)\n except ValueError as e:\n raise ValueError(\n f"Cannot parse '{date_string}' as a datetime. Date string is expected to"\n " follow '%Y-%m-%dT%H:%M:%S.%fZ' pattern."\n ) from e\n
.venv\Lib\site-packages\huggingface_hub\utils\_datetime.py
_datetime.py
Python
2,770
0.95
0.104478
0.344828
awesome-app
167
2025-02-02T05:25:35.827069
MIT
false
d65005eb9ecb1463d2c613f3d152d294
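The three normalization branches, exercised directly:

```python
from huggingface_hub.utils._datetime import parse_datetime

print(parse_datetime("2024-11-16T00:27:02Z"))            # no fraction -> .000000 inserted
print(parse_datetime("2022-08-19T07:19:38.123Z"))        # short fraction -> right-padded to 6 digits
print(parse_datetime("2022-08-19T07:19:38.123456789Z"))  # nanoseconds -> truncated to 6 digits
```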
import warnings\nfrom functools import wraps\nfrom inspect import Parameter, signature\nfrom typing import Iterable, Optional\n\n\ndef _deprecate_positional_args(*, version: str):\n """Decorator for methods that issues warnings for positional arguments.\n Using the keyword-only argument syntax in pep 3102, arguments after the\n * will issue a warning when passed as a positional argument.\n\n Args:\n version (`str`):\n The version when positional arguments will result in error.\n """\n\n def _inner_deprecate_positional_args(f):\n sig = signature(f)\n kwonly_args = []\n all_args = []\n for name, param in sig.parameters.items():\n if param.kind == Parameter.POSITIONAL_OR_KEYWORD:\n all_args.append(name)\n elif param.kind == Parameter.KEYWORD_ONLY:\n kwonly_args.append(name)\n\n @wraps(f)\n def inner_f(*args, **kwargs):\n extra_args = len(args) - len(all_args)\n if extra_args <= 0:\n return f(*args, **kwargs)\n # extra_args > 0\n args_msg = [\n f"{name}='{arg}'" if isinstance(arg, str) else f"{name}={arg}"\n for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])\n ]\n args_msg = ", ".join(args_msg)\n warnings.warn(\n f"Deprecated positional argument(s) used in '{f.__name__}': pass"\n f" {args_msg} as keyword args. From version {version} passing these"\n " as positional arguments will result in an error.",\n FutureWarning,\n )\n kwargs.update(zip(sig.parameters, args))\n return f(**kwargs)\n\n return inner_f\n\n return _inner_deprecate_positional_args\n\n\ndef _deprecate_arguments(\n *,\n version: str,\n deprecated_args: Iterable[str],\n custom_message: Optional[str] = None,\n):\n """Decorator to issue warnings when using deprecated arguments.\n\n TODO: could be useful to be able to set a custom error message.\n\n Args:\n version (`str`):\n The version when deprecated arguments will result in error.\n deprecated_args (`List[str]`):\n List of the arguments to be deprecated.\n custom_message (`str`, *optional*):\n Warning message that is raised. If not passed, a default warning message\n will be created.\n """\n\n def _inner_deprecate_positional_args(f):\n sig = signature(f)\n\n @wraps(f)\n def inner_f(*args, **kwargs):\n # Check for used deprecated arguments\n used_deprecated_args = []\n for _, parameter in zip(args, sig.parameters.values()):\n if parameter.name in deprecated_args:\n used_deprecated_args.append(parameter.name)\n for kwarg_name, kwarg_value in kwargs.items():\n if (\n # If argument is deprecated but still used\n kwarg_name in deprecated_args\n # And the value is not the default value\n and kwarg_value != sig.parameters[kwarg_name].default\n ):\n used_deprecated_args.append(kwarg_name)\n\n # Warn and proceed\n if len(used_deprecated_args) > 0:\n message = (\n f"Deprecated argument(s) used in '{f.__name__}':"\n f" {', '.join(used_deprecated_args)}. Will not be supported from"\n f" version '{version}'."\n )\n if custom_message is not None:\n message += "\n\n" + custom_message\n warnings.warn(message, FutureWarning)\n return f(*args, **kwargs)\n\n return inner_f\n\n return _inner_deprecate_positional_args\n\n\ndef _deprecate_method(*, version: str, message: Optional[str] = None):\n """Decorator to issue warnings when using a deprecated method.\n\n Args:\n version (`str`):\n The version when deprecated arguments will result in error.\n message (`str`, *optional*):\n Warning message that is raised. 
If not passed, a default warning message\n will be created.\n """\n\n def _inner_deprecate_method(f):\n name = f.__name__\n if name == "__init__":\n name = f.__qualname__.split(".")[0] # class name instead of method name\n\n @wraps(f)\n def inner_f(*args, **kwargs):\n warning_message = (\n f"'{name}' (from '{f.__module__}') is deprecated and will be removed from version '{version}'."\n )\n if message is not None:\n warning_message += " " + message\n warnings.warn(warning_message, FutureWarning)\n return f(*args, **kwargs)\n\n return inner_f\n\n return _inner_deprecate_method\n
.venv\Lib\site-packages\huggingface_hub\utils\_deprecation.py
_deprecation.py
Python
4,872
0.95
0.191176
0.061947
react-lib
647
2024-02-02T13:22:09.001197
Apache-2.0
false
707b41fb925ff8fb3e09d3535e74b3ce
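A hypothetical decorated function showing when the warning fires; `fetch`, `old_arg`, and the version string are made up for the sketch:

```python
import warnings

from huggingface_hub.utils._deprecation import _deprecate_arguments

@_deprecate_arguments(version="2.0", deprecated_args=["old_arg"])
def fetch(repo_id, old_arg=None):
    return repo_id

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    fetch("gpt2", old_arg="x")  # differs from the default -> FutureWarning
    fetch("gpt2")               # default value -> silent
print([str(w.message) for w in caught])  # exactly one warning recorded
```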
# coding=utf-8\n# Copyright 2023-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains utilities to flag a feature as "experimental" in Huggingface Hub."""\n\nimport warnings\nfrom functools import wraps\nfrom typing import Callable\n\nfrom .. import constants\n\n\ndef experimental(fn: Callable) -> Callable:\n """Decorator to flag a feature as experimental.\n\n An experimental feature triggers a warning when used as it might be subject to breaking changes without prior notice\n in the future.\n\n Warnings can be disabled by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment variable.\n\n Args:\n fn (`Callable`):\n The function to flag as experimental.\n\n Returns:\n `Callable`: The decorated function.\n\n Example:\n\n ```python\n >>> from huggingface_hub.utils import experimental\n\n >>> @experimental\n ... def my_function():\n ... print("Hello world!")\n\n >>> my_function()\n UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future without prior\n notice. You can disable this warning by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment variable.\n Hello world!\n ```\n """\n # For classes, put the "experimental" around the "__new__" method => __new__ will be removed in warning message\n name = fn.__qualname__[: -len(".__new__")] if fn.__qualname__.endswith(".__new__") else fn.__qualname__\n\n @wraps(fn)\n def _inner_fn(*args, **kwargs):\n if not constants.HF_HUB_DISABLE_EXPERIMENTAL_WARNING:\n warnings.warn(\n f"'{name}' is experimental and might be subject to breaking changes in the future without prior notice."\n " You can disable this warning by setting `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` as environment"\n " variable.",\n UserWarning,\n )\n return fn(*args, **kwargs)\n\n return _inner_fn\n
.venv\Lib\site-packages\huggingface_hub\utils\_experimental.py
_experimental.py
Python
2,470
0.95
0.117647
0.277778
python-kit
508
2023-09-07T05:00:00.873754
Apache-2.0
false
f15f45b835f27137b2795534e0b915ef
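The docstring example, runnable as-is; the warning disappears when `HF_HUB_DISABLE_EXPERIMENTAL_WARNING=1` is set:

```python
from huggingface_hub.utils import experimental

@experimental
def my_function():
    print("Hello world!")

my_function()
# UserWarning: 'my_function' is experimental ... then prints "Hello world!"
```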
# coding=utf-8\n# Copyright 2022-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains utilities to manage Git credentials."""\n\nimport re\nimport subprocess\nfrom typing import List, Optional\n\nfrom ..constants import ENDPOINT\nfrom ._subprocess import run_interactive_subprocess, run_subprocess\n\n\nGIT_CREDENTIAL_REGEX = re.compile(\n r"""\n ^\s* # start of line\n credential\.helper # credential.helper value\n \s*=\s* # separator\n (\w+) # the helper name (group 1)\n (\s|$) # whitespace or end of line\n """,\n flags=re.MULTILINE | re.IGNORECASE | re.VERBOSE,\n)\n\n\ndef list_credential_helpers(folder: Optional[str] = None) -> List[str]:\n """Return the list of git credential helpers configured.\n\n See https://git-scm.com/docs/gitcredentials.\n\n Helpers (store, cache, macOS keychain,...) are parsed from the output of "`git config --list`".\n\n Args:\n folder (`str`, *optional*):\n The folder in which to check the configured helpers.\n """\n try:\n output = run_subprocess("git config --list", folder=folder).stdout\n parsed = _parse_credential_output(output)\n return parsed\n except subprocess.CalledProcessError as exc:\n raise EnvironmentError(exc.stderr)\n\n\ndef set_git_credential(token: str, username: str = "hf_user", folder: Optional[str] = None) -> None:\n """Save a username/token pair in git credential for HF Hub registry.\n\n Credentials are saved in all configured helpers (store, cache, macOS keychain,...).\n Calls "`git credential approve`" internally. See https://git-scm.com/docs/git-credential.\n\n Args:\n username (`str`, defaults to `"hf_user"`):\n A git username. Defaults to `"hf_user"`, the default user used in the Hub.\n token (`str`):\n A git password. In practice, the User Access Token for the Hub.\n See https://huggingface.co/settings/tokens.\n folder (`str`, *optional*):\n The folder in which to check the configured helpers.\n """\n with run_interactive_subprocess("git credential approve", folder=folder) as (\n stdin,\n _,\n ):\n stdin.write(f"url={ENDPOINT}\nusername={username.lower()}\npassword={token}\n\n")\n stdin.flush()\n\n\ndef unset_git_credential(username: str = "hf_user", folder: Optional[str] = None) -> None:\n """Erase credentials from git credential for HF Hub registry.\n\n Credentials are erased from the configured helpers (store, cache, macOS\n keychain,...), if any. If `username` is not provided, any credential configured for\n HF Hub endpoint is erased.\n Calls "`git credential reject`" internally. See https://git-scm.com/docs/git-credential.\n\n Args:\n username (`str`, defaults to `"hf_user"`):\n A git username. 
Defaults to `"hf_user"`, the default user used in the Hub.\n folder (`str`, *optional*):\n The folder in which to check the configured helpers.\n """\n with run_interactive_subprocess("git credential reject", folder=folder) as (\n stdin,\n _,\n ):\n standard_input = f"url={ENDPOINT}\n"\n if username is not None:\n standard_input += f"username={username.lower()}\n"\n standard_input += "\n"\n\n stdin.write(standard_input)\n stdin.flush()\n\n\ndef _parse_credential_output(output: str) -> List[str]:\n """Parse the output of `git config --list` to extract the configured credential helpers.\n\n Args:\n output (`str`):\n The output of `git config --list`.\n """\n # NOTE: If the user has set a helper for a custom URL, it will not be caught here.\n # Example: `credential.https://huggingface.co.helper=store`\n # See: https://github.com/huggingface/huggingface_hub/pull/1138#discussion_r1013324508\n return sorted( # Sort for nice printing\n set( # Might have some duplicates\n match[0] for match in GIT_CREDENTIAL_REGEX.findall(output)\n )\n )\n
.venv\Lib\site-packages\huggingface_hub\utils\_git_credential.py
_git_credential.py
Python
4,596
0.95
0.123967
0.17
python-kit
748
2025-05-24T18:11:38.786775
BSD-3-Clause
false
08e345e9cb1eb68b86875cf05161023e
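What the regex extracts is the helper name, deduplicated and sorted; a fabricated `git config --list` dump makes that concrete:

```python
from huggingface_hub.utils._git_credential import _parse_credential_output

sample = """\
user.name=Jane Doe
credential.helper=store
credential.helper=osxkeychain
credential.helper=store
credential.https://example.com.helper=store
"""
print(_parse_credential_output(sample))
# ['osxkeychain', 'store'] -- the per-URL helper on the last line is not matched
```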
# coding=utf-8\n# Copyright 2022-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains utilities to handle HTTP requests in Huggingface Hub."""\n\nimport io\nimport os\nimport re\nimport threading\nimport time\nimport uuid\nfrom functools import lru_cache\nfrom http import HTTPStatus\nfrom shlex import quote\nfrom typing import Any, Callable, List, Optional, Tuple, Type, Union\n\nimport requests\nfrom requests import HTTPError, Response\nfrom requests.adapters import HTTPAdapter\nfrom requests.models import PreparedRequest\n\nfrom huggingface_hub.errors import OfflineModeIsEnabled\n\nfrom .. import constants\nfrom ..errors import (\n BadRequestError,\n DisabledRepoError,\n EntryNotFoundError,\n GatedRepoError,\n HfHubHTTPError,\n RepositoryNotFoundError,\n RevisionNotFoundError,\n)\nfrom . import logging\nfrom ._fixes import JSONDecodeError\nfrom ._lfs import SliceFileObj\nfrom ._typing import HTTP_METHOD_T\n\n\nlogger = logging.get_logger(__name__)\n\n# Both headers are used by the Hub to debug failed requests.\n# `X_AMZN_TRACE_ID` is better as it also works to debug on Cloudfront and ALB.\n# If `X_AMZN_TRACE_ID` is set, the Hub will use it as well.\nX_AMZN_TRACE_ID = "X-Amzn-Trace-Id"\nX_REQUEST_ID = "x-request-id"\n\nREPO_API_REGEX = re.compile(\n r"""\n # staging or production endpoint\n ^https://[^/]+\n (\n # on /api/repo_type/repo_id\n /api/(models|datasets|spaces)/(.+)\n |\n # or /repo_id/resolve/revision/...\n /(.+)/resolve/(.+)\n )\n """,\n flags=re.VERBOSE,\n)\n\n\nclass UniqueRequestIdAdapter(HTTPAdapter):\n X_AMZN_TRACE_ID = "X-Amzn-Trace-Id"\n\n def add_headers(self, request, **kwargs):\n super().add_headers(request, **kwargs)\n\n # Add random request ID => easier for server-side debug\n if X_AMZN_TRACE_ID not in request.headers:\n request.headers[X_AMZN_TRACE_ID] = request.headers.get(X_REQUEST_ID) or str(uuid.uuid4())\n\n # Add debug log\n has_token = len(str(request.headers.get("authorization", ""))) > 0\n logger.debug(\n f"Request {request.headers[X_AMZN_TRACE_ID]}: {request.method} {request.url} (authenticated: {has_token})"\n )\n\n def send(self, request: PreparedRequest, *args, **kwargs) -> Response:\n """Catch any RequestException to append request id to the error message for debugging."""\n if constants.HF_DEBUG:\n logger.debug(f"Send: {_curlify(request)}")\n try:\n return super().send(request, *args, **kwargs)\n except requests.RequestException as e:\n request_id = request.headers.get(X_AMZN_TRACE_ID)\n if request_id is not None:\n # Taken from https://stackoverflow.com/a/58270258\n e.args = (*e.args, f"(Request ID: {request_id})")\n raise\n\n\nclass OfflineAdapter(HTTPAdapter):\n def send(self, request: PreparedRequest, *args, **kwargs) -> Response:\n raise OfflineModeIsEnabled(\n f"Cannot reach {request.url}: offline mode is enabled. 
To disable it, please unset the `HF_HUB_OFFLINE` environment variable."\n )\n\n\ndef _default_backend_factory() -> requests.Session:\n session = requests.Session()\n if constants.HF_HUB_OFFLINE:\n session.mount("http://", OfflineAdapter())\n session.mount("https://", OfflineAdapter())\n else:\n session.mount("http://", UniqueRequestIdAdapter())\n session.mount("https://", UniqueRequestIdAdapter())\n return session\n\n\nBACKEND_FACTORY_T = Callable[[], requests.Session]\n_GLOBAL_BACKEND_FACTORY: BACKEND_FACTORY_T = _default_backend_factory\n\n\ndef configure_http_backend(backend_factory: BACKEND_FACTORY_T = _default_backend_factory) -> None:\n """\n Configure the HTTP backend by providing a `backend_factory`. Any HTTP calls made by `huggingface_hub` will use a\n Session object instantiated by this factory. This can be useful if you are running your scripts in a specific\n environment requiring custom configuration (e.g. custom proxy or certificates).\n\n Use [`get_session`] to get a configured Session. Since `requests.Session` is not guaranteed to be thread-safe,\n `huggingface_hub` creates 1 Session instance per thread. They are all instantiated using the same `backend_factory`\n set in [`configure_http_backend`]. An LRU cache is used to cache the created sessions (and connections) between\n calls. Max size is 128 to avoid memory leaks if thousands of threads are spawned.\n\n See [this issue](https://github.com/psf/requests/issues/2766) to know more about thread-safety in `requests`.\n\n Example:\n ```py\n import requests\n from huggingface_hub import configure_http_backend, get_session\n\n # Create a factory function that returns a Session with configured proxies\n def backend_factory() -> requests.Session:\n session = requests.Session()\n session.proxies = {"http": "http://10.10.1.10:3128", "https": "https://10.10.1.11:1080"}\n return session\n\n # Set it as the default session factory\n configure_http_backend(backend_factory=backend_factory)\n\n # In practice, this is mostly done internally in `huggingface_hub`\n session = get_session()\n ```\n """\n global _GLOBAL_BACKEND_FACTORY\n _GLOBAL_BACKEND_FACTORY = backend_factory\n reset_sessions()\n\n\ndef get_session() -> requests.Session:\n """\n Get a `requests.Session` object, using the session factory from the user.\n\n Use [`get_session`] to get a configured Session. Since `requests.Session` is not guaranteed to be thread-safe,\n `huggingface_hub` creates 1 Session instance per thread. They are all instantiated using the same `backend_factory`\n set in [`configure_http_backend`]. An LRU cache is used to cache the created sessions (and connections) between\n calls. 
Max size is 128 to avoid memory leaks if thousands of threads are spawned.\n\n See [this issue](https://github.com/psf/requests/issues/2766) to know more about thread-safety in `requests`.\n\n Example:\n ```py\n import requests\n from huggingface_hub import configure_http_backend, get_session\n\n # Create a factory function that returns a Session with configured proxies\n def backend_factory() -> requests.Session:\n session = requests.Session()\n session.proxies = {"http": "http://10.10.1.10:3128", "https": "https://10.10.1.11:1080"}\n return session\n\n # Set it as the default session factory\n configure_http_backend(backend_factory=backend_factory)\n\n # In practice, this is mostly done internally in `huggingface_hub`\n session = get_session()\n ```\n """\n return _get_session_from_cache(process_id=os.getpid(), thread_id=threading.get_ident())\n\n\ndef reset_sessions() -> None:\n """Reset the cache of sessions.\n\n Mostly used internally when sessions are reconfigured or an SSLError is raised.\n See [`configure_http_backend`] for more details.\n """\n _get_session_from_cache.cache_clear()\n\n\n@lru_cache\ndef _get_session_from_cache(process_id: int, thread_id: int) -> requests.Session:\n """\n Create a new session per thread using global factory. Using LRU cache (maxsize 128) to avoid memory leaks when\n using thousands of threads. Cache is cleared when `configure_http_backend` is called.\n """\n return _GLOBAL_BACKEND_FACTORY()\n\n\ndef http_backoff(\n method: HTTP_METHOD_T,\n url: str,\n *,\n max_retries: int = 5,\n base_wait_time: float = 1,\n max_wait_time: float = 8,\n retry_on_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]] = (\n requests.Timeout,\n requests.ConnectionError,\n ),\n retry_on_status_codes: Union[int, Tuple[int, ...]] = HTTPStatus.SERVICE_UNAVAILABLE,\n **kwargs,\n) -> Response:\n """Wrapper around requests to retry calls on an endpoint, with exponential backoff.\n\n Endpoint call is retried on exceptions (ex: connection timeout, proxy error,...)\n and/or on specific status codes (ex: service unavailable). If the call failed more\n than `max_retries`, the exception is thrown or `raise_for_status` is called on the\n response object.\n\n Re-implement mechanisms from the `backoff` library to avoid adding an external\n dependency to `huggingface_hub`. See https://github.com/litl/backoff.\n\n Args:\n method (`Literal["GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", "DELETE"]`):\n HTTP method to perform.\n url (`str`):\n The URL of the resource to fetch.\n max_retries (`int`, *optional*, defaults to `5`):\n Maximum number of retries (set to `0` to disable retries).\n base_wait_time (`float`, *optional*, defaults to `1`):\n Duration (in seconds) to wait before retrying the first time.\n Wait time between retries then grows exponentially, capped by\n `max_wait_time`.\n max_wait_time (`float`, *optional*, defaults to `8`):\n Maximum duration (in seconds) to wait before retrying.\n retry_on_exceptions (`Type[Exception]` or `Tuple[Type[Exception]]`, *optional*):\n Define which exceptions must be caught to retry the request. Can be a single type or a tuple of types.\n By default, retry on `requests.Timeout` and `requests.ConnectionError`.\n retry_on_status_codes (`int` or `Tuple[int]`, *optional*, defaults to `503`):\n Define on which status codes the request must be retried. 
By default, only\n HTTP 503 Service Unavailable is retried.\n **kwargs (`dict`, *optional*):\n kwargs to pass to `requests.request`.\n\n Example:\n ```\n >>> from huggingface_hub.utils import http_backoff\n\n # Same usage as "requests.request".\n >>> response = http_backoff("GET", "https://www.google.com")\n >>> response.raise_for_status()\n\n # If you expect a Gateway Timeout from time to time\n >>> http_backoff("PUT", upload_url, data=data, retry_on_status_codes=504)\n >>> response.raise_for_status()\n ```\n\n <Tip warning={true}>\n\n When using `requests` it is possible to stream data by passing an iterator to the\n `data` argument. On http backoff this is a problem as the iterator is not reset\n after a failed call. This issue is mitigated for file objects or any IO streams\n by saving the initial position of the cursor (with `data.tell()`) and resetting the\n cursor between each call (with `data.seek()`). For arbitrary iterators, http backoff\n will fail. If this is a hard constraint for you, please let us know by opening an\n issue on [Github](https://github.com/huggingface/huggingface_hub).\n\n </Tip>\n """\n if isinstance(retry_on_exceptions, type): # Tuple from single exception type\n retry_on_exceptions = (retry_on_exceptions,)\n\n if isinstance(retry_on_status_codes, int): # Tuple from single status code\n retry_on_status_codes = (retry_on_status_codes,)\n\n nb_tries = 0\n sleep_time = base_wait_time\n\n # If `data` is used and is a file object (or any IO), it will be consumed on the\n # first HTTP request. We need to save the initial position so that the full content\n # of the file is re-sent on http backoff. See warning tip in docstring.\n io_obj_initial_pos = None\n if "data" in kwargs and isinstance(kwargs["data"], (io.IOBase, SliceFileObj)):\n io_obj_initial_pos = kwargs["data"].tell()\n\n session = get_session()\n while True:\n nb_tries += 1\n try:\n # If `data` is used and is a file object (or any IO), set back cursor to\n # initial position.\n if io_obj_initial_pos is not None:\n kwargs["data"].seek(io_obj_initial_pos)\n\n # Perform request and return if status_code is not in the retry list.\n response = session.request(method=method, url=url, **kwargs)\n if response.status_code not in retry_on_status_codes:\n return response\n\n # Wrong status code returned (HTTP 503 for instance)\n logger.warning(f"HTTP Error {response.status_code} thrown while requesting {method} {url}")\n if nb_tries > max_retries:\n response.raise_for_status() # Will raise uncaught exception\n # We return response to avoid infinite loop in the corner case where the\n # user ask for retry on a status code that doesn't raise_for_status.\n return response\n\n except retry_on_exceptions as err:\n logger.warning(f"'{err}' thrown while requesting {method} {url}")\n\n if isinstance(err, requests.ConnectionError):\n reset_sessions() # In case of SSLError it's best to reset the shared requests.Session objects\n\n if nb_tries > max_retries:\n raise err\n\n # Sleep for X seconds\n logger.warning(f"Retrying in {sleep_time}s [Retry {nb_tries}/{max_retries}].")\n time.sleep(sleep_time)\n\n # Update sleep time for next retry\n sleep_time = min(max_wait_time, sleep_time * 2) # Exponential backoff\n\n\ndef fix_hf_endpoint_in_url(url: str, endpoint: Optional[str]) -> str:\n """Replace the default endpoint in a URL by a custom one.\n\n This is useful when using a proxy and the Hugging Face Hub returns a URL with the default endpoint.\n """\n endpoint = endpoint.rstrip("/") if endpoint else constants.ENDPOINT\n # 
check if a proxy has been set => if yes, update the returned URL to use the proxy\n if endpoint not in (constants._HF_DEFAULT_ENDPOINT, constants._HF_DEFAULT_STAGING_ENDPOINT):\n url = url.replace(constants._HF_DEFAULT_ENDPOINT, endpoint)\n url = url.replace(constants._HF_DEFAULT_STAGING_ENDPOINT, endpoint)\n return url\n\n\ndef hf_raise_for_status(response: Response, endpoint_name: Optional[str] = None) -> None:\n """\n Internal version of `response.raise_for_status()` that will refine a\n potential HTTPError. Raised exception will be an instance of `HfHubHTTPError`.\n\n This helper is meant to be the unique method to raise_for_status when making a call\n to the Hugging Face Hub.\n\n\n Example:\n ```py\n import requests\n from huggingface_hub.utils import get_session, hf_raise_for_status, HfHubHTTPError\n\n response = get_session().post(...)\n try:\n hf_raise_for_status(response)\n except HfHubHTTPError as e:\n print(str(e)) # formatted message\n e.request_id, e.server_message # details returned by server\n\n # Complete the error message with additional information once it's raised\n e.append_to_message("\n`create_commit` expects the repository to exist.")\n raise\n ```\n\n Args:\n response (`Response`):\n Response from the server.\n endpoint_name (`str`, *optional*):\n Name of the endpoint that has been called. If provided, the error message\n will be more complete.\n\n <Tip warning={true}>\n\n Raises when the request has failed:\n\n - [`~utils.RepositoryNotFoundError`]\n If the repository to download from cannot be found. This may be because it\n doesn't exist, because `repo_type` is not set correctly, or because the repo\n is `private` and you do not have access.\n - [`~utils.GatedRepoError`]\n If the repository exists but is gated and the user is not on the authorized\n list.\n - [`~utils.RevisionNotFoundError`]\n If the repository exists but the revision couldn't be find.\n - [`~utils.EntryNotFoundError`]\n If the repository exists but the entry (e.g. the requested file) couldn't be\n find.\n - [`~utils.BadRequestError`]\n If request failed with a HTTP 400 BadRequest error.\n - [`~utils.HfHubHTTPError`]\n If request failed for a reason not listed above.\n\n </Tip>\n """\n try:\n response.raise_for_status()\n except HTTPError as e:\n error_code = response.headers.get("X-Error-Code")\n error_message = response.headers.get("X-Error-Message")\n\n if error_code == "RevisionNotFound":\n message = f"{response.status_code} Client Error." + "\n\n" + f"Revision Not Found for url: {response.url}."\n raise _format(RevisionNotFoundError, message, response) from e\n\n elif error_code == "EntryNotFound":\n message = f"{response.status_code} Client Error." + "\n\n" + f"Entry Not Found for url: {response.url}."\n raise _format(EntryNotFoundError, message, response) from e\n\n elif error_code == "GatedRepo":\n message = (\n f"{response.status_code} Client Error." 
+ "\n\n" + f"Cannot access gated repo for url {response.url}."\n )\n raise _format(GatedRepoError, message, response) from e\n\n elif error_message == "Access to this resource is disabled.":\n message = (\n f"{response.status_code} Client Error."\n + "\n\n"\n + f"Cannot access repository for url {response.url}."\n + "\n"\n + "Access to this resource is disabled."\n )\n raise _format(DisabledRepoError, message, response) from e\n\n elif error_code == "RepoNotFound" or (\n response.status_code == 401\n and error_message != "Invalid credentials in Authorization header"\n and response.request is not None\n and response.request.url is not None\n and REPO_API_REGEX.search(response.request.url) is not None\n ):\n # 401 is misleading as it is returned for:\n # - private and gated repos if user is not authenticated\n # - missing repos\n # => for now, we process them as `RepoNotFound` anyway.\n # See https://gist.github.com/Wauplin/46c27ad266b15998ce56a6603796f0b9\n message = (\n f"{response.status_code} Client Error."\n + "\n\n"\n + f"Repository Not Found for url: {response.url}."\n + "\nPlease make sure you specified the correct `repo_id` and"\n " `repo_type`.\nIf you are trying to access a private or gated repo,"\n " make sure you are authenticated. For more details, see"\n " https://huggingface.co/docs/huggingface_hub/authentication"\n )\n raise _format(RepositoryNotFoundError, message, response) from e\n\n elif response.status_code == 400:\n message = (\n f"\n\nBad request for {endpoint_name} endpoint:" if endpoint_name is not None else "\n\nBad request:"\n )\n raise _format(BadRequestError, message, response) from e\n\n elif response.status_code == 403:\n message = (\n f"\n\n{response.status_code} Forbidden: {error_message}."\n + f"\nCannot access content at: {response.url}."\n + "\nMake sure your token has the correct permissions."\n )\n raise _format(HfHubHTTPError, message, response) from e\n\n elif response.status_code == 416:\n range_header = response.request.headers.get("Range")\n message = f"{e}. Requested range: {range_header}. 
Content-Range: {response.headers.get('Content-Range')}."\n raise _format(HfHubHTTPError, message, response) from e\n\n # Convert `HTTPError` into a `HfHubHTTPError` to display request information\n # as well (request id and/or server error message)\n raise _format(HfHubHTTPError, str(e), response) from e\n\n\ndef _format(error_type: Type[HfHubHTTPError], custom_message: str, response: Response) -> HfHubHTTPError:\n server_errors = []\n\n # Retrieve server error from header\n from_headers = response.headers.get("X-Error-Message")\n if from_headers is not None:\n server_errors.append(from_headers)\n\n # Retrieve server error from body\n try:\n # Case errors are returned in a JSON format\n data = response.json()\n\n error = data.get("error")\n if error is not None:\n if isinstance(error, list):\n # Case {'error': ['my error 1', 'my error 2']}\n server_errors.extend(error)\n else:\n # Case {'error': 'my error'}\n server_errors.append(error)\n\n errors = data.get("errors")\n if errors is not None:\n # Case {'errors': [{'message': 'my error 1'}, {'message': 'my error 2'}]}\n for error in errors:\n if "message" in error:\n server_errors.append(error["message"])\n\n except JSONDecodeError:\n # If content is not JSON and not HTML, append the text\n content_type = response.headers.get("Content-Type", "")\n if response.text and "html" not in content_type.lower():\n server_errors.append(response.text)\n\n # Strip all server messages\n server_errors = [str(line).strip() for line in server_errors if str(line).strip()]\n\n # Deduplicate server messages (keep order)\n # taken from https://stackoverflow.com/a/17016257\n server_errors = list(dict.fromkeys(server_errors))\n\n # Format server error\n server_message = "\n".join(server_errors)\n\n # Add server error to custom message\n final_error_message = custom_message\n if server_message and server_message.lower() not in custom_message.lower():\n if "\n\n" in custom_message:\n final_error_message += "\n" + server_message\n else:\n final_error_message += "\n\n" + server_message\n # Add Request ID\n request_id = str(response.headers.get(X_REQUEST_ID, ""))\n if request_id:\n request_id_message = f" (Request ID: {request_id})"\n else:\n # Fallback to X-Amzn-Trace-Id\n request_id = str(response.headers.get(X_AMZN_TRACE_ID, ""))\n if request_id:\n request_id_message = f" (Amzn Trace ID: {request_id})"\n if request_id and request_id.lower() not in final_error_message.lower():\n if "\n" in final_error_message:\n newline_index = final_error_message.index("\n")\n final_error_message = (\n final_error_message[:newline_index] + request_id_message + final_error_message[newline_index:]\n )\n else:\n final_error_message += request_id_message\n\n # Return\n return error_type(final_error_message.strip(), response=response, server_message=server_message or None)\n\n\ndef _curlify(request: requests.PreparedRequest) -> str:\n """Convert a `requests.PreparedRequest` into a curl command (str).\n\n Used for debug purposes only.\n\n Implementation vendored from https://github.com/ofw/curlify/blob/master/curlify.py.\n MIT License Copyright (c) 2016 Egor.\n """\n parts: List[Tuple[Any, Any]] = [\n ("curl", None),\n ("-X", request.method),\n ]\n\n for k, v in sorted(request.headers.items()):\n if k.lower() == "authorization":\n v = "<TOKEN>" # Hide authorization header, no matter its value (can be Bearer, Key, etc.)\n parts += [("-H", "{0}: {1}".format(k, v))]\n\n if request.body:\n body = request.body\n if isinstance(body, bytes):\n body = body.decode("utf-8", errors="ignore")\n 
elif hasattr(body, "read"):\n body = "<file-like object>" # Don't try to read it to avoid consuming the stream\n if len(body) > 1000:\n body = body[:1000] + " ... [truncated]"\n parts += [("-d", body.replace("\n", ""))]\n\n parts += [(None, request.url)]\n\n flat_parts = []\n for k, v in parts:\n if k:\n flat_parts.append(quote(k))\n if v:\n flat_parts.append(quote(v))\n\n return " ".join(flat_parts)\n\n\n# Regex to parse HTTP Range header\nRANGE_REGEX = re.compile(r"^\s*bytes\s*=\s*(\d*)\s*-\s*(\d*)\s*$", re.IGNORECASE)\n\n\ndef _adjust_range_header(original_range: Optional[str], resume_size: int) -> Optional[str]:\n """\n Adjust HTTP Range header to account for resume position.\n """\n if not original_range:\n return f"bytes={resume_size}-"\n\n if "," in original_range:\n raise ValueError(f"Multiple ranges detected - {original_range!r}, not supported yet.")\n\n match = RANGE_REGEX.match(original_range)\n if not match:\n raise RuntimeError(f"Invalid range format - {original_range!r}.")\n start, end = match.groups()\n\n if not start:\n if not end:\n raise RuntimeError(f"Invalid range format - {original_range!r}.")\n\n new_suffix = int(end) - resume_size\n new_range = f"bytes=-{new_suffix}"\n if new_suffix <= 0:\n raise RuntimeError(f"Empty new range - {new_range!r}.")\n return new_range\n\n start = int(start)\n new_start = start + resume_size\n if end:\n end = int(end)\n new_range = f"bytes={new_start}-{end}"\n if new_start > end:\n raise RuntimeError(f"Empty new range - {new_range!r}.")\n return new_range\n\n return f"bytes={new_start}-"\n
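The retry loop in `http_backoff` doubles `sleep_time` after every failed attempt and caps it at `max_wait_time`. A minimal sketch of the resulting wait schedule under the default parameters (pure Python, no network involved):

```python
# Sketch: the sleep schedule produced by `http_backoff` with its defaults
# (base_wait_time=1, max_wait_time=8, max_retries=5).
base_wait_time, max_wait_time, max_retries = 1.0, 8.0, 5

sleep_time = base_wait_time
schedule = []
for _ in range(max_retries):
    schedule.append(sleep_time)
    sleep_time = min(max_wait_time, sleep_time * 2)  # exponential backoff, capped

print(schedule)  # [1.0, 2.0, 4.0, 8.0, 8.0] -> at most ~23s spent waiting
```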
.venv\Lib\site-packages\huggingface_hub\utils\_http.py
_http.py
Python
25,531
0.95
0.163265
0.134875
node-utils
694
2024-07-11T00:55:36.429599
Apache-2.0
false
7708efca31f4fbfa4e5fdb11b987ebfa
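`_adjust_range_header` (defined at the end of `_http.py` above) shifts an existing `Range` header by the number of bytes already downloaded when a request is resumed. A short sketch of the expected behavior; the helper is private, so the import path below may change between releases:

```python
# Sketch: how a Range header is shifted by a resume offset.
# `_adjust_range_header` is a private helper; its location may change.
from huggingface_hub.utils._http import _adjust_range_header

assert _adjust_range_header(None, 100) == "bytes=100-"              # no prior range: resume at byte 100
assert _adjust_range_header("bytes=0-999", 100) == "bytes=100-999"  # bounded range: start moves forward
assert _adjust_range_header("bytes=500-", 100) == "bytes=600-"      # open-ended range: start moves forward
assert _adjust_range_header("bytes=-500", 100) == "bytes=-400"      # suffix range: suffix shrinks
```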
# coding=utf-8\n# Copyright 2019-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Git LFS related utilities"""\n\nimport io\nimport os\nfrom contextlib import AbstractContextManager\nfrom typing import BinaryIO\n\n\nclass SliceFileObj(AbstractContextManager):\n """\n Utility context manager to read a *slice* of a seekable file-like object as a seekable, file-like object.\n\n This is NOT thread-safe.\n\n Inspired by stackoverflow.com/a/29838711/593036\n\n Credits to @julien-c\n\n Args:\n fileobj (`BinaryIO`):\n A file-like object to slice. MUST implement `tell()` and `seek()` (and `read()` of course).\n `fileobj` will be reset to its original position when exiting the context manager.\n seek_from (`int`):\n The start of the slice (offset from position 0 in bytes).\n read_limit (`int`):\n The maximum number of bytes to read from the slice.\n\n Attributes:\n previous_position (`int`):\n The position of `fileobj` before entering the context manager, restored on exit.\n\n Examples:\n\n Reading 200 bytes with an offset of 128 bytes from a file (i.e. bytes 128 to 327):\n ```python\n >>> with open("path/to/file", "rb") as file:\n ... with SliceFileObj(file, seek_from=128, read_limit=200) as fslice:\n ... fslice.read(...)\n ```\n\n Reading a file in chunks of 512 bytes:\n ```python\n >>> import os\n >>> from math import ceil\n >>> chunk_size = 512\n >>> file_size = os.path.getsize("path/to/file")\n >>> with open("path/to/file", "rb") as file:\n ... for chunk_idx in range(ceil(file_size / chunk_size)):\n ... with SliceFileObj(file, seek_from=chunk_idx * chunk_size, read_limit=chunk_size) as fslice:\n ... 
chunk = fslice.read(...)\n\n ```\n """\n\n def __init__(self, fileobj: BinaryIO, seek_from: int, read_limit: int):\n self.fileobj = fileobj\n self.seek_from = seek_from\n self.read_limit = read_limit\n\n def __enter__(self):\n self._previous_position = self.fileobj.tell()\n end_of_stream = self.fileobj.seek(0, os.SEEK_END)\n self._len = min(self.read_limit, end_of_stream - self.seek_from)\n # ^^ The actual number of bytes that can be read from the slice\n self.fileobj.seek(self.seek_from, io.SEEK_SET)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.fileobj.seek(self._previous_position, io.SEEK_SET)\n\n def read(self, n: int = -1):\n pos = self.tell()\n if pos >= self._len:\n return b""\n remaining_amount = self._len - pos\n data = self.fileobj.read(remaining_amount if n < 0 else min(n, remaining_amount))\n return data\n\n def tell(self) -> int:\n return self.fileobj.tell() - self.seek_from\n\n def seek(self, offset: int, whence: int = os.SEEK_SET) -> int:\n start = self.seek_from\n end = start + self._len\n if whence in (os.SEEK_SET, os.SEEK_END):\n offset = start + offset if whence == os.SEEK_SET else end + offset\n offset = max(start, min(offset, end))\n whence = os.SEEK_SET\n elif whence == os.SEEK_CUR:\n cur_pos = self.fileobj.tell()\n offset = max(start - cur_pos, min(offset, end - cur_pos))\n else:\n raise ValueError(f"whence value {whence} is not supported")\n return self.fileobj.seek(offset, whence) - self.seek_from\n\n def __iter__(self):\n yield self.read(n=4 * 1024 * 1024)\n
.venv\Lib\site-packages\huggingface_hub\utils\_lfs.py
_lfs.py
Python
3,957
0.95
0.127273
0.164835
awesome-app
277
2023-12-18T00:12:32.052579
GPL-3.0
false
c59ba7e9eb3f699cfaca27d0829e3206
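As a runnable variant of the docstring example above, a small sketch that reads a file in fixed-size slices with `SliceFileObj`. The file path is illustrative, and `huggingface_hub.utils._lfs` is an internal module path that may change:

```python
# Sketch: read a file in 512-byte slices with SliceFileObj.
# The path is illustrative; SliceFileObj lives in an internal module.
import os
from math import ceil

from huggingface_hub.utils._lfs import SliceFileObj

path = "path/to/file"  # replace with a real file
chunk_size = 512
file_size = os.path.getsize(path)

with open(path, "rb") as f:
    for chunk_idx in range(ceil(file_size / chunk_size)):
        with SliceFileObj(f, seek_from=chunk_idx * chunk_size, read_limit=chunk_size) as fslice:
            chunk = fslice.read()  # at most `chunk_size` bytes from this slice
            print(chunk_idx, len(chunk))
```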
# coding=utf-8\n# Copyright 2022-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains utilities to handle paths in the Hugging Face Hub."""\n\nfrom fnmatch import fnmatch\nfrom pathlib import Path\nfrom typing import Callable, Generator, Iterable, List, Optional, TypeVar, Union\n\n\nT = TypeVar("T")\n\n# Always ignore `.git` and `.cache/huggingface` folders in commits\nDEFAULT_IGNORE_PATTERNS = [\n ".git",\n ".git/*",\n "*/.git",\n "**/.git/**",\n ".cache/huggingface",\n ".cache/huggingface/*",\n "*/.cache/huggingface",\n "**/.cache/huggingface/**",\n]\n# Forbidden to commit these folders\nFORBIDDEN_FOLDERS = [".git", ".cache"]\n\n\ndef filter_repo_objects(\n items: Iterable[T],\n *,\n allow_patterns: Optional[Union[List[str], str]] = None,\n ignore_patterns: Optional[Union[List[str], str]] = None,\n key: Optional[Callable[[T], str]] = None,\n) -> Generator[T, None, None]:\n """Filter repo objects based on an allowlist and a denylist.\n\n Input must be a list of paths (`str` or `Path`) or a list of arbitrary objects.\n In the latter case, `key` must be provided and specifies a function of one argument\n that is used to extract a path from each element in the iterable.\n\n Patterns are Unix shell-style wildcards which are NOT regular expressions. See\n https://docs.python.org/3/library/fnmatch.html for more details.\n\n Args:\n items (`Iterable`):\n List of items to filter.\n allow_patterns (`str` or `List[str]`, *optional*):\n Patterns constituting the allowlist. If provided, item paths must match at\n least one pattern from the allowlist.\n ignore_patterns (`str` or `List[str]`, *optional*):\n Patterns constituting the denylist. If provided, item paths must not match\n any patterns from the denylist.\n key (`Callable[[T], str]`, *optional*):\n Single-argument function to extract a path from each item. If not provided,\n the `items` must already be `str` or `Path`.\n\n Returns:\n Filtered list of objects, as a generator.\n\n Raises:\n :class:`ValueError`:\n If `key` is not provided and items are not `str` or `Path`.\n\n Example usage with paths:\n ```python\n >>> # Filter only PDFs that are not hidden.\n >>> list(filter_repo_objects(\n ... ["aaa.pdf", "bbb.jpg", ".ccc.pdf", ".ddd.png"],\n ... allow_patterns=["*.pdf"],\n ... ignore_patterns=[".*"],\n ... ))\n ["aaa.pdf"]\n ```\n\n Example usage with objects:\n ```python\n >>> list(filter_repo_objects(\n ... [\n ... CommitOperationAdd(path_or_fileobj="/tmp/aaa.pdf", path_in_repo="aaa.pdf"),\n ... CommitOperationAdd(path_or_fileobj="/tmp/bbb.jpg", path_in_repo="bbb.jpg"),\n ... CommitOperationAdd(path_or_fileobj="/tmp/.ccc.pdf", path_in_repo=".ccc.pdf"),\n ... CommitOperationAdd(path_or_fileobj="/tmp/.ddd.png", path_in_repo=".ddd.png"),\n ... ],\n ... allow_patterns=["*.pdf"],\n ... ignore_patterns=[".*"],\n ... key=lambda x: x.path_in_repo\n ... 
))\n [CommitOperationAdd(path_or_fileobj="/tmp/aaa.pdf", path_in_repo="aaa.pdf")]\n ```\n """\n if isinstance(allow_patterns, str):\n allow_patterns = [allow_patterns]\n\n if isinstance(ignore_patterns, str):\n ignore_patterns = [ignore_patterns]\n\n if allow_patterns is not None:\n allow_patterns = [_add_wildcard_to_directories(p) for p in allow_patterns]\n if ignore_patterns is not None:\n ignore_patterns = [_add_wildcard_to_directories(p) for p in ignore_patterns]\n\n if key is None:\n\n def _identity(item: T) -> str:\n if isinstance(item, str):\n return item\n if isinstance(item, Path):\n return str(item)\n raise ValueError(f"Please provide `key` argument in `filter_repo_objects`: `{item}` is not a string.")\n\n key = _identity # Items must be `str` or `Path`, otherwise raise ValueError\n\n for item in items:\n path = key(item)\n\n # Skip if there's an allowlist and path doesn't match any\n if allow_patterns is not None and not any(fnmatch(path, r) for r in allow_patterns):\n continue\n\n # Skip if there's a denylist and path matches any\n if ignore_patterns is not None and any(fnmatch(path, r) for r in ignore_patterns):\n continue\n\n yield item\n\n\ndef _add_wildcard_to_directories(pattern: str) -> str:\n if pattern[-1] == "/":\n return pattern + "*"\n return pattern\n
.venv\Lib\site-packages\huggingface_hub\utils\_paths.py
_paths.py
Python
5,042
0.95
0.177305
0.162393
vue-tools
792
2023-08-06T11:21:23.121711
BSD-3-Clause
false
b39b476ca31bfa85064de638fd985ea2
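A quick sketch of the allowlist/denylist semantics of `filter_repo_objects` on plain path strings (the patterns and paths are illustrative):

```python
# Sketch: allowlist/denylist filtering with filter_repo_objects.
from huggingface_hub.utils import filter_repo_objects

paths = ["model.safetensors", "README.md", ".gitattributes", "logs/run.txt"]

kept = list(
    filter_repo_objects(
        paths,
        allow_patterns=["*.safetensors", "*.md"],  # keep weights and docs
        ignore_patterns=[".*"],                    # drop hidden files
    )
)
print(kept)  # ['model.safetensors', 'README.md']
```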
import functools\nimport operator\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Literal, Optional, Tuple\n\n\nFILENAME_T = str\nTENSOR_NAME_T = str\nDTYPE_T = Literal["F64", "F32", "F16", "BF16", "I64", "I32", "I16", "I8", "U8", "BOOL"]\n\n\n@dataclass\nclass TensorInfo:\n """Information about a tensor.\n\n For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format.\n\n Attributes:\n dtype (`str`):\n The data type of the tensor ("F64", "F32", "F16", "BF16", "I64", "I32", "I16", "I8", "U8", "BOOL").\n shape (`List[int]`):\n The shape of the tensor.\n data_offsets (`Tuple[int, int]`):\n The offsets of the data in the file as a tuple `[BEGIN, END]`.\n parameter_count (`int`):\n The number of parameters in the tensor.\n """\n\n dtype: DTYPE_T\n shape: List[int]\n data_offsets: Tuple[int, int]\n parameter_count: int = field(init=False)\n\n def __post_init__(self) -> None:\n # Taken from https://stackoverflow.com/a/13840436\n try:\n self.parameter_count = functools.reduce(operator.mul, self.shape)\n except TypeError:\n self.parameter_count = 1 # scalar value has no shape\n\n\n@dataclass\nclass SafetensorsFileMetadata:\n """Metadata for a Safetensors file hosted on the Hub.\n\n This class is returned by [`parse_safetensors_file_metadata`].\n\n For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format.\n\n Attributes:\n metadata (`Dict`):\n The metadata contained in the file.\n tensors (`Dict[str, TensorInfo]`):\n A map of all tensors. Keys are tensor names and values are information about the corresponding tensor, as a\n [`TensorInfo`] object.\n parameter_count (`Dict[str, int]`):\n A map of the number of parameters per data type. Keys are data types and values are the number of parameters\n of that data type.\n """\n\n metadata: Dict[str, str]\n tensors: Dict[TENSOR_NAME_T, TensorInfo]\n parameter_count: Dict[DTYPE_T, int] = field(init=False)\n\n def __post_init__(self) -> None:\n parameter_count: Dict[DTYPE_T, int] = defaultdict(int)\n for tensor in self.tensors.values():\n parameter_count[tensor.dtype] += tensor.parameter_count\n self.parameter_count = dict(parameter_count)\n\n\n@dataclass\nclass SafetensorsRepoMetadata:\n """Metadata for a Safetensors repo.\n\n A repo is considered to be a Safetensors repo if it contains either a 'model.safetensors' weight file (non-shared\n model) or a 'model.safetensors.index.json' index file (sharded model) at its root.\n\n This class is returned by [`get_safetensors_metadata`].\n\n For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format.\n\n Attributes:\n metadata (`Dict`, *optional*):\n The metadata contained in the 'model.safetensors.index.json' file, if it exists. Only populated for sharded\n models.\n sharded (`bool`):\n Whether the repo contains a sharded model or not.\n weight_map (`Dict[str, str]`):\n A map of all weights. Keys are tensor names and values are filenames of the files containing the tensors.\n files_metadata (`Dict[str, SafetensorsFileMetadata]`):\n A map of all files metadata. Keys are filenames and values are the metadata of the corresponding file, as\n a [`SafetensorsFileMetadata`] object.\n parameter_count (`Dict[str, int]`):\n A map of the number of parameters per data type. 
Keys are data types and values are the number of parameters\n of that data type.\n """\n\n metadata: Optional[Dict]\n sharded: bool\n weight_map: Dict[TENSOR_NAME_T, FILENAME_T] # tensor name -> filename\n files_metadata: Dict[FILENAME_T, SafetensorsFileMetadata] # filename -> metadata\n parameter_count: Dict[DTYPE_T, int] = field(init=False)\n\n def __post_init__(self) -> None:\n parameter_count: Dict[DTYPE_T, int] = defaultdict(int)\n for file_metadata in self.files_metadata.values():\n for dtype, nb_parameters_ in file_metadata.parameter_count.items():\n parameter_count[dtype] += nb_parameters_\n self.parameter_count = dict(parameter_count)\n
.venv\Lib\site-packages\huggingface_hub\utils\_safetensors.py
_safetensors.py
Python
4,458
0.95
0.153153
0.011364
awesome-app
535
2024-04-14T00:25:37.721174
Apache-2.0
false
822a1f334e89febf662857acfd98d23c
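The per-tensor parameter count is just the product of the shape, with a scalar (empty shape) counted as one parameter; a minimal sketch of the same computation used in `TensorInfo.__post_init__`:

```python
# Sketch: parameter counting as in TensorInfo.__post_init__.
import functools
import operator
from typing import List


def parameter_count(shape: List[int]) -> int:
    try:
        return functools.reduce(operator.mul, shape)
    except TypeError:  # reduce() raises on an empty iterable without an initial value
        return 1  # a scalar tensor has an empty shape


print(parameter_count([32, 128, 128]))  # 524288
print(parameter_count([]))              # 1 (scalar)
```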
# coding=utf-8\n# Copyright 2022-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Handle typing imports based on system compatibility."""\n\nimport sys\nfrom typing import Any, Callable, List, Literal, Type, TypeVar, Union, get_args, get_origin\n\n\nUNION_TYPES: List[Any] = [Union]\nif sys.version_info >= (3, 10):\n from types import UnionType\n\n UNION_TYPES += [UnionType]\n\n\nHTTP_METHOD_T = Literal["GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", "DELETE"]\n\n# type hint meaning "function signature not changed by decorator"\nCallableT = TypeVar("CallableT", bound=Callable)\n\n_JSON_SERIALIZABLE_TYPES = (int, float, str, bool, type(None))\n\n\ndef is_jsonable(obj: Any) -> bool:\n """Check if an object is JSON serializable.\n\n This is a weak check, as it does not check for the actual JSON serialization, but only for the types of the object.\n It works correctly for basic use cases but does not guarantee an exhaustive check.\n\n An object is considered recursively JSON serializable if:\n - it is an instance of int, float, str, bool, or NoneType\n - it is a list or tuple and all its items are JSON serializable\n - it is a dict, all its keys are JSON-serializable primitive types, and all its values are JSON serializable\n - it exposes a `__json__` method\n """\n try:\n if isinstance(obj, _JSON_SERIALIZABLE_TYPES):\n return True\n if isinstance(obj, (list, tuple)):\n return all(is_jsonable(item) for item in obj)\n if isinstance(obj, dict):\n return all(isinstance(key, _JSON_SERIALIZABLE_TYPES) and is_jsonable(value) for key, value in obj.items())\n if hasattr(obj, "__json__"):\n return True\n return False\n except RecursionError:\n return False\n\n\ndef is_simple_optional_type(type_: Type) -> bool:\n """Check if a type is optional, i.e. Optional[Type] or Union[Type, None] or Type | None, where Type is a non-composite type."""\n if get_origin(type_) in UNION_TYPES:\n union_args = get_args(type_)\n if len(union_args) == 2 and type(None) in union_args:\n return True\n return False\n\n\ndef unwrap_simple_optional_type(optional_type: Type) -> Type:\n """Unwraps a simple optional type, i.e. returns Type from Optional[Type]."""\n for arg in get_args(optional_type):\n if arg is not type(None):\n return arg\n raise ValueError(f"'{optional_type}' is not an optional type")\n
.venv\Lib\site-packages\huggingface_hub\utils\_typing.py
_typing.py
Python
2,903
0.95
0.306667
0.254237
react-lib
978
2023-08-31T12:22:37.846115
BSD-3-Clause
false
4fafb66672816e4154255714825644ac
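A short sketch of which annotations these helpers treat as a "simple" optional type, i.e. a two-member union with `None`:

```python
# Sketch: "simple" optional types as understood by the helpers above.
from typing import List, Optional, Union

from huggingface_hub.utils import is_simple_optional_type, unwrap_simple_optional_type

assert is_simple_optional_type(Optional[int])            # Optional[T] == Union[T, None]
assert is_simple_optional_type(Union[str, None])         # explicit two-member union with None
assert not is_simple_optional_type(Union[int, str])      # no None member
assert not is_simple_optional_type(Optional[Union[int, str]])  # flattens to a 3-member union

assert unwrap_simple_optional_type(Optional[List[int]]) == List[int]
```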
# coding=utf-8\n# Copyright 2022-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains utilities to validate argument values in `huggingface_hub`."""\n\nimport inspect\nimport re\nimport warnings\nfrom functools import wraps\nfrom itertools import chain\nfrom typing import Any, Dict\n\nfrom huggingface_hub.errors import HFValidationError\n\nfrom ._typing import CallableT\n\n\nREPO_ID_REGEX = re.compile(\n r"""\n ^\n (\b[\w\-.]+\b/)? # optional namespace (username or organization)\n \b # starts with a word boundary\n [\w\-.]{1,96} # repo_name: alphanumeric + . _ -\n \b # ends with a word boundary\n $\n """,\n flags=re.VERBOSE,\n)\n\n\ndef validate_hf_hub_args(fn: CallableT) -> CallableT:\n """Validate values received as argument for any public method of `huggingface_hub`.\n\n The goal of this decorator is to harmonize validation of arguments reused\n everywhere. By default, all defined validators are tested.\n\n Validators:\n - [`~utils.validate_repo_id`]: `repo_id` must be `"repo_name"`\n or `"namespace/repo_name"`. Namespace is a username or an organization.\n - [`~utils.smoothly_deprecate_use_auth_token`]: Use `token` instead of\n `use_auth_token` (only if `use_auth_token` is not expected by the decorated\n function - in practice, always the case in `huggingface_hub`).\n\n Example:\n ```py\n >>> from huggingface_hub.utils import validate_hf_hub_args\n\n >>> @validate_hf_hub_args\n ... def my_cool_method(repo_id: str):\n ... print(repo_id)\n\n >>> my_cool_method(repo_id="valid_repo_id")\n valid_repo_id\n\n >>> my_cool_method("other..repo..id")\n huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.\n\n >>> my_cool_method(repo_id="other..repo..id")\n huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.\n\n >>> @validate_hf_hub_args\n ... def my_cool_auth_method(token: str):\n ... print(token)\n\n >>> my_cool_auth_method(token="a token")\n "a token"\n\n >>> my_cool_auth_method(use_auth_token="a use_auth_token")\n "a use_auth_token"\n\n >>> my_cool_auth_method(token="a token", use_auth_token="a use_auth_token")\n UserWarning: Both `token` and `use_auth_token` are passed (...)\n "a token"\n ```\n\n Raises:\n [`~utils.HFValidationError`]:\n If an input is not valid.\n """\n # TODO: add an argument to opt-out validation for specific argument?\n signature = inspect.signature(fn)\n\n # Should the validator switch `use_auth_token` values to `token`? In practice, always\n # True in `huggingface_hub`. 
Might not be the case in a downstream library.\n check_use_auth_token = "use_auth_token" not in signature.parameters and "token" in signature.parameters\n\n @wraps(fn)\n def _inner_fn(*args, **kwargs):\n has_token = False\n for arg_name, arg_value in chain(\n zip(signature.parameters, args), # Args values\n kwargs.items(), # Kwargs values\n ):\n if arg_name in ["repo_id", "from_id", "to_id"]:\n validate_repo_id(arg_value)\n\n elif arg_name == "token" and arg_value is not None:\n has_token = True\n\n if check_use_auth_token:\n kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)\n\n return fn(*args, **kwargs)\n\n return _inner_fn # type: ignore\n\n\ndef validate_repo_id(repo_id: str) -> None:\n """Validate `repo_id` is valid.\n\n This is not meant to replace the proper validation made on the Hub but rather to\n avoid local inconsistencies whenever possible (example: passing `repo_type` in the\n `repo_id` is forbidden).\n\n Rules:\n - Between 1 and 96 characters.\n - Either "repo_name" or "namespace/repo_name"\n - [a-zA-Z0-9] or "-", "_", "."\n - "--" and ".." are forbidden\n\n Valid: `"foo"`, `"foo/bar"`, `"123"`, `"Foo-BAR_foo.bar123"`\n\n Not valid: `"datasets/foo/bar"`, `".repo_id"`, `"foo--bar"`, `"foo.git"`\n\n Example:\n ```py\n >>> from huggingface_hub.utils import validate_repo_id\n >>> validate_repo_id(repo_id="valid_repo_id")\n >>> validate_repo_id(repo_id="other..repo..id")\n huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.\n ```\n\n Discussed in https://github.com/huggingface/huggingface_hub/issues/1008.\n In moon-landing (internal repository):\n - https://github.com/huggingface/moon-landing/blob/main/server/lib/Names.ts#L27\n - https://github.com/huggingface/moon-landing/blob/main/server/views/components/NewRepoForm/NewRepoForm.svelte#L138\n """\n if not isinstance(repo_id, str):\n # Typically, a Path is not a repo_id\n raise HFValidationError(f"Repo id must be a string, not {type(repo_id)}: '{repo_id}'.")\n\n if repo_id.count("/") > 1:\n raise HFValidationError(\n "Repo id must be in the form 'repo_name' or 'namespace/repo_name':"\n f" '{repo_id}'. Use `repo_type` argument if needed."\n )\n\n if not REPO_ID_REGEX.match(repo_id):\n raise HFValidationError(\n "Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are"\n " forbidden, '-' and '.' cannot start or end the name, max length is 96:"\n f" '{repo_id}'."\n )\n\n if "--" in repo_id or ".." in repo_id:\n raise HFValidationError(f"Cannot have -- or .. in repo_id: '{repo_id}'.")\n\n if repo_id.endswith(".git"):\n raise HFValidationError(f"Repo id cannot end with '.git': '{repo_id}'.")\n\n\ndef smoothly_deprecate_use_auth_token(fn_name: str, has_token: bool, kwargs: Dict[str, Any]) -> Dict[str, Any]:\n """Smoothly deprecate `use_auth_token` in the `huggingface_hub` codebase.\n\n The long-term goal is to remove any mention of `use_auth_token` in the codebase in\n favor of a single and less verbose `token` argument. This will be done in a few steps:\n\n 0. Step 0: methods that require read-access to the Hub use the `use_auth_token`\n argument (`str`, `bool` or `None`). Methods requiring write-access have a `token`\n argument (`str`, `None`). This implicit rule exists so that the token is not sent\n when not necessary (`use_auth_token=False`) even if logged in.\n\n 1. Step 1: we want to harmonize everything and use `token` everywhere (supporting\n `token=False` for read-only methods). 
In order not to break existing code, if\n `use_auth_token` is passed to a function, the `use_auth_token` value is passed\n as `token` instead, without any warning.\n a. Corner case: if both `use_auth_token` and `token` values are passed, a warning\n is raised and the `use_auth_token` value is ignored.\n\n 2. Step 2: Once it is released, we should push downstream libraries to switch from\n `use_auth_token` to `token` as much as possible, but without throwing a warning\n (e.g. manually create issues on the corresponding repos).\n\n 3. Step 3: After a transitional period (6 months, e.g. until April 2023?), we update\n `huggingface_hub` to throw a warning on `use_auth_token`. Hopefully, very few\n users will be impacted as it would have already been fixed.\n In addition, unit tests in `huggingface_hub` must be adapted to expect warnings\n to be thrown (but still use `use_auth_token` as before).\n\n 4. Step 4: After a normal deprecation cycle (3 releases?), remove this validator.\n `use_auth_token` will definitely not be supported.\n In addition, we update unit tests in `huggingface_hub` to use `token` everywhere.\n\n This has been discussed in:\n - https://github.com/huggingface/huggingface_hub/issues/1094.\n - https://github.com/huggingface/huggingface_hub/pull/928\n - (related) https://github.com/huggingface/huggingface_hub/pull/1064\n """\n new_kwargs = kwargs.copy() # do not mutate input!\n\n use_auth_token = new_kwargs.pop("use_auth_token", None) # remove from kwargs\n if use_auth_token is not None:\n if has_token:\n warnings.warn(\n "Both `token` and `use_auth_token` are passed to"\n f" `{fn_name}` with non-None values. `token` is now the"\n " preferred argument to pass a User Access Token."\n " `use_auth_token` value will be ignored."\n )\n else:\n # `token` argument is not passed and a non-None value is passed in\n # `use_auth_token` => use `use_auth_token` value as `token` kwarg.\n new_kwargs["token"] = use_auth_token\n\n return new_kwargs\n
.venv\Lib\site-packages\huggingface_hub\utils\_validators.py
_validators.py
Python
9,204
0.95
0.128319
0.11236
react-lib
721
2024-10-10T09:19:57.415394
MIT
false
9090b5592ed78c36cdbb7018d783a8be
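A quick sketch of which repo ids pass `validate_repo_id` (the example ids are illustrative):

```python
# Sketch: repo id validation. Valid ids are "repo_name" or "namespace/repo_name".
from huggingface_hub.errors import HFValidationError
from huggingface_hub.utils import validate_repo_id

validate_repo_id("gpt2")            # ok: repo_name only
validate_repo_id("openai/whisper")  # ok: namespace/repo_name

for bad in ["datasets/foo/bar", "foo..bar", "foo.git"]:
    try:
        validate_repo_id(bad)
    except HFValidationError as err:
        print(f"{bad!r} rejected: {err}")
```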
# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\n# ruff: noqa: F401\nfrom huggingface_hub.errors import (\n BadRequestError,\n CacheNotFound,\n CorruptedCacheException,\n DisabledRepoError,\n EntryNotFoundError,\n FileMetadataError,\n GatedRepoError,\n HfHubHTTPError,\n HFValidationError,\n LocalEntryNotFoundError,\n LocalTokenNotFoundError,\n NotASafetensorsRepoError,\n OfflineModeIsEnabled,\n RepositoryNotFoundError,\n RevisionNotFoundError,\n SafetensorsParsingError,\n)\n\nfrom . import tqdm as _tqdm # _tqdm is the module\nfrom ._auth import get_stored_tokens, get_token\nfrom ._cache_assets import cached_assets_path\nfrom ._cache_manager import (\n CachedFileInfo,\n CachedRepoInfo,\n CachedRevisionInfo,\n DeleteCacheStrategy,\n HFCacheInfo,\n scan_cache_dir,\n)\nfrom ._chunk_utils import chunk_iterable\nfrom ._datetime import parse_datetime\nfrom ._experimental import experimental\nfrom ._fixes import SoftTemporaryDirectory, WeakFileLock, yaml_dump\nfrom ._git_credential import list_credential_helpers, set_git_credential, unset_git_credential\nfrom ._headers import build_hf_headers, get_token_to_send\nfrom ._hf_folder import HfFolder\nfrom ._http import (\n configure_http_backend,\n fix_hf_endpoint_in_url,\n get_session,\n hf_raise_for_status,\n http_backoff,\n reset_sessions,\n)\nfrom ._pagination import paginate\nfrom ._paths import DEFAULT_IGNORE_PATTERNS, FORBIDDEN_FOLDERS, filter_repo_objects\nfrom ._runtime import (\n dump_environment_info,\n get_aiohttp_version,\n get_fastai_version,\n get_fastapi_version,\n get_fastcore_version,\n get_gradio_version,\n get_graphviz_version,\n get_hf_hub_version,\n get_hf_transfer_version,\n get_jinja_version,\n get_numpy_version,\n get_pillow_version,\n get_pydantic_version,\n get_pydot_version,\n get_python_version,\n get_tensorboard_version,\n get_tf_version,\n get_torch_version,\n is_aiohttp_available,\n is_colab_enterprise,\n is_fastai_available,\n is_fastapi_available,\n is_fastcore_available,\n is_google_colab,\n is_gradio_available,\n is_graphviz_available,\n is_hf_transfer_available,\n is_jinja_available,\n is_notebook,\n is_numpy_available,\n is_package_available,\n is_pillow_available,\n is_pydantic_available,\n is_pydot_available,\n is_safetensors_available,\n is_tensorboard_available,\n is_tf_available,\n is_torch_available,\n)\nfrom ._safetensors import SafetensorsFileMetadata, SafetensorsRepoMetadata, TensorInfo\nfrom ._subprocess import capture_output, run_interactive_subprocess, run_subprocess\nfrom ._telemetry import send_telemetry\nfrom ._typing import is_jsonable, is_simple_optional_type, unwrap_simple_optional_type\nfrom ._validators import smoothly_deprecate_use_auth_token, validate_hf_hub_args, validate_repo_id\nfrom ._xet import (\n XetConnectionInfo,\n XetFileData,\n XetTokenType,\n fetch_xet_connection_info_from_repo_info,\n parse_xet_file_data_from_response,\n refresh_xet_connection_info,\n)\nfrom .tqdm import are_progress_bars_disabled, 
disable_progress_bars, enable_progress_bars, tqdm, tqdm_stream_file\n
.venv\Lib\site-packages\huggingface_hub\utils\__init__.py
__init__.py
Python
3,722
0.95
0.008547
0.130435
node-utils
365
2023-12-29T15:07:34.731834
MIT
false
2549994ef6b94bb2c878ece6e3673849