Dataset columns (name, type, observed range or values):

| column | type | range / values |
| --- | --- | --- |
| content | string | lengths 1 to 103k; contains nulls (⌀) |
| path | string | lengths 8 to 216 |
| filename | string | lengths 2 to 179 |
| language | string | 15 classes |
| size_bytes | int64 | 2 to 189k |
| quality_score | float64 | 0.5 to 0.95 |
| complexity | float64 | 0 to 1 |
| documentation_ratio | float64 | 0 to 1 |
| repository | string | 5 classes |
| stars | int64 | 0 to 1k |
| created_date | string (date) | 2023-07-10 19:21:08 to 2025-07-09 19:11:45 |
| license | string | 4 classes |
| is_test | bool | 2 classes |
| file_hash | string | length 32 |

content | path | filename | language | size_bytes | quality_score | complexity | documentation_ratio | repository | stars | created_date | license | is_test | file_hash |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
import inspect\nimport re\nfrom typing import Dict, List, Tuple\n\nfrom huggingface_hub.utils import insecure_hashlib\n\nfrom .arrow import arrow\nfrom .audiofolder import audiofolder\nfrom .cache import cache\nfrom .csv import csv\nfrom .imagefolder import imagefolder\nfrom .json import json\nfrom .pandas import pandas\nfrom .parquet import parquet\nfrom .pdffolder import pdffolder\nfrom .sql import sql\nfrom .text import text\nfrom .videofolder import videofolder\nfrom .webdataset import webdataset\nfrom .xml import xml\n\n\ndef _hash_python_lines(lines: list[str]) -> str:\n filtered_lines = []\n for line in lines:\n line = re.sub(r"#.*", "", line) # remove comments\n if line:\n filtered_lines.append(line)\n full_str = "\n".join(filtered_lines)\n\n # Make a hash from all this code\n full_bytes = full_str.encode("utf-8")\n return insecure_hashlib.sha256(full_bytes).hexdigest()\n\n\n# get importable module names and hash for caching\n_PACKAGED_DATASETS_MODULES = {\n "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),\n "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),\n "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),\n "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),\n "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),\n "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),\n "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),\n "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),\n "videofolder": (videofolder.__name__, _hash_python_lines(inspect.getsource(videofolder).splitlines())),\n "pdffolder": (pdffolder.__name__, _hash_python_lines(inspect.getsource(pdffolder).splitlines())),\n "webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),\n "xml": (xml.__name__, _hash_python_lines(inspect.getsource(xml).splitlines())),\n}\n\n# get importable module names and hash for caching\n_PACKAGED_DATASETS_MODULES_2_15_HASHES = {\n "csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",\n "json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",\n "pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",\n "parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",\n "arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",\n "text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",\n "imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",\n "audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",\n}\n\n# Used to infer the module to use based on the data files extensions\n_EXTENSION_TO_MODULE: dict[str, tuple[str, dict]] = {\n ".csv": ("csv", {}),\n ".tsv": ("csv", {"sep": "\t"}),\n ".json": ("json", {}),\n ".jsonl": ("json", {}),\n # ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)\n ".ndjson": ("json", {}),\n ".parquet": ("parquet", {}),\n ".geoparquet": ("parquet", {}),\n ".gpq": ("parquet", {}),\n ".arrow": ("arrow", {}),\n ".txt": ("text", {}),\n ".tar": ("webdataset", {}),\n ".xml": ("xml", {}),\n}\n_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in 
imagefolder.ImageFolder.EXTENSIONS})\n_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})\n_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})\n_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})\n_EXTENSION_TO_MODULE.update({ext: ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})\n_EXTENSION_TO_MODULE.update({ext.upper(): ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})\n_EXTENSION_TO_MODULE.update({ext: ("pdffolder", {}) for ext in pdffolder.PdfFolder.EXTENSIONS})\n_EXTENSION_TO_MODULE.update({ext.upper(): ("pdffolder", {}) for ext in pdffolder.PdfFolder.EXTENSIONS})\n\n# Used to filter data files based on extensions given a module name\n_MODULE_TO_EXTENSIONS: dict[str, list[str]] = {}\nfor _ext, (_module, _) in _EXTENSION_TO_MODULE.items():\n _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)\n\nfor _module in _MODULE_TO_EXTENSIONS:\n _MODULE_TO_EXTENSIONS[_module].append(".zip")\n\n# Used to filter data files based on file names\n_MODULE_TO_METADATA_FILE_NAMES: Dict[str, List[str]] = {}\nfor _module in _MODULE_TO_EXTENSIONS:\n _MODULE_TO_METADATA_FILE_NAMES[_module] = []\n_MODULE_TO_METADATA_FILE_NAMES["imagefolder"] = imagefolder.ImageFolder.METADATA_FILENAMES\n_MODULE_TO_METADATA_FILE_NAMES["audiofolder"] = imagefolder.ImageFolder.METADATA_FILENAMES\n_MODULE_TO_METADATA_FILE_NAMES["videofolder"] = imagefolder.ImageFolder.METADATA_FILENAMES\n_MODULE_TO_METADATA_FILE_NAMES["pdffolder"] = imagefolder.ImageFolder.METADATA_FILENAMES\n
| .venv\Lib\site-packages\datasets\packaged_modules\__init__.py | __init__.py | Python | 5,212 | 0.95 | 0.153846 | 0.076087 | python-kit | 4 | 2024-10-09T21:26:15.444218 | GPL-3.0 | false | d9321244887abf106e72fe72a5bdbc6e |
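A minimal sketch of how the extension-to-module mapping in the file above resolves a packaged loader. `_EXTENSION_TO_MODULE` and `_MODULE_TO_EXTENSIONS` are private internals of `datasets.packaged_modules` (shown in the source above), so this is illustrative rather than a public API.

```python
# Sketch: resolving a packaged builder from a file extension.
# These names are private internals of datasets.packaged_modules.
from datasets.packaged_modules import _EXTENSION_TO_MODULE, _MODULE_TO_EXTENSIONS

module_name, default_kwargs = _EXTENSION_TO_MODULE[".tsv"]
print(module_name, default_kwargs)             # csv {'sep': '\t'}

# Every module also accepts .zip archives of its files.
print(".zip" in _MODULE_TO_EXTENSIONS["csv"])  # True
```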
import itertools\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nimport pyarrow as pa\n\nimport datasets\nfrom datasets.table import table_cast\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n\n@dataclass\nclass ArrowConfig(datasets.BuilderConfig):\n """BuilderConfig for Arrow."""\n\n features: Optional[datasets.Features] = None\n\n def __post_init__(self):\n super().__post_init__()\n\n\nclass Arrow(datasets.ArrowBasedBuilder):\n BUILDER_CONFIG_CLASS = ArrowConfig\n\n def _info(self):\n return datasets.DatasetInfo(features=self.config.features)\n\n def _split_generators(self, dl_manager):\n """We handle string, list and dicts in datafiles"""\n if not self.config.data_files:\n raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")\n dl_manager.download_config.extract_on_the_fly = True\n data_files = dl_manager.download_and_extract(self.config.data_files)\n splits = []\n for split_name, files in data_files.items():\n if isinstance(files, str):\n files = [files]\n # Use `dl_manager.iter_files` to skip hidden files in an extracted archive\n files = [dl_manager.iter_files(file) for file in files]\n # Infer features if they are stored in the arrow schema\n if self.info.features is None:\n for file in itertools.chain.from_iterable(files):\n with open(file, "rb") as f:\n try:\n reader = pa.ipc.open_stream(f)\n except (OSError, pa.lib.ArrowInvalid):\n reader = pa.ipc.open_file(f)\n self.info.features = datasets.Features.from_arrow_schema(reader.schema)\n break\n splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))\n return splits\n\n def _cast_table(self, pa_table: pa.Table) -> pa.Table:\n if self.info.features is not None:\n # more expensive cast to support nested features with keys in a different order\n # allows str <-> int/float or str to Audio for example\n pa_table = table_cast(pa_table, self.info.features.arrow_schema)\n return pa_table\n\n def _generate_tables(self, files):\n for file_idx, file in enumerate(itertools.chain.from_iterable(files)):\n with open(file, "rb") as f:\n try:\n try:\n batches = pa.ipc.open_stream(f)\n except (OSError, pa.lib.ArrowInvalid):\n reader = pa.ipc.open_file(f)\n batches = (reader.get_batch(i) for i in range(reader.num_record_batches))\n for batch_idx, record_batch in enumerate(batches):\n pa_table = pa.Table.from_batches([record_batch])\n # Uncomment for debugging (will print the Arrow table size and elements)\n # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")\n # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))\n yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)\n except ValueError as e:\n logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")\n raise\n
| .venv\Lib\site-packages\datasets\packaged_modules\arrow\arrow.py | arrow.py | Python | 3,494 | 0.95 | 0.316456 | 0.107692 | node-utils | 989 | 2024-01-28T22:12:33.017525 | GPL-3.0 | false | 046c2997852da184e8dbae6dd935332a |
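A usage sketch for the `arrow` builder above (the file name is hypothetical). When no `features` are passed, the schema is inferred from the first Arrow file, as in `_split_generators`.

```python
# Sketch: loading Arrow IPC files via the packaged "arrow" builder.
# "my_shard.arrow" is a hypothetical local file.
from datasets import load_dataset

ds = load_dataset("arrow", data_files={"train": "my_shard.arrow"}, split="train")
print(ds.features)  # inferred from the Arrow schema when config.features is None
```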
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\arrow\__pycache__\arrow.cpython-313.pyc | arrow.cpython-313.pyc | Other | 6,044 | 0.8 | 0.020833 | 0.021277 | python-kit | 2 | 2025-04-02T02:14:52.242110 | GPL-3.0 | false | 9cc50cfa2299d936dbbc4abed4724b8d |
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\arrow\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 206 | 0.7 | 0 | 0 | awesome-app | 76 | 2024-04-02T22:19:40.113752 | GPL-3.0 | false | 87fc11881a2fbbe1eb7028f9c77c9855 |
import datasets\n\nfrom ..folder_based_builder import folder_based_builder\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n\nclass AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):\n """Builder Config for AudioFolder."""\n\n drop_labels: bool = None\n drop_metadata: bool = None\n\n def __post_init__(self):\n super().__post_init__()\n\n\nclass AudioFolder(folder_based_builder.FolderBasedBuilder):\n BASE_FEATURE = datasets.Audio\n BASE_COLUMN_NAME = "audio"\n BUILDER_CONFIG_CLASS = AudioFolderConfig\n EXTENSIONS: list[str] # definition at the bottom of the script\n\n\n# Obtained with:\n# ```\n# import soundfile as sf\n#\n# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]\n#\n# # .opus decoding is supported if libsndfile >= 1.0.31:\n# AUDIO_EXTENSIONS.extend([".opus"])\n# ```\n# We intentionally do not run this code on launch because:\n# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed\n# (2) To ensure the list of supported extensions is deterministic\nAUDIO_EXTENSIONS = [\n ".aiff",\n ".au",\n ".avr",\n ".caf",\n ".flac",\n ".htk",\n ".svx",\n ".mat4",\n ".mat5",\n ".mpc2k",\n ".ogg",\n ".paf",\n ".pvf",\n ".raw",\n ".rf64",\n ".sd2",\n ".sds",\n ".ircam",\n ".voc",\n ".w64",\n ".wav",\n ".nist",\n ".wavex",\n ".wve",\n ".xi",\n ".mp3",\n ".opus",\n]\nAudioFolder.EXTENSIONS = AUDIO_EXTENSIONS\n
| .venv\Lib\site-packages\datasets\packaged_modules\audiofolder\audiofolder.py | audiofolder.py | Python | 1,468 | 0.95 | 0.089552 | 0.214286 | react-lib | 497 | 2024-05-03T03:21:23.467777 | GPL-3.0 | false | b9ce460c2721d6d02ce3a25ed07268de |
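A usage sketch for `AudioFolder` (the directory layout is hypothetical): only files with the extensions listed above, plus optional `metadata.*` files, are picked up.

```python
# Sketch: loading a directory of audio files with the packaged "audiofolder" builder.
# "./my_audio" is a hypothetical folder, e.g.:
#   my_audio/train/cat/1.wav
#   my_audio/train/dog/2.wav
from datasets import load_dataset

ds = load_dataset("audiofolder", data_dir="./my_audio")
# When labels are inferred from the directory names, features look like
# {"audio": Audio(...), "label": ClassLabel(...)}.
print(ds["train"].features)
```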
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\audiofolder\__pycache__\audiofolder.cpython-313.pyc | audiofolder.cpython-313.pyc | Other | 1,778 | 0.7 | 0.058824 | 0 | react-lib | 317 | 2024-11-23T00:22:13.375324 | Apache-2.0 | false | f0047cf8f06290fc5ad031e040c5e43b |
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\audiofolder\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 212 | 0.7 | 0 | 0 | node-utils | 26 | 2024-04-16T21:06:29.823225 | BSD-3-Clause | false | ff86a0567bbf681cd5c116d35e1819bd |
import glob\nimport json\nimport os\nimport shutil\nimport time\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nimport pyarrow as pa\n\nimport datasets\nimport datasets.config\nimport datasets.data_files\nfrom datasets.naming import camelcase_to_snakecase, filenames_for_dataset_split\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n\ndef _get_modification_time(cached_directory_path):\n return (Path(cached_directory_path)).stat().st_mtime\n\n\ndef _find_hash_in_cache(\n dataset_name: str,\n config_name: Optional[str],\n cache_dir: Optional[str],\n config_kwargs: dict,\n custom_features: Optional[datasets.Features],\n) -> tuple[str, str, str]:\n if config_name or config_kwargs or custom_features:\n config_id = datasets.BuilderConfig(config_name or "default").create_config_id(\n config_kwargs=config_kwargs, custom_features=custom_features\n )\n else:\n config_id = None\n cache_dir = os.path.expanduser(str(cache_dir or datasets.config.HF_DATASETS_CACHE))\n namespace_and_dataset_name = dataset_name.split("/")\n namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1])\n cached_relative_path = "___".join(namespace_and_dataset_name)\n cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path)\n cached_directory_paths = [\n cached_directory_path\n for cached_directory_path in glob.glob(\n os.path.join(cached_datasets_directory_path_root, config_id or "*", "*", "*")\n )\n if os.path.isdir(cached_directory_path)\n and (\n config_kwargs\n or custom_features\n or json.loads(Path(cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"]\n == Path(cached_directory_path).parts[-3] # no extra params => config_id == config_name\n )\n ]\n if not cached_directory_paths:\n cached_directory_paths = [\n cached_directory_path\n for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*"))\n if os.path.isdir(cached_directory_path)\n ]\n available_configs = sorted(\n {Path(cached_directory_path).parts[-3] for cached_directory_path in cached_directory_paths}\n )\n raise ValueError(\n f"Couldn't find cache for {dataset_name}"\n + (f" for config '{config_id}'" if config_id else "")\n + (f"\nAvailable configs in the cache: {available_configs}" if available_configs else "")\n )\n # get most recent\n cached_directory_path = Path(sorted(cached_directory_paths, key=_get_modification_time)[-1])\n version, hash = cached_directory_path.parts[-2:]\n other_configs = [\n Path(_cached_directory_path).parts[-3]\n for _cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", version, hash))\n if os.path.isdir(_cached_directory_path)\n and (\n config_kwargs\n or custom_features\n or json.loads(Path(_cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"]\n == Path(_cached_directory_path).parts[-3] # no extra params => config_id == config_name\n )\n ]\n if not config_id and len(other_configs) > 1:\n raise ValueError(\n f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}"\n f"\nPlease specify which configuration to reload from the cache, e.g."\n f"\n\tload_dataset('{dataset_name}', '{other_configs[0]}')"\n )\n config_name = cached_directory_path.parts[-3]\n warning_msg = (\n f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} "\n f"(last modified on {time.ctime(_get_modification_time(cached_directory_path))})."\n )\n 
logger.warning(warning_msg)\n return config_name, version, hash\n\n\nclass Cache(datasets.ArrowBasedBuilder):\n def __init__(\n self,\n cache_dir: Optional[str] = None,\n dataset_name: Optional[str] = None,\n config_name: Optional[str] = None,\n version: Optional[str] = "0.0.0",\n hash: Optional[str] = None,\n base_path: Optional[str] = None,\n info: Optional[datasets.DatasetInfo] = None,\n features: Optional[datasets.Features] = None,\n token: Optional[Union[bool, str]] = None,\n repo_id: Optional[str] = None,\n data_files: Optional[Union[str, list, dict, datasets.data_files.DataFilesDict]] = None,\n data_dir: Optional[str] = None,\n storage_options: Optional[dict] = None,\n writer_batch_size: Optional[int] = None,\n **config_kwargs,\n ):\n if repo_id is None and dataset_name is None:\n raise ValueError("repo_id or dataset_name is required for the Cache dataset builder")\n if data_files is not None:\n config_kwargs["data_files"] = data_files\n if data_dir is not None:\n config_kwargs["data_dir"] = data_dir\n if hash == "auto" and version == "auto":\n config_name, version, hash = _find_hash_in_cache(\n dataset_name=repo_id or dataset_name,\n config_name=config_name,\n cache_dir=cache_dir,\n config_kwargs=config_kwargs,\n custom_features=features,\n )\n elif hash == "auto" or version == "auto":\n raise NotImplementedError("Pass both hash='auto' and version='auto' instead")\n super().__init__(\n cache_dir=cache_dir,\n dataset_name=dataset_name,\n config_name=config_name,\n version=version,\n hash=hash,\n base_path=base_path,\n info=info,\n token=token,\n repo_id=repo_id,\n storage_options=storage_options,\n writer_batch_size=writer_batch_size,\n )\n\n def _info(self) -> datasets.DatasetInfo:\n return datasets.DatasetInfo()\n\n def download_and_prepare(self, output_dir: Optional[str] = None, *args, **kwargs):\n if not os.path.exists(self.cache_dir):\n raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}")\n if output_dir is not None and output_dir != self.cache_dir:\n shutil.copytree(self.cache_dir, output_dir)\n\n def _split_generators(self, dl_manager):\n # used to stream from cache\n if isinstance(self.info.splits, datasets.SplitDict):\n split_infos: list[datasets.SplitInfo] = list(self.info.splits.values())\n else:\n raise ValueError(f"Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}")\n return [\n datasets.SplitGenerator(\n name=split_info.name,\n gen_kwargs={\n "files": filenames_for_dataset_split(\n self.cache_dir,\n dataset_name=self.dataset_name,\n split=split_info.name,\n filetype_suffix="arrow",\n shard_lengths=split_info.shard_lengths,\n )\n },\n )\n for split_info in split_infos\n ]\n\n def _generate_tables(self, files):\n # used to stream from cache\n for file_idx, file in enumerate(files):\n with open(file, "rb") as f:\n try:\n for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):\n pa_table = pa.Table.from_batches([record_batch])\n # Uncomment for debugging (will print the Arrow table size and elements)\n # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")\n # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))\n yield f"{file_idx}_{batch_idx}", pa_table\n except ValueError as e:\n logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")\n raise\n
| .venv\Lib\site-packages\datasets\packaged_modules\cache\cache.py | cache.py | Python | 8,196 | 0.95 | 0.198953 | 0.039548 | awesome-app | 690 | 2024-02-12T00:39:26.194636 | Apache-2.0 | false | f62ff705024e1c52f0f0aa0b126b6d85 |
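A sketch of driving the `Cache` builder above directly (the repo id is hypothetical and the dataset must already be in the local cache). With `version="auto"` and `hash="auto"` it resolves the most recently modified cached configuration via `_find_hash_in_cache`.

```python
# Sketch: reloading a previously cached dataset without re-downloading.
# "username/my_dataset" is hypothetical and must already exist in the HF datasets cache.
from datasets.packaged_modules.cache.cache import Cache

builder = Cache(repo_id="username/my_dataset", version="auto", hash="auto")
builder.download_and_prepare()          # only verifies the cache directory exists
ds = builder.as_dataset(split="train")  # read the cached Arrow shards
print(ds)
```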
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\cache\__pycache__\cache.cpython-313.pyc | cache.cpython-313.pyc | Other | 10,464 | 0.95 | 0.03876 | 0.008 | vue-tools | 364 | 2023-11-08T00:54:05.893054 | GPL-3.0 | false | 04adfb9aebbc8050fc8c783f78572f1b |
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\cache\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 206 | 0.7 | 0 | 0 | python-kit | 765 | 2023-07-31T22:16:33.068465 | MIT | false | 0ac9c8133aa4d6e646b2d68be1a9007e |
import itertools\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Optional, Union\n\nimport pandas as pd\nimport pyarrow as pa\n\nimport datasets\nimport datasets.config\nfrom datasets.features.features import require_storage_cast\nfrom datasets.table import table_cast\nfrom datasets.utils.py_utils import Literal\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]\n_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]\n_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]\n_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]\n_PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS = ["verbose"]\n\n\n@dataclass\nclass CsvConfig(datasets.BuilderConfig):\n """BuilderConfig for CSV."""\n\n sep: str = ","\n delimiter: Optional[str] = None\n header: Optional[Union[int, list[int], str]] = "infer"\n names: Optional[list[str]] = None\n column_names: Optional[list[str]] = None\n index_col: Optional[Union[int, str, list[int], list[str]]] = None\n usecols: Optional[Union[list[int], list[str]]] = None\n prefix: Optional[str] = None\n mangle_dupe_cols: bool = True\n engine: Optional[Literal["c", "python", "pyarrow"]] = None\n converters: dict[Union[int, str], Callable[[Any], Any]] = None\n true_values: Optional[list] = None\n false_values: Optional[list] = None\n skipinitialspace: bool = False\n skiprows: Optional[Union[int, list[int]]] = None\n nrows: Optional[int] = None\n na_values: Optional[Union[str, list[str]]] = None\n keep_default_na: bool = True\n na_filter: bool = True\n verbose: bool = False\n skip_blank_lines: bool = True\n thousands: Optional[str] = None\n decimal: str = "."\n lineterminator: Optional[str] = None\n quotechar: str = '"'\n quoting: int = 0\n escapechar: Optional[str] = None\n comment: Optional[str] = None\n encoding: Optional[str] = None\n dialect: Optional[str] = None\n error_bad_lines: bool = True\n warn_bad_lines: bool = True\n skipfooter: int = 0\n doublequote: bool = True\n memory_map: bool = False\n float_precision: Optional[str] = None\n chunksize: int = 10_000\n features: Optional[datasets.Features] = None\n encoding_errors: Optional[str] = "strict"\n on_bad_lines: Literal["error", "warn", "skip"] = "error"\n date_format: Optional[str] = None\n\n def __post_init__(self):\n super().__post_init__()\n if self.delimiter is not None:\n self.sep = self.delimiter\n if self.column_names is not None:\n self.names = self.column_names\n\n @property\n def pd_read_csv_kwargs(self):\n pd_read_csv_kwargs = {\n "sep": self.sep,\n "header": self.header,\n "names": self.names,\n "index_col": self.index_col,\n "usecols": self.usecols,\n "prefix": self.prefix,\n "mangle_dupe_cols": self.mangle_dupe_cols,\n "engine": self.engine,\n "converters": self.converters,\n "true_values": self.true_values,\n "false_values": self.false_values,\n "skipinitialspace": self.skipinitialspace,\n "skiprows": self.skiprows,\n "nrows": self.nrows,\n "na_values": self.na_values,\n "keep_default_na": self.keep_default_na,\n "na_filter": self.na_filter,\n "verbose": self.verbose,\n "skip_blank_lines": self.skip_blank_lines,\n "thousands": self.thousands,\n "decimal": self.decimal,\n "lineterminator": self.lineterminator,\n "quotechar": self.quotechar,\n "quoting": self.quoting,\n "escapechar": self.escapechar,\n "comment": self.comment,\n "encoding": self.encoding,\n "dialect": self.dialect,\n "error_bad_lines": self.error_bad_lines,\n "warn_bad_lines": 
self.warn_bad_lines,\n "skipfooter": self.skipfooter,\n "doublequote": self.doublequote,\n "memory_map": self.memory_map,\n "float_precision": self.float_precision,\n "chunksize": self.chunksize,\n "encoding_errors": self.encoding_errors,\n "on_bad_lines": self.on_bad_lines,\n "date_format": self.date_format,\n }\n\n # some kwargs must not be passed if they don't have a default value\n # some others are deprecated and we can also not pass them if they are the default value\n for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:\n if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):\n del pd_read_csv_kwargs[pd_read_csv_parameter]\n\n # Remove 1.3 new arguments\n if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):\n for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:\n del pd_read_csv_kwargs[pd_read_csv_parameter]\n\n # Remove 2.0 new arguments\n if not (datasets.config.PANDAS_VERSION.major >= 2):\n for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:\n del pd_read_csv_kwargs[pd_read_csv_parameter]\n\n # Remove 2.2 deprecated arguments\n if datasets.config.PANDAS_VERSION.release >= (2, 2):\n for pd_read_csv_parameter in _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS:\n if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):\n del pd_read_csv_kwargs[pd_read_csv_parameter]\n\n return pd_read_csv_kwargs\n\n\nclass Csv(datasets.ArrowBasedBuilder):\n BUILDER_CONFIG_CLASS = CsvConfig\n\n def _info(self):\n return datasets.DatasetInfo(features=self.config.features)\n\n def _split_generators(self, dl_manager):\n """We handle string, list and dicts in datafiles"""\n if not self.config.data_files:\n raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")\n dl_manager.download_config.extract_on_the_fly = True\n data_files = dl_manager.download_and_extract(self.config.data_files)\n splits = []\n for split_name, files in data_files.items():\n if isinstance(files, str):\n files = [files]\n files = [dl_manager.iter_files(file) for file in files]\n splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))\n return splits\n\n def _cast_table(self, pa_table: pa.Table) -> pa.Table:\n if self.config.features is not None:\n schema = self.config.features.arrow_schema\n if all(not require_storage_cast(feature) for feature in self.config.features.values()):\n # cheaper cast\n pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)\n else:\n # more expensive cast; allows str <-> int/float or str to Audio for example\n pa_table = table_cast(pa_table, schema)\n return pa_table\n\n def _generate_tables(self, files):\n schema = self.config.features.arrow_schema if self.config.features else None\n # dtype allows reading an int column as str\n dtype = (\n {\n name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object\n for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())\n }\n if schema is not None\n else None\n )\n for file_idx, file in enumerate(itertools.chain.from_iterable(files)):\n csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)\n try:\n for batch_idx, df in enumerate(csv_file_reader):\n pa_table = pa.Table.from_pandas(df)\n # Uncomment for debugging (will print the Arrow table size and 
elements)\n # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")\n # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))\n yield (file_idx, batch_idx), self._cast_table(pa_table)\n except ValueError as e:\n logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")\n raise\n
| .venv\Lib\site-packages\datasets\packaged_modules\csv\csv.py | csv.py | Python | 8,568 | 0.95 | 0.20202 | 0.062147 | react-lib | 35 | 2025-06-26T12:29:14.001092 | BSD-3-Clause | false | f4a174b30dd2d3e74ce8d9dfa199baa0 |
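A usage sketch for the `csv` builder (file names are hypothetical): `CsvConfig` fields map onto `pandas.read_csv` keyword arguments, so loader options can be passed straight through `load_dataset`.

```python
# Sketch: CsvConfig fields are forwarded to pandas.read_csv.
# "train.csv" and "test.csv" are hypothetical files.
from datasets import load_dataset

ds = load_dataset(
    "csv",
    data_files={"train": "train.csv", "test": "test.csv"},
    sep=";",             # CsvConfig.sep
    skiprows=1,          # CsvConfig.skiprows
    na_values=["N/A"],   # CsvConfig.na_values
)
print(ds["train"].column_names)
```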
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\csv\__pycache__\csv.cpython-313.pyc | csv.cpython-313.pyc | Other | 11,866 | 0.95 | 0.00641 | 0.006579 | vue-tools | 961 | 2023-10-16T23:07:15.564597 | BSD-3-Clause | false | 9d5ce8c005368ee90e4e8f67566d2642 |
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\csv\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 204 | 0.7 | 0 | 0 | vue-tools | 80 | 2025-06-08T00:24:34.811050 | Apache-2.0 | false | 02ab3349489541a91b46ab958f07bb55 |
import collections\nimport io\nimport itertools\nimport os\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Iterator, Optional, Union\n\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.dataset as ds\nimport pyarrow.json as paj\nimport pyarrow.parquet as pq\n\nimport datasets\nfrom datasets import config\nfrom datasets.features.features import FeatureType, _visit, _visit_with_path, _VisitPath, require_storage_cast\nfrom datasets.utils.file_utils import readline\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n\ndef count_path_segments(path):\n return path.replace("\\", "/").count("/")\n\n\n@dataclass\nclass FolderBasedBuilderConfig(datasets.BuilderConfig):\n """BuilderConfig for AutoFolder."""\n\n features: Optional[datasets.Features] = None\n drop_labels: bool = None\n drop_metadata: bool = None\n filters: Optional[Union[ds.Expression, list[tuple], list[list[tuple]]]] = None\n\n def __post_init__(self):\n super().__post_init__()\n\n\nclass FolderBasedBuilder(datasets.GeneratorBasedBuilder):\n """\n Base class for generic data loaders for vision and image data.\n\n\n Abstract class attributes to be overridden by a child class:\n BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...)\n BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...)\n BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig`\n EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files\n will be included in a dataset)\n """\n\n BASE_FEATURE: type[FeatureType]\n BASE_COLUMN_NAME: str\n BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig\n EXTENSIONS: list[str]\n\n METADATA_FILENAMES: list[str] = ["metadata.csv", "metadata.jsonl", "metadata.parquet"]\n\n def _info(self):\n return datasets.DatasetInfo(features=self.config.features)\n\n def _split_generators(self, dl_manager):\n if not self.config.data_files:\n raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")\n dl_manager.download_config.extract_on_the_fly = True\n # Do an early pass if:\n # * `drop_labels` is None (default) or False, to infer the class labels\n # * `drop_metadata` is None (default) or False, to find the metadata files\n do_analyze = not self.config.drop_labels or not self.config.drop_metadata\n labels, path_depths = set(), set()\n metadata_files = collections.defaultdict(set)\n\n def analyze(files_or_archives, downloaded_files_or_dirs, split):\n if len(downloaded_files_or_dirs) == 0:\n return\n # The files are separated from the archives at this point, so check the first sample\n # to see if it's a file or a directory and iterate accordingly\n if os.path.isfile(downloaded_files_or_dirs[0]):\n original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs\n for original_file, downloaded_file in zip(original_files, downloaded_files):\n original_file, downloaded_file = str(original_file), str(downloaded_file)\n _, original_file_ext = os.path.splitext(original_file)\n if original_file_ext.lower() in self.EXTENSIONS:\n if not self.config.drop_labels:\n labels.add(os.path.basename(os.path.dirname(original_file)))\n path_depths.add(count_path_segments(original_file))\n elif os.path.basename(original_file) in self.METADATA_FILENAMES:\n metadata_files[split].add((original_file, downloaded_file))\n else:\n original_file_name = os.path.basename(original_file)\n logger.debug(\n f"The file '{original_file_name}' was 
ignored: it is not a {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."\n )\n else:\n archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs\n for archive, downloaded_dir in zip(archives, downloaded_dirs):\n archive, downloaded_dir = str(archive), str(downloaded_dir)\n for downloaded_dir_file in dl_manager.iter_files(downloaded_dir):\n _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)\n if downloaded_dir_file_ext in self.EXTENSIONS:\n if not self.config.drop_labels:\n labels.add(os.path.basename(os.path.dirname(downloaded_dir_file)))\n path_depths.add(count_path_segments(downloaded_dir_file))\n elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES:\n metadata_files[split].add((None, downloaded_dir_file))\n else:\n archive_file_name = os.path.basename(archive)\n original_file_name = os.path.basename(downloaded_dir_file)\n logger.debug(\n f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not a {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."\n )\n\n data_files = self.config.data_files\n splits = []\n for split_name, files in data_files.items():\n if isinstance(files, str):\n files = [files]\n files, archives = self._split_files_and_archives(files)\n downloaded_files = dl_manager.download(files)\n downloaded_dirs = dl_manager.download_and_extract(archives)\n if do_analyze: # drop_metadata is None or False, drop_labels is None or False\n logger.info(f"Searching for labels and/or metadata files in {split_name} data files...")\n analyze(files, downloaded_files, split_name)\n analyze(archives, downloaded_dirs, split_name)\n\n if metadata_files:\n # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False\n add_metadata = not self.config.drop_metadata\n # if `metadata_files` are found, don't add labels\n add_labels = False\n else:\n # if `metadata_files` are not found, don't add metadata\n add_metadata = False\n # if `metadata_files` are not found and `drop_labels` is None (default) -\n # add labels if files are on the same level in directory hierarchy and there is more than one label\n add_labels = (\n (len(labels) > 1 and len(path_depths) == 1)\n if self.config.drop_labels is None\n else not self.config.drop_labels\n )\n\n if add_labels:\n logger.info("Adding the labels inferred from data directories to the dataset's features...")\n if add_metadata:\n logger.info("Adding metadata to the dataset...")\n else:\n add_labels, add_metadata, metadata_files = False, False, {}\n\n splits.append(\n datasets.SplitGenerator(\n name=split_name,\n gen_kwargs={\n "files": tuple(zip(files, downloaded_files))\n + tuple((None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs),\n "metadata_files": metadata_files.get(split_name, []),\n "add_labels": add_labels,\n "add_metadata": add_metadata,\n },\n )\n )\n\n if add_metadata:\n # Verify that:\n # * all metadata files have the same set of features in each split\n # * the `file_name` key is one of the metadata keys and is of type string\n features_per_metadata_file: list[tuple[str, datasets.Features]] = []\n\n # Check that all metadata files share the same format\n metadata_ext = {\n os.path.splitext(original_metadata_file or downloaded_metadata_file)[-1]\n for original_metadata_file, downloaded_metadata_file in itertools.chain.from_iterable(\n metadata_files.values()\n )\n }\n if len(metadata_ext) > 1:\n raise ValueError(f"Found metadata files with different extensions: 
{list(metadata_ext)}")\n metadata_ext = metadata_ext.pop()\n\n for split_metadata_files in metadata_files.values():\n pa_metadata_table = None\n for _, downloaded_metadata_file in split_metadata_files:\n for pa_metadata_table in self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext):\n break # just fetch the first rows\n if pa_metadata_table is not None:\n features_per_metadata_file.append(\n (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema))\n )\n break # no need to fetch all the files\n for downloaded_metadata_file, metadata_features in features_per_metadata_file:\n if metadata_features != features_per_metadata_file[0][1]:\n raise ValueError(\n f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}"\n )\n metadata_features = features_per_metadata_file[0][1]\n feature_not_found = True\n\n def _set_feature(feature):\n nonlocal feature_not_found\n if isinstance(feature, dict):\n out = type(feature)()\n for key in feature:\n if (key == "file_name" or key.endswith("_file_name")) and feature[key] == datasets.Value(\n "string"\n ):\n key = key[: -len("_file_name")] or self.BASE_COLUMN_NAME\n out[key] = self.BASE_FEATURE()\n feature_not_found = False\n elif (key == "file_names" or key.endswith("_file_names")) and feature[\n key\n ] == datasets.Sequence(datasets.Value("string")):\n key = key[: -len("_file_names")] or (self.BASE_COLUMN_NAME + "s")\n out[key] = datasets.Sequence(self.BASE_FEATURE())\n feature_not_found = False\n elif (key == "file_names" or key.endswith("_file_names")) and feature[key] == [\n datasets.Value("string")\n ]:\n key = key[: -len("_file_names")] or (self.BASE_COLUMN_NAME + "s")\n out[key] = [self.BASE_FEATURE()]\n feature_not_found = False\n else:\n out[key] = feature[key]\n return out\n return feature\n\n metadata_features = _visit(metadata_features, _set_feature)\n\n if feature_not_found:\n raise ValueError(\n "`file_name` or `*_file_name` must be present as dictionary key (with type string) in metadata files"\n )\n else:\n metadata_features = None\n\n # Normally, we would do this in _info, but we need to know the labels and/or metadata\n # before building the features\n if self.config.features is None:\n if add_metadata:\n self.info.features = metadata_features\n elif add_labels:\n self.info.features = datasets.Features(\n {\n self.BASE_COLUMN_NAME: self.BASE_FEATURE(),\n "label": datasets.ClassLabel(names=sorted(labels)),\n }\n )\n else:\n self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()})\n\n return splits\n\n def _split_files_and_archives(self, data_files):\n files, archives = [], []\n for data_file in data_files:\n _, data_file_ext = os.path.splitext(data_file)\n if data_file_ext.lower() in self.EXTENSIONS:\n files.append(data_file)\n elif os.path.basename(data_file) in self.METADATA_FILENAMES:\n files.append(data_file)\n else:\n archives.append(data_file)\n return files, archives\n\n def _read_metadata(self, metadata_file: str, metadata_ext: str = "") -> Iterator[pa.Table]:\n """using the same logic as the Csv, Json and Parquet dataset builders to stream the data"""\n if self.config.filters is not None:\n filter_expr = (\n pq.filters_to_expression(self.config.filters)\n if isinstance(self.config.filters, list)\n else self.config.filters\n )\n else:\n filter_expr = None\n if metadata_ext == ".csv":\n chunksize = 10_000 # 10k lines\n schema = 
self.config.features.arrow_schema if self.config.features else None\n # dtype allows reading an int column as str\n dtype = (\n {\n name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object\n for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())\n }\n if schema is not None\n else None\n )\n csv_file_reader = pd.read_csv(metadata_file, iterator=True, dtype=dtype, chunksize=chunksize)\n for df in csv_file_reader:\n pa_table = pa.Table.from_pandas(df)\n if self.config.filters is not None:\n pa_table = pa_table.filter(filter_expr)\n if len(pa_table) > 0:\n yield pa_table\n elif metadata_ext == ".jsonl":\n with open(metadata_file, "rb") as f:\n chunksize: int = 10 << 20 # 10MB\n # Use block_size equal to the chunk size divided by 32 to leverage multithreading\n # Set a default minimum value of 16kB if the chunk size is really small\n block_size = max(chunksize // 32, 16 << 10)\n while True:\n batch = f.read(chunksize)\n if not batch:\n break\n # Finish current line\n try:\n batch += f.readline()\n except (AttributeError, io.UnsupportedOperation):\n batch += readline(f)\n while True:\n try:\n pa_table = paj.read_json(\n io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)\n )\n break\n except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:\n if (\n isinstance(e, pa.ArrowInvalid)\n and "straddling" not in str(e)\n or block_size > len(batch)\n ):\n raise\n else:\n # Increase the block size in case it was too small.\n # The block size will be reset for the next file.\n logger.debug(\n f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."\n )\n block_size *= 2\n if self.config.filters is not None:\n pa_table = pa_table.filter(filter_expr)\n if len(pa_table) > 0:\n yield pa_table\n else:\n with open(metadata_file, "rb") as f:\n parquet_fragment = ds.ParquetFileFormat().make_fragment(f)\n if parquet_fragment.row_groups:\n batch_size = parquet_fragment.row_groups[0].num_rows\n else:\n batch_size = config.DEFAULT_MAX_BATCH_SIZE\n for record_batch in parquet_fragment.to_batches(\n batch_size=batch_size,\n filter=filter_expr,\n batch_readahead=0,\n fragment_readahead=0,\n ):\n yield pa.Table.from_batches([record_batch])\n\n def _generate_examples(self, files, metadata_files, add_metadata, add_labels):\n sample_idx = 0\n if add_metadata:\n feature_paths = []\n\n def find_feature_path(feature, feature_path):\n nonlocal feature_paths\n if feature_path and isinstance(feature, self.BASE_FEATURE):\n feature_paths.append(feature_path)\n\n _visit_with_path(self.info.features, find_feature_path)\n\n for original_metadata_file, downloaded_metadata_file in metadata_files:\n metadata_ext = os.path.splitext(original_metadata_file or downloaded_metadata_file)[-1]\n downloaded_metadata_dir = os.path.dirname(downloaded_metadata_file)\n\n def set_feature(item, feature_path: _VisitPath):\n if len(feature_path) == 2 and isinstance(feature_path[0], str) and feature_path[1] == 0:\n item[feature_path[0]] = item.pop("file_names", None) or item.pop(\n feature_path[0] + "_file_names", None\n )\n elif len(feature_path) == 1 and isinstance(feature_path[0], str):\n item[feature_path[0]] = item.pop("file_name", None) or item.pop(\n feature_path[0] + "_file_name", None\n )\n elif len(feature_path) == 0:\n file_relpath = os.path.normpath(item).replace("\\", "/")\n item = os.path.join(downloaded_metadata_dir, file_relpath)\n return item\n\n for pa_metadata_table in 
self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext):\n for sample in pa_metadata_table.to_pylist():\n for feature_path in feature_paths:\n _nested_apply(sample, feature_path, set_feature)\n yield sample_idx, sample\n sample_idx += 1\n else:\n if self.config.filters is not None:\n filter_expr = (\n pq.filters_to_expression(self.config.filters)\n if isinstance(self.config.filters, list)\n else self.config.filters\n )\n for original_file, downloaded_file_or_dir in files:\n downloaded_files = [downloaded_file_or_dir] if original_file else downloaded_file_or_dir\n for downloaded_file in downloaded_files:\n original_file_ext = os.path.splitext(original_file or downloaded_file)[-1]\n if original_file_ext.lower() not in self.EXTENSIONS:\n continue\n sample = {self.BASE_COLUMN_NAME: downloaded_file}\n if add_labels:\n sample["label"] = os.path.basename(os.path.dirname(original_file or downloaded_file))\n if self.config.filters is not None:\n pa_table = pa.Table.from_pylist([sample]).filter(filter_expr)\n if len(pa_table) == 0:\n continue\n yield sample_idx, sample\n sample_idx += 1\n\n\ndef _nested_apply(item: Any, feature_path: _VisitPath, func: Callable[[Any, _VisitPath], Any]):\n # see _visit_with_path() to see how feature paths are constructed\n item = func(item, feature_path)\n if feature_path:\n key = feature_path[0]\n if key == 0:\n for i in range(len(item)):\n item[i] = _nested_apply(item[i], feature_path[1:], func)\n else:\n item[key] = _nested_apply(item[key], feature_path[1:], func)\n return item\n
| .venv\Lib\site-packages\datasets\packaged_modules\folder_based_builder\folder_based_builder.py | folder_based_builder.py | Python | 21,074 | 0.95 | 0.251799 | 0.06117 | react-lib | 381 | 2024-06-12T01:04:45.185432 | MIT | false | 95ad29081dd9b1b0797b1f7fd3e804b8 |
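A sketch of the metadata convention that `FolderBasedBuilder` implements: a `metadata.csv` / `metadata.jsonl` / `metadata.parquet` file whose `file_name` string column points at the data files, with the remaining columns kept as features. The paths and column names below are hypothetical.

```python
# Sketch of the FolderBasedBuilder metadata convention (hypothetical layout):
#   my_images/metadata.jsonl with lines such as
#     {"file_name": "0001.png", "caption": "a red square"}
#     {"file_name": "0002.png", "caption": "a blue circle"}
#   my_images/0001.png, my_images/0002.png
from datasets import load_dataset

ds = load_dataset("imagefolder", data_dir="./my_images", split="train")
# "file_name" is replaced by the base feature ("image"); "caption" stays a string column.
print(ds.features)
```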
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\folder_based_builder\__pycache__\folder_based_builder.cpython-313.pyc | folder_based_builder.cpython-313.pyc | Other | 23,578 | 0.95 | 0.040936 | 0.006061 | python-kit | 785 | 2024-10-26T17:21:18.797471 | GPL-3.0 | false | b664bfeb5dc84d979a968093824722f9 |
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\folder_based_builder\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 221 | 0.7 | 0 | 0 | node-utils | 167 | 2024-03-05T06:29:49.159482 | BSD-3-Clause | false | 4943c802e4a9fbca606f8929fb3e7b3a |
from dataclasses import dataclass\nfrom typing import Callable, Optional\n\nimport datasets\n\n\n@dataclass\nclass GeneratorConfig(datasets.BuilderConfig):\n generator: Optional[Callable] = None\n gen_kwargs: Optional[dict] = None\n features: Optional[datasets.Features] = None\n split: datasets.NamedSplit = datasets.Split.TRAIN\n\n def __post_init__(self):\n super().__post_init__()\n if self.generator is None:\n raise ValueError("generator must be specified")\n\n if self.gen_kwargs is None:\n self.gen_kwargs = {}\n\n\nclass Generator(datasets.GeneratorBasedBuilder):\n BUILDER_CONFIG_CLASS = GeneratorConfig\n\n def _info(self):\n return datasets.DatasetInfo(features=self.config.features)\n\n def _split_generators(self, dl_manager):\n return [datasets.SplitGenerator(name=self.config.split, gen_kwargs=self.config.gen_kwargs)]\n\n def _generate_examples(self, **gen_kwargs):\n yield from enumerate(self.config.generator(**gen_kwargs))\n
| .venv\Lib\site-packages\datasets\packaged_modules\generator\generator.py | generator.py | Python | 1,002 | 0.85 | 0.242424 | 0 | awesome-app | 705 | 2024-10-27T21:22:09.750971 | Apache-2.0 | false | 0644154bf0af030335b3e8b3e8a5af44 |
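A minimal sketch of how the `Generator` builder above is typically reached, via `Dataset.from_generator` (the generator and its kwargs are made up).

```python
# Sketch: Dataset.from_generator is backed by the packaged Generator builder.
from datasets import Dataset

def squares(n: int):
    for i in range(n):
        yield {"x": i, "x_squared": i * i}

ds = Dataset.from_generator(squares, gen_kwargs={"n": 5})
print(ds[:])  # {'x': [0, 1, 2, 3, 4], 'x_squared': [0, 1, 4, 9, 16]}
```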
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\generator\__pycache__\generator.cpython-313.pyc | generator.cpython-313.pyc | Other | 2,633 | 0.8 | 0 | 0 | vue-tools | 185 | 2023-09-12T15:34:39.459024 | GPL-3.0 | false | 03939137e246a3a8a0c6ad2c0a7df19c |
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\generator\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 210 | 0.7 | 0 | 0 | python-kit | 61 | 2023-10-04T18:25:12.275736 | Apache-2.0 | false | e4eacfd558aab273c64591b5460f40c7 |
import datasets\n\nfrom ..folder_based_builder import folder_based_builder\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n\nclass ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):\n """BuilderConfig for ImageFolder."""\n\n drop_labels: bool = None\n drop_metadata: bool = None\n\n def __post_init__(self):\n super().__post_init__()\n\n\nclass ImageFolder(folder_based_builder.FolderBasedBuilder):\n BASE_FEATURE = datasets.Image\n BASE_COLUMN_NAME = "image"\n BUILDER_CONFIG_CLASS = ImageFolderConfig\n EXTENSIONS: list[str] # definition at the bottom of the script\n\n\n# Obtained with:\n# ```\n# import PIL.Image\n# IMAGE_EXTENSIONS = []\n# PIL.Image.init()\n# for ext, format in PIL.Image.EXTENSION.items():\n# if format in PIL.Image.OPEN:\n# IMAGE_EXTENSIONS.append(ext[1:])\n# ```\n# We intentionally do not run this code on launch because:\n# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed\n# (2) To ensure the list of supported extensions is deterministic\nIMAGE_EXTENSIONS = [\n ".blp",\n ".bmp",\n ".dib",\n ".bufr",\n ".cur",\n ".pcx",\n ".dcx",\n ".dds",\n ".ps",\n ".eps",\n ".fit",\n ".fits",\n ".fli",\n ".flc",\n ".ftc",\n ".ftu",\n ".gbr",\n ".gif",\n ".grib",\n # ".h5", # may contain zero or several images\n # ".hdf", # may contain zero or several images\n ".png",\n ".apng",\n ".jp2",\n ".j2k",\n ".jpc",\n ".jpf",\n ".jpx",\n ".j2c",\n ".icns",\n ".ico",\n ".im",\n ".iim",\n ".tif",\n ".tiff",\n ".jfif",\n ".jpe",\n ".jpg",\n ".jpeg",\n ".mpg",\n ".mpeg",\n ".msp",\n ".pcd",\n ".pxr",\n ".pbm",\n ".pgm",\n ".ppm",\n ".pnm",\n ".psd",\n ".bw",\n ".rgb",\n ".rgba",\n ".sgi",\n ".ras",\n ".tga",\n ".icb",\n ".vda",\n ".vst",\n ".webp",\n ".wmf",\n ".emf",\n ".xbm",\n ".xpm",\n]\nImageFolder.EXTENSIONS = IMAGE_EXTENSIONS\n
| .venv\Lib\site-packages\datasets\packaged_modules\imagefolder\imagefolder.py | imagefolder.py | Python | 1,956 | 0.95 | 0.058252 | 0.152174 | vue-tools | 326 | 2023-08-11T03:38:33.397212 | BSD-3-Clause | false | f1b59f8c31dc6f1e415f5563f5d6d206 |
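A usage sketch for `ImageFolder` without metadata files (directory names are hypothetical): labels are inferred from the parent directory names, as implemented in `FolderBasedBuilder._split_generators`.

```python
# Sketch: label inference from directory names (hypothetical layout):
#   pets/train/cat/0.jpg
#   pets/train/dog/0.jpg
from datasets import load_dataset

ds = load_dataset("imagefolder", data_dir="./pets")
print(ds["train"].features["label"].names)  # expected: ['cat', 'dog'] (sorted)
```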
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\imagefolder\__pycache__\imagefolder.cpython-313.pyc | imagefolder.cpython-313.pyc | Other | 1,980 | 0.7 | 0.058824 | 0 | react-lib | 210 | 2025-03-11T04:51:49.983217 | MIT | false | eaff5904696ad9dea38aa7b7a283d5be |
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\imagefolder\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 212 | 0.7 | 0 | 0 | node-utils | 709 | 2023-08-24T20:59:09.431430 | BSD-3-Clause | false | f6322916b7f0be5fbb3915def6207fbb |
import io\nimport itertools\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.json as paj\n\nimport datasets\nimport datasets.config\nfrom datasets.table import table_cast\nfrom datasets.utils.file_utils import readline\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n\ndef ujson_dumps(*args, **kwargs):\n try:\n return pd.io.json.ujson_dumps(*args, **kwargs)\n except AttributeError:\n # Before pandas-2.2.0, ujson_dumps was renamed to dumps: import ujson_dumps as dumps\n return pd.io.json.dumps(*args, **kwargs)\n\n\ndef ujson_loads(*args, **kwargs):\n try:\n return pd.io.json.ujson_loads(*args, **kwargs)\n except AttributeError:\n # Before pandas-2.2.0, ujson_loads was renamed to loads: import ujson_loads as loads\n return pd.io.json.loads(*args, **kwargs)\n\n\ndef pandas_read_json(path_or_buf, **kwargs):\n if datasets.config.PANDAS_VERSION.major >= 2:\n kwargs["dtype_backend"] = "pyarrow"\n return pd.read_json(path_or_buf, **kwargs)\n\n\n@dataclass\nclass JsonConfig(datasets.BuilderConfig):\n """BuilderConfig for JSON."""\n\n features: Optional[datasets.Features] = None\n encoding: str = "utf-8"\n encoding_errors: Optional[str] = None\n field: Optional[str] = None\n use_threads: bool = True # deprecated\n block_size: Optional[int] = None # deprecated\n chunksize: int = 10 << 20 # 10MB\n newlines_in_values: Optional[bool] = None\n\n def __post_init__(self):\n super().__post_init__()\n\n\nclass Json(datasets.ArrowBasedBuilder):\n BUILDER_CONFIG_CLASS = JsonConfig\n\n def _info(self):\n if self.config.block_size is not None:\n logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")\n self.config.chunksize = self.config.block_size\n if self.config.use_threads is not True:\n logger.warning(\n "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."\n )\n if self.config.newlines_in_values is not None:\n raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")\n return datasets.DatasetInfo(features=self.config.features)\n\n def _split_generators(self, dl_manager):\n """We handle string, list and dicts in datafiles"""\n if not self.config.data_files:\n raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")\n dl_manager.download_config.extract_on_the_fly = True\n data_files = dl_manager.download_and_extract(self.config.data_files)\n splits = []\n for split_name, files in data_files.items():\n if isinstance(files, str):\n files = [files]\n files = [dl_manager.iter_files(file) for file in files]\n splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))\n return splits\n\n def _cast_table(self, pa_table: pa.Table) -> pa.Table:\n if self.config.features is not None:\n # adding missing columns\n for column_name in set(self.config.features) - set(pa_table.column_names):\n type = self.config.features.arrow_schema.field(column_name).type\n pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))\n # more expensive cast to support nested structures with keys in a different order\n # allows str <-> int/float or str to Audio for example\n pa_table = table_cast(pa_table, self.config.features.arrow_schema)\n return pa_table\n\n def _generate_tables(self, files):\n for file_idx, file in enumerate(itertools.chain.from_iterable(files)):\n # If the file is one json object and if we need to look at the 
items in one specific field\n if self.config.field is not None:\n with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:\n dataset = ujson_loads(f.read())\n # We keep only the field we are interested in\n dataset = dataset[self.config.field]\n df = pandas_read_json(io.StringIO(ujson_dumps(dataset)))\n if df.columns.tolist() == [0]:\n df.columns = list(self.config.features) if self.config.features else ["text"]\n pa_table = pa.Table.from_pandas(df, preserve_index=False)\n yield file_idx, self._cast_table(pa_table)\n\n # If the file has one json object per line\n else:\n with open(file, "rb") as f:\n batch_idx = 0\n # Use block_size equal to the chunk size divided by 32 to leverage multithreading\n # Set a default minimum value of 16kB if the chunk size is really small\n block_size = max(self.config.chunksize // 32, 16 << 10)\n encoding_errors = (\n self.config.encoding_errors if self.config.encoding_errors is not None else "strict"\n )\n while True:\n batch = f.read(self.config.chunksize)\n if not batch:\n break\n # Finish current line\n try:\n batch += f.readline()\n except (AttributeError, io.UnsupportedOperation):\n batch += readline(f)\n # PyArrow only accepts utf-8 encoded bytes\n if self.config.encoding != "utf-8":\n batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")\n try:\n while True:\n try:\n pa_table = paj.read_json(\n io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)\n )\n break\n except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:\n if (\n isinstance(e, pa.ArrowInvalid)\n and "straddling" not in str(e)\n or block_size > len(batch)\n ):\n raise\n else:\n # Increase the block size in case it was too small.\n # The block size will be reset for the next file.\n logger.debug(\n f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."\n )\n block_size *= 2\n except pa.ArrowInvalid as e:\n try:\n with open(\n file, encoding=self.config.encoding, errors=self.config.encoding_errors\n ) as f:\n df = pandas_read_json(f)\n except ValueError:\n logger.error(f"Failed to load JSON from file '{file}' with error {type(e)}: {e}")\n raise e\n if df.columns.tolist() == [0]:\n df.columns = list(self.config.features) if self.config.features else ["text"]\n try:\n pa_table = pa.Table.from_pandas(df, preserve_index=False)\n except pa.ArrowInvalid as e:\n logger.error(\n f"Failed to convert pandas DataFrame to Arrow Table from file '{file}' with error {type(e)}: {e}"\n )\n raise ValueError(\n f"Failed to convert pandas DataFrame to Arrow Table from file {file}."\n ) from None\n yield file_idx, self._cast_table(pa_table)\n break\n yield (file_idx, batch_idx), self._cast_table(pa_table)\n batch_idx += 1\n
| .venv\Lib\site-packages\datasets\packaged_modules\json\json.py | json.py | Python | 8,698 | 0.95 | 0.247191 | 0.089172 | vue-tools | 435 | 2024-08-26T13:34:27.091847 | GPL-3.0 | false | 5c42bf9d23aa16f49ee23f32740d7f97 |
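A usage sketch for the `json` builder (file names and structure are hypothetical): JSON Lines files are streamed in `chunksize` blocks, and the `field` option selects a nested key when the file is a single JSON document.

```python
# Sketch: loading JSON Lines, and a single JSON document via the "field" option.
# "train.jsonl" and "dump.json" are hypothetical files.
from datasets import load_dataset

lines_ds = load_dataset("json", data_files="train.jsonl", split="train")

# dump.json is assumed to look like {"version": "1.0", "data": [{...}, {...}]}
nested_ds = load_dataset("json", data_files="dump.json", field="data", split="train")
```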
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\json\__pycache__\json.cpython-313.pyc | json.cpython-313.pyc | Other | 11,452 | 0.8 | 0.009804 | 0 | python-kit | 592 | 2024-04-13T04:18:14.566457 | MIT | false | cc0cfe6740281b02f64823647db2dce2 |
\n\n
| .venv\Lib\site-packages\datasets\packaged_modules\json\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 205 | 0.7 | 0 | 0 | awesome-app | 186 | 2024-09-18T01:50:15.102386 | BSD-3-Clause | false | 11e8a70b418447a8637bd2c1d9ce7ac4 |
import itertools\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nimport pandas as pd\nimport pyarrow as pa\n\nimport datasets\nfrom datasets.table import table_cast\n\n\n@dataclass\nclass PandasConfig(datasets.BuilderConfig):\n """BuilderConfig for Pandas."""\n\n features: Optional[datasets.Features] = None\n\n def __post_init__(self):\n super().__post_init__()\n\n\nclass Pandas(datasets.ArrowBasedBuilder):\n BUILDER_CONFIG_CLASS = PandasConfig\n\n def _info(self):\n warnings.warn(\n "The Pandas builder is deprecated and will be removed in the next major version of datasets.",\n FutureWarning,\n )\n return datasets.DatasetInfo(features=self.config.features)\n\n def _split_generators(self, dl_manager):\n """We handle string, list and dicts in datafiles"""\n if not self.config.data_files:\n raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")\n data_files = dl_manager.download_and_extract(self.config.data_files)\n if isinstance(data_files, (str, list, tuple)):\n files = data_files\n if isinstance(files, str):\n files = [files]\n # Use `dl_manager.iter_files` to skip hidden files in an extracted archive\n files = [dl_manager.iter_files(file) for file in files]\n return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]\n splits = []\n for split_name, files in data_files.items():\n if isinstance(files, str):\n files = [files]\n # Use `dl_manager.iter_files` to skip hidden files in an extracted archive\n files = [dl_manager.iter_files(file) for file in files]\n splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))\n return splits\n\n def _cast_table(self, pa_table: pa.Table) -> pa.Table:\n if self.config.features is not None:\n # more expensive cast to support nested features with keys in a different order\n # allows str <-> int/float or str to Audio for example\n pa_table = table_cast(pa_table, self.config.features.arrow_schema)\n return pa_table\n\n def _generate_tables(self, files):\n for i, file in enumerate(itertools.chain.from_iterable(files)):\n with open(file, "rb") as f:\n pa_table = pa.Table.from_pandas(pd.read_pickle(f))\n yield i, self._cast_table(pa_table)\n
|
.venv\Lib\site-packages\datasets\packaged_modules\pandas\pandas.py
|
pandas.py
|
Python
| 2,547 | 0.95 | 0.276923 | 0.075472 |
awesome-app
| 843 |
2023-12-30T21:12:28.242815
|
Apache-2.0
| false |
345571a0a0503332ce2fdf5e7c138ed0
|
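Editor's note: the deprecated Pandas builder reads pickled DataFrames with `pd.read_pickle`, one Arrow table per file. A minimal sketch, assuming a throwaway `frame.pkl` (illustrative file name):

```python
import pandas as pd

from datasets import load_dataset

# Pickle a tiny DataFrame; the builder converts each pickled frame to an Arrow table.
pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle("frame.pkl")

# Emits the FutureWarning from _info() above; the data still loads.
ds = load_dataset("pandas", data_files="frame.pkl", split="train")
print(ds[0])  # expected: {'a': 1, 'b': 'x'}
```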
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\pandas\__pycache__\pandas.cpython-313.pyc
|
pandas.cpython-313.pyc
|
Other
| 4,525 | 0.8 | 0.022727 | 0 |
awesome-app
| 31 |
2024-04-24T07:36:08.324948
|
BSD-3-Clause
| false |
7a627e3a88095b0d47ede0994684b019
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\pandas\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 207 | 0.7 | 0 | 0 |
awesome-app
| 887 |
2024-06-13T04:41:10.612443
|
BSD-3-Clause
| false |
5f1dfb9bea034d02a62f6b8bbd8c209c
|
import itertools\nfrom dataclasses import dataclass\nfrom typing import Optional, Union\n\nimport pyarrow as pa\nimport pyarrow.dataset as ds\nimport pyarrow.parquet as pq\n\nimport datasets\nfrom datasets.table import table_cast\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n\n@dataclass\nclass ParquetConfig(datasets.BuilderConfig):\n """BuilderConfig for Parquet."""\n\n batch_size: Optional[int] = None\n columns: Optional[list[str]] = None\n features: Optional[datasets.Features] = None\n filters: Optional[Union[ds.Expression, list[tuple], list[list[tuple]]]] = None\n\n def __post_init__(self):\n super().__post_init__()\n\n\nclass Parquet(datasets.ArrowBasedBuilder):\n BUILDER_CONFIG_CLASS = ParquetConfig\n\n def _info(self):\n if (\n self.config.columns is not None\n and self.config.features is not None\n and set(self.config.columns) != set(self.config.features)\n ):\n raise ValueError(\n "The columns and features argument must contain the same columns, but got ",\n f"{self.config.columns} and {self.config.features}",\n )\n return datasets.DatasetInfo(features=self.config.features)\n\n def _split_generators(self, dl_manager):\n """We handle string, list and dicts in datafiles"""\n if not self.config.data_files:\n raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")\n dl_manager.download_config.extract_on_the_fly = True\n data_files = dl_manager.download_and_extract(self.config.data_files)\n splits = []\n for split_name, files in data_files.items():\n if isinstance(files, str):\n files = [files]\n # Use `dl_manager.iter_files` to skip hidden files in an extracted archive\n files = [dl_manager.iter_files(file) for file in files]\n # Infer features if they are stored in the arrow schema\n if self.info.features is None:\n for file in itertools.chain.from_iterable(files):\n with open(file, "rb") as f:\n self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))\n break\n splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))\n if self.config.columns is not None and set(self.config.columns) != set(self.info.features):\n self.info.features = datasets.Features(\n {col: feat for col, feat in self.info.features.items() if col in self.config.columns}\n )\n return splits\n\n def _cast_table(self, pa_table: pa.Table) -> pa.Table:\n if self.info.features is not None:\n # more expensive cast to support nested features with keys in a different order\n # allows str <-> int/float or str to Audio for example\n pa_table = table_cast(pa_table, self.info.features.arrow_schema)\n return pa_table\n\n def _generate_tables(self, files):\n if self.config.features is not None and self.config.columns is not None:\n if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):\n raise ValueError(\n f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"\n )\n filter_expr = (\n pq.filters_to_expression(self.config.filters)\n if isinstance(self.config.filters, list)\n else self.config.filters\n )\n for file_idx, file in enumerate(itertools.chain.from_iterable(files)):\n with open(file, "rb") as f:\n parquet_fragment = ds.ParquetFileFormat().make_fragment(f)\n if parquet_fragment.row_groups:\n batch_size = self.config.batch_size or parquet_fragment.row_groups[0].num_rows\n try:\n for batch_idx, record_batch in enumerate(\n parquet_fragment.to_batches(\n batch_size=batch_size,\n columns=self.config.columns,\n 
filter=filter_expr,\n batch_readahead=0,\n fragment_readahead=0,\n )\n ):\n pa_table = pa.Table.from_batches([record_batch])\n # Uncomment for debugging (will print the Arrow table size and elements)\n # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")\n # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))\n yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)\n except ValueError as e:\n logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")\n raise\n
|
.venv\Lib\site-packages\datasets\packaged_modules\parquet\parquet.py
|
parquet.py
|
Python
| 5,099 | 0.95 | 0.284404 | 0.073684 |
python-kit
| 327 |
2025-04-25T02:36:22.363419
|
GPL-3.0
| false |
69ed585d3c9e65c5b1a77dfb29008316
|
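Editor's note: a usage sketch for the Parquet builder's `columns` and `filters` options. The file name and schema are illustrative; `filters` passed as a list of tuples is converted with `pq.filters_to_expression`, as in `_generate_tables` above:

```python
import pyarrow as pa
import pyarrow.parquet as pq

from datasets import load_dataset

# Illustrative Parquet file.
pq.write_table(pa.table({"id": [1, 2, 3], "label": ["a", "b", "a"]}), "data.parquet")

ds = load_dataset(
    "parquet",
    data_files="data.parquet",
    split="train",
    columns=["id", "label"],        # forwarded to ParquetConfig.columns
    filters=[("label", "=", "a")],  # converted to a pyarrow dataset expression
)
print(ds.to_pandas())  # expected: only the rows with label == "a"
```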
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\parquet\__pycache__\parquet.cpython-313.pyc
|
parquet.cpython-313.pyc
|
Other
| 8,302 | 0.8 | 0.014085 | 0 |
vue-tools
| 916 |
2024-05-06T19:17:09.717338
|
MIT
| false |
a018a1f34d7bd698340efae0f26a042d
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\parquet\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 208 | 0.7 | 0 | 0 |
python-kit
| 772 |
2024-08-04T17:41:17.241620
|
BSD-3-Clause
| false |
5269598bec056b17a3d7cd3b3e0b10b6
|
import datasets\n\nfrom ..folder_based_builder import folder_based_builder\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n\nclass PdfFolderConfig(folder_based_builder.FolderBasedBuilderConfig):\n    """BuilderConfig for PdfFolder."""\n\n    drop_labels: bool = None\n    drop_metadata: bool = None\n\n    def __post_init__(self):\n        super().__post_init__()\n\n\nclass PdfFolder(folder_based_builder.FolderBasedBuilder):\n    BASE_FEATURE = datasets.Pdf\n    BASE_COLUMN_NAME = "pdf"\n    BUILDER_CONFIG_CLASS = PdfFolderConfig\n    EXTENSIONS: list[str] = [".pdf"]\n
|
.venv\Lib\site-packages\datasets\packaged_modules\pdffolder\pdffolder.py
|
pdffolder.py
|
Python
| 565 | 0.85 | 0.173913 | 0 |
react-lib
| 216 |
2023-07-17T02:57:12.164175
|
MIT
| false |
7bd77453d73a673d4aeb84e6deab2e73
|
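Editor's note: PdfFolder reuses the generic folder-based builder, so class labels are inferred from sub-directory names unless `drop_labels` is set. A hedged sketch, assuming a hypothetical `docs/` tree with a few PDFs already on disk and the optional PDF dependencies installed (nothing here is created by the snippet):

```python
from datasets import load_dataset

# Hypothetical layout (not created by this snippet):
#   docs/invoices/a.pdf
#   docs/reports/b.pdf
ds = load_dataset("pdffolder", data_dir="docs", split="train")
print(ds.features)  # expected: a "pdf" column plus an inferred "label" ClassLabel
```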
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\pdffolder\__pycache__\pdffolder.cpython-313.pyc
|
pdffolder.cpython-313.pyc
|
Other
| 1,548 | 0.8 | 0.066667 | 0 |
node-utils
| 967 |
2025-02-11T06:29:36.339633
|
BSD-3-Clause
| false |
66b32d61f57ddefb565e69762011dbfb
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\pdffolder\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 210 | 0.7 | 0 | 0 |
awesome-app
| 427 |
2023-12-13T17:29:42.257349
|
MIT
| false |
ae31ca369e17fbf79f961c926bedf8fa
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\spark\__pycache__\spark.cpython-313.pyc
|
spark.cpython-313.pyc
|
Other
| 19,884 | 0.95 | 0.006329 | 0.012987 |
react-lib
| 482 |
2024-09-26T19:33:03.106642
|
BSD-3-Clause
| false |
33981bf54ea5390f4ddfb854e1bbec00
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\spark\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 206 | 0.7 | 0 | 0 |
vue-tools
| 937 |
2023-09-06T04:42:19.784578
|
GPL-3.0
| false |
26a31173b8384d10036e523eb39178e8
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\sql\__pycache__\sql.cpython-313.pyc
|
sql.cpython-313.pyc
|
Other
| 7,185 | 0.95 | 0.034483 | 0 |
node-utils
| 113 |
2024-04-21T03:33:25.953820
|
GPL-3.0
| false |
533db2cae71cc59c5e56e66c27f61448
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\sql\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 204 | 0.7 | 0 | 0 |
python-kit
| 739 |
2025-06-13T13:45:34.007146
|
Apache-2.0
| false |
18ab9128e04e2c5b85891d687b242d6e
|
import itertools\nfrom dataclasses import dataclass\nfrom io import StringIO\nfrom typing import Optional\n\nimport pyarrow as pa\n\nimport datasets\nfrom datasets.features.features import require_storage_cast\nfrom datasets.table import table_cast\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n\n@dataclass\nclass TextConfig(datasets.BuilderConfig):\n """BuilderConfig for text files."""\n\n features: Optional[datasets.Features] = None\n encoding: str = "utf-8"\n encoding_errors: Optional[str] = None\n chunksize: int = 10 << 20 # 10MB\n keep_linebreaks: bool = False\n sample_by: str = "line"\n\n\nclass Text(datasets.ArrowBasedBuilder):\n BUILDER_CONFIG_CLASS = TextConfig\n\n def _info(self):\n return datasets.DatasetInfo(features=self.config.features)\n\n def _split_generators(self, dl_manager):\n """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]].\n\n If str or List[str], then the dataset returns only the 'train' split.\n If dict, then keys should be from the `datasets.Split` enum.\n """\n if not self.config.data_files:\n raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")\n dl_manager.download_config.extract_on_the_fly = True\n data_files = dl_manager.download_and_extract(self.config.data_files)\n splits = []\n for split_name, files in data_files.items():\n if isinstance(files, str):\n files = [files]\n files = [dl_manager.iter_files(file) for file in files]\n splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))\n return splits\n\n def _cast_table(self, pa_table: pa.Table) -> pa.Table:\n if self.config.features is not None:\n schema = self.config.features.arrow_schema\n if all(not require_storage_cast(feature) for feature in self.config.features.values()):\n # cheaper cast\n pa_table = pa_table.cast(schema)\n else:\n # more expensive cast; allows str <-> int/float or str to Audio for example\n pa_table = table_cast(pa_table, schema)\n return pa_table\n else:\n return pa_table.cast(pa.schema({"text": pa.string()}))\n\n def _generate_tables(self, files):\n pa_table_names = list(self.config.features) if self.config.features is not None else ["text"]\n for file_idx, file in enumerate(itertools.chain.from_iterable(files)):\n # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"\n with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:\n if self.config.sample_by == "line":\n batch_idx = 0\n while True:\n batch = f.read(self.config.chunksize)\n if not batch:\n break\n batch += f.readline() # finish current line\n # StringIO.readlines, by default splits only on "\n" (and keeps line breaks)\n batch = StringIO(batch).readlines()\n if not self.config.keep_linebreaks:\n batch = [line.rstrip("\n") for line in batch]\n pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names)\n # Uncomment for debugging (will print the Arrow table size and elements)\n # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")\n # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))\n yield (file_idx, batch_idx), self._cast_table(pa_table)\n batch_idx += 1\n elif self.config.sample_by == "paragraph":\n batch_idx = 0\n batch = ""\n while True:\n new_batch = f.read(self.config.chunksize)\n if not new_batch:\n break\n batch += new_batch\n batch += f.readline() # finish current line\n batch = batch.split("\n\n")\n pa_table 
= pa.Table.from_arrays(\n [pa.array([example for example in batch[:-1] if example])], names=pa_table_names\n )\n # Uncomment for debugging (will print the Arrow table size and elements)\n # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")\n # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))\n yield (file_idx, batch_idx), self._cast_table(pa_table)\n batch_idx += 1\n batch = batch[-1]\n if batch:\n pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names)\n yield (file_idx, batch_idx), self._cast_table(pa_table)\n elif self.config.sample_by == "document":\n text = f.read()\n pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names)\n yield file_idx, self._cast_table(pa_table)\n
|
.venv\Lib\site-packages\datasets\packaged_modules\text\text.py
|
text.py
|
Python
| 5,516 | 0.95 | 0.276786 | 0.102041 |
vue-tools
| 104 |
2024-06-07T07:02:52.986081
|
Apache-2.0
| false |
5c781bcc543d85a33dbdb89e5f750ec6
|
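Editor's note: the Text builder's `sample_by` option switches between per-line, per-paragraph, and per-document rows. A small sketch, assuming an illustrative `corpus.txt` written first:

```python
from datasets import load_dataset

# Two paragraphs separated by a blank line (illustrative content).
with open("corpus.txt", "w", encoding="utf-8") as f:
    f.write("first paragraph line 1\nfirst paragraph line 2\n\nsecond paragraph\n")

lines = load_dataset("text", data_files="corpus.txt", split="train")
paragraphs = load_dataset("text", data_files="corpus.txt", sample_by="paragraph", split="train")
print(len(lines), len(paragraphs))  # expected: 4 line rows (one of them blank), 2 paragraph rows
```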
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\text\__pycache__\text.cpython-313.pyc
|
text.cpython-313.pyc
|
Other
| 7,358 | 0.95 | 0.014493 | 0 |
vue-tools
| 922 |
2024-02-16T22:11:24.073433
|
BSD-3-Clause
| false |
83187175e53bd1d3841f6c9f494a3bf6
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\text\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 205 | 0.7 | 0 | 0 |
python-kit
| 739 |
2025-05-24T16:59:20.696452
|
MIT
| false |
96958b01773353fec618603ca614f5c0
|
import datasets\n\nfrom ..folder_based_builder import folder_based_builder\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n\nclass VideoFolderConfig(folder_based_builder.FolderBasedBuilderConfig):\n    """BuilderConfig for VideoFolder."""\n\n    drop_labels: bool = None\n    drop_metadata: bool = None\n\n    def __post_init__(self):\n        super().__post_init__()\n\n\nclass VideoFolder(folder_based_builder.FolderBasedBuilder):\n    BASE_FEATURE = datasets.Video\n    BASE_COLUMN_NAME = "video"\n    BUILDER_CONFIG_CLASS = VideoFolderConfig\n    EXTENSIONS: list[str]  # definition at the bottom of the script\n\n\n# TODO: initial list, we should check the compatibility of other formats\nVIDEO_EXTENSIONS = [\n    ".mkv",\n    ".mp4",\n    ".avi",\n    ".mpeg",\n    ".mov",\n]\nVideoFolder.EXTENSIONS = VIDEO_EXTENSIONS\n
|
.venv\Lib\site-packages\datasets\packaged_modules\videofolder\videofolder.py
|
videofolder.py
|
Python
| 807 | 0.95 | 0.117647 | 0.043478 |
node-utils
| 58 |
2025-06-03T07:14:45.395028
|
Apache-2.0
| false |
4a2e579d474aa20341e0e980ca9f200b
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\videofolder\__pycache__\videofolder.cpython-313.pyc
|
videofolder.cpython-313.pyc
|
Other
| 1,635 | 0.7 | 0.058824 | 0 |
awesome-app
| 968 |
2024-07-20T15:52:09.418549
|
MIT
| false |
ca3a63e8b8d0e8644c69bbd77da1d128
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\videofolder\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 212 | 0.7 | 0 | 0 |
awesome-app
| 204 |
2024-04-12T20:55:16.577689
|
MIT
| false |
3d80c2cc0684014466e51bb6f6abd00c
|
#\n# Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved.\n# This file coems from the WebDataset library.\n# See the LICENSE file for licensing terms (BSD-style).\n#\n\n"""\nBinary tensor encodings for PyTorch and NumPy.\n\nThis defines efficient binary encodings for tensors. The format is 8 byte\naligned and can be used directly for computations when transmitted, say,\nvia RDMA. The format is supported by WebDataset with the `.ten` filename\nextension. It is also used by Tensorcom, Tensorcom RDMA, and can be used\nfor fast tensor storage with LMDB and in disk files (which can be memory\nmapped)\n\nData is encoded as a series of chunks:\n\n- magic number (int64)\n- length in bytes (int64)\n- bytes (multiple of 64 bytes long)\n\nArrays are a header chunk followed by a data chunk.\nHeader chunks have the following structure:\n\n- dtype (int64)\n- 8 byte array name\n- ndim (int64)\n- dim[0]\n- dim[1]\n- ...\n"""\n\nimport struct\nimport sys\n\nimport numpy as np\n\n\ndef bytelen(a):\n """Determine the length of a in bytes."""\n if hasattr(a, "nbytes"):\n return a.nbytes\n elif isinstance(a, (bytearray, bytes)):\n return len(a)\n else:\n raise ValueError(a, "cannot determine nbytes")\n\n\ndef bytedata(a):\n """Return a the raw data corresponding to a."""\n if isinstance(a, (bytearray, bytes, memoryview)):\n return a\n elif hasattr(a, "data"):\n return a.data\n else:\n raise ValueError(a, "cannot return bytedata")\n\n\n# tables for converting between long/short NumPy dtypes\n\nlong_to_short = """\nfloat16 f2\nfloat32 f4\nfloat64 f8\nint8 i1\nint16 i2\nint32 i4\nint64 i8\nuint8 u1\nuint16 u2\nunit32 u4\nuint64 u8\n""".strip()\nlong_to_short = [x.split() for x in long_to_short.split("\n")]\nlong_to_short = {x[0]: x[1] for x in long_to_short}\nshort_to_long = {v: k for k, v in long_to_short.items()}\n\n\ndef check_acceptable_input_type(data, allow64):\n """Check that the data has an acceptable type for tensor encoding.\n\n :param data: array\n :param allow64: allow 64 bit types\n """\n for a in data:\n if a.dtype.name not in long_to_short:\n raise ValueError("unsupported dataypte")\n if not allow64 and a.dtype.name not in ["float64", "int64", "uint64"]:\n raise ValueError("64 bit datatypes not allowed unless explicitly enabled")\n\n\ndef str64(s):\n """Convert a string to an int64."""\n s = s + "\0" * (8 - len(s))\n s = s.encode("ascii")\n return struct.unpack("@q", s)[0]\n\n\ndef unstr64(i):\n """Convert an int64 to a string."""\n b = struct.pack("@q", i)\n return b.decode("ascii").strip("\0")\n\n\ndef check_infos(data, infos, required_infos=None):\n """Verify the info strings."""\n if required_infos is False or required_infos is None:\n return data\n if required_infos is True:\n return data, infos\n if not isinstance(required_infos, (tuple, list)):\n raise ValueError("required_infos must be tuple or list")\n for required, actual in zip(required_infos, infos):\n raise ValueError(f"actual info {actual} doesn't match required info {required}")\n return data\n\n\ndef encode_header(a, info=""):\n """Encode an array header as a byte array."""\n if a.ndim >= 10:\n raise ValueError("too many dimensions")\n if a.nbytes != np.prod(a.shape) * a.itemsize:\n raise ValueError("mismatch between size and shape")\n if a.dtype.name not in long_to_short:\n raise ValueError("unsupported array type")\n header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape)\n return bytedata(np.array(header, dtype="i8"))\n\n\ndef decode_header(h):\n """Decode a byte array into an array 
header."""\n h = np.frombuffer(h, dtype="i8")\n if unstr64(h[0]) not in short_to_long:\n raise ValueError("unsupported array type")\n dtype = np.dtype(short_to_long[unstr64(h[0])])\n info = unstr64(h[1])\n rank = int(h[2])\n shape = tuple(h[3 : 3 + rank])\n return shape, dtype, info\n\n\ndef encode_list(l, infos=None): # noqa: E741\n """Given a list of arrays, encode them into a list of byte arrays."""\n if infos is None:\n infos = [""]\n else:\n if len(l) != len(infos):\n raise ValueError(f"length of list {l} must muatch length of infos {infos}")\n result = []\n for i, a in enumerate(l):\n header = encode_header(a, infos[i % len(infos)])\n result += [header, bytedata(a)]\n return result\n\n\ndef decode_list(l, infos=False): # noqa: E741\n """Given a list of byte arrays, decode them into arrays."""\n result = []\n infos0 = []\n for header, data in zip(l[::2], l[1::2]):\n shape, dtype, info = decode_header(header)\n a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape)\n result += [a]\n infos0 += [info]\n return check_infos(result, infos0, infos)\n\n\nmagic_str = "~TenBin~"\nmagic = str64(magic_str)\nmagic_bytes = unstr64(magic).encode("ascii")\n\n\ndef roundup(n, k=64):\n """Round up to the next multiple of 64."""\n return k * ((n + k - 1) // k)\n\n\ndef encode_chunks(l): # noqa: E741\n """Encode a list of chunks into a single byte array, with lengths and magics.."""\n size = sum(16 + roundup(b.nbytes) for b in l)\n result = bytearray(size)\n offset = 0\n for b in l:\n result[offset : offset + 8] = magic_bytes\n offset += 8\n result[offset : offset + 8] = struct.pack("@q", b.nbytes)\n offset += 8\n result[offset : offset + bytelen(b)] = b\n offset += roundup(bytelen(b))\n return result\n\n\ndef decode_chunks(buf):\n """Decode a byte array into a list of chunks."""\n result = []\n offset = 0\n total = bytelen(buf)\n while offset < total:\n if magic_bytes != buf[offset : offset + 8]:\n raise ValueError("magic bytes mismatch")\n offset += 8\n nbytes = struct.unpack("@q", buf[offset : offset + 8])[0]\n offset += 8\n b = buf[offset : offset + nbytes]\n offset += roundup(nbytes)\n result.append(b)\n return result\n\n\ndef encode_buffer(l, infos=None): # noqa: E741\n """Encode a list of arrays into a single byte array."""\n if not isinstance(l, list):\n raise ValueError("requires list")\n return encode_chunks(encode_list(l, infos=infos))\n\n\ndef decode_buffer(buf, infos=False):\n """Decode a byte array into a list of arrays."""\n return decode_list(decode_chunks(buf), infos=infos)\n\n\ndef write_chunk(stream, buf):\n """Write a byte chunk to the stream with magics, length, and padding."""\n nbytes = bytelen(buf)\n stream.write(magic_bytes)\n stream.write(struct.pack("@q", nbytes))\n stream.write(bytedata(buf))\n padding = roundup(nbytes) - nbytes\n if padding > 0:\n stream.write(b"\0" * padding)\n\n\ndef read_chunk(stream):\n """Read a byte chunk from a stream with magics, length, and padding."""\n magic = stream.read(8)\n if magic == b"":\n return None\n if magic != magic_bytes:\n raise ValueError("magic number does not match")\n nbytes = stream.read(8)\n nbytes = struct.unpack("@q", nbytes)[0]\n if nbytes < 0:\n raise ValueError("negative nbytes")\n data = stream.read(nbytes)\n padding = roundup(nbytes) - nbytes\n if padding > 0:\n stream.read(padding)\n return data\n\n\ndef write(stream, l, infos=None): # noqa: E741\n """Write a list of arrays to a stream, with magics, length, and padding."""\n for chunk in encode_list(l, infos=infos):\n write_chunk(stream, 
chunk)\n\n\ndef read(stream, n=sys.maxsize, infos=False):\n """Read a list of arrays from a stream, with magics, length, and padding."""\n chunks = []\n for _ in range(n):\n header = read_chunk(stream)\n if header is None:\n break\n data = read_chunk(stream)\n if data is None:\n raise ValueError("premature EOF")\n chunks += [header, data]\n return decode_list(chunks, infos=infos)\n\n\ndef save(fname, *args, infos=None, nocheck=False):\n """Save a list of arrays to a file, with magics, length, and padding."""\n if not nocheck and not fname.endswith(".ten"):\n raise ValueError("file name should end in .ten")\n with open(fname, "wb") as stream:\n write(stream, args, infos=infos)\n\n\ndef load(fname, infos=False, nocheck=False):\n """Read a list of arrays from a file, with magics, length, and padding."""\n if not nocheck and not fname.endswith(".ten"):\n raise ValueError("file name should end in .ten")\n with open(fname, "rb") as stream:\n return read(stream, infos=infos)\n
|
.venv\Lib\site-packages\datasets\packaged_modules\webdataset\_tenbin.py
|
_tenbin.py
|
Python
| 8,533 | 0.95 | 0.224561 | 0.026201 |
react-lib
| 68 |
2023-09-29T12:19:07.357006
|
MIT
| false |
006e4ac40af155daee1a4dbbbbe2b81e
|
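Editor's note: the header/chunk layout described in the module docstring can be exercised directly with `encode_buffer`/`decode_buffer`. A round-trip sketch, assuming the vendored module is importable from `datasets.packaged_modules.webdataset` (path taken from this dump):

```python
import numpy as np

from datasets.packaged_modules.webdataset import _tenbin

# Encode two arrays into one 64-byte-aligned buffer and decode them back.
arrays = [np.arange(6, dtype="int32").reshape(2, 3), np.ones(4, dtype="float32")]
buf = _tenbin.encode_buffer(arrays)
decoded = _tenbin.decode_buffer(bytes(buf))

for original, restored in zip(arrays, decoded):
    assert np.array_equal(original, restored)
print(len(buf), [a.dtype for a in decoded])
```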
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\webdataset\__pycache__\webdataset.cpython-313.pyc
|
webdataset.cpython-313.pyc
|
Other
| 11,627 | 0.8 | 0 | 0 |
vue-tools
| 920 |
2024-03-15T20:06:28.826492
|
BSD-3-Clause
| false |
5c9183840afc31bebd9a723311ba9f2f
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\webdataset\__pycache__\_tenbin.cpython-313.pyc
|
_tenbin.cpython-313.pyc
|
Other
| 13,172 | 0.95 | 0.027322 | 0 |
react-lib
| 99 |
2024-06-24T04:22:29.521881
|
GPL-3.0
| false |
13d991a24f9b36f545c0caab3453c093
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\webdataset\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 211 | 0.7 | 0 | 0 |
vue-tools
| 144 |
2025-07-01T19:39:39.100773
|
GPL-3.0
| false |
bcea44370459ffe3bfc80191f0311bfa
|
import itertools\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nimport pyarrow as pa\n\nimport datasets\nfrom datasets.features.features import require_storage_cast\nfrom datasets.table import table_cast\n\n\nlogger = datasets.utils.logging.get_logger(__name__)\n\n\n@dataclass\nclass XmlConfig(datasets.BuilderConfig):\n """BuilderConfig for xml files."""\n\n features: Optional[datasets.Features] = None\n encoding: str = "utf-8"\n encoding_errors: Optional[str] = None\n\n\nclass Xml(datasets.ArrowBasedBuilder):\n BUILDER_CONFIG_CLASS = XmlConfig\n\n def _info(self):\n return datasets.DatasetInfo(features=self.config.features)\n\n def _split_generators(self, dl_manager):\n """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]].\n\n If str or List[str], then the dataset returns only the 'train' split.\n If dict, then keys should be from the `datasets.Split` enum.\n """\n if not self.config.data_files:\n raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")\n dl_manager.download_config.extract_on_the_fly = True\n data_files = dl_manager.download_and_extract(self.config.data_files)\n splits = []\n for split_name, files in data_files.items():\n if isinstance(files, str):\n files = [files]\n files = [dl_manager.iter_files(file) for file in files]\n splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))\n return splits\n\n def _cast_table(self, pa_table: pa.Table) -> pa.Table:\n if self.config.features is not None:\n schema = self.config.features.arrow_schema\n if all(not require_storage_cast(feature) for feature in self.config.features.values()):\n # cheaper cast\n pa_table = pa_table.cast(schema)\n else:\n # more expensive cast; allows str <-> int/float or str to Audio for example\n pa_table = table_cast(pa_table, schema)\n return pa_table\n else:\n return pa_table.cast(pa.schema({"xml": pa.string()}))\n\n def _generate_tables(self, files):\n pa_table_names = list(self.config.features) if self.config.features is not None else ["xml"]\n for file_idx, file in enumerate(itertools.chain.from_iterable(files)):\n # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"\n with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:\n xml = f.read()\n pa_table = pa.Table.from_arrays([pa.array([xml])], names=pa_table_names)\n yield file_idx, self._cast_table(pa_table)\n
|
.venv\Lib\site-packages\datasets\packaged_modules\xml\xml.py
|
xml.py
|
Python
| 2,822 | 0.95 | 0.25 | 0.055556 |
react-lib
| 158 |
2025-02-24T11:44:06.056702
|
GPL-3.0
| false |
09643a3f1b6eb5a69204c97d24a63e42
|
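Editor's note: the Xml builder keeps each file verbatim as a single row; no parsing happens. A minimal sketch with an illustrative `page.xml`:

```python
from datasets import load_dataset

# Illustrative XML file; the raw markup is stored as one string per file.
with open("page.xml", "w", encoding="utf-8") as f:
    f.write("<doc><title>hello</title></doc>")

ds = load_dataset("xml", data_files="page.xml", split="train")
print(ds[0])  # expected: {'xml': '<doc><title>hello</title></doc>'}
```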
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\xml\__pycache__\xml.cpython-313.pyc
|
xml.cpython-313.pyc
|
Other
| 5,227 | 0.95 | 0.02381 | 0 |
python-kit
| 832 |
2025-02-14T01:06:28.828310
|
BSD-3-Clause
| false |
d3e3e1ccfefd04c9947cfb691db5fd19
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\xml\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 204 | 0.7 | 0 | 0 |
react-lib
| 561 |
2023-07-18T20:49:51.406531
|
MIT
| false |
802426265f502ffaaaf06003d52d0b84
|
\n\n
|
.venv\Lib\site-packages\datasets\packaged_modules\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 6,903 | 0.8 | 0 | 0 |
react-lib
| 791 |
2024-01-14T08:01:05.709170
|
MIT
| false |
22196f3ba818cd576ec5340f19bf7ebc
|
import contextlib\nfrom multiprocessing import Pool, RLock\n\nfrom tqdm.auto import tqdm\n\nfrom ..utils import experimental, logging\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass ParallelBackendConfig:\n backend_name = None\n\n\n@experimental\ndef parallel_map(function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func):\n """\n **Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either\n multiprocessing.Pool or joblib for parallelization.\n\n Args:\n function (`Callable[[Any], Any]`): Function to be applied to `iterable`.\n iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to.\n num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib).\n types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements.\n disable_tqdm (`bool`): Whether to disable the tqdm progressbar.\n desc (`str`): Prefix for the tqdm progressbar.\n single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`.\n Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an\n element of `iterable`, and `rank` is used for progress bar.\n """\n if ParallelBackendConfig.backend_name is None:\n return _map_with_multiprocessing_pool(\n function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func\n )\n\n return _map_with_joblib(\n function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func\n )\n\n\ndef _map_with_multiprocessing_pool(\n function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func\n):\n num_proc = num_proc if num_proc <= len(iterable) else len(iterable)\n split_kwds = [] # We organize the splits ourselve (contiguous splits)\n for index in range(num_proc):\n div = len(iterable) // num_proc\n mod = len(iterable) % num_proc\n start = div * index + min(index, mod)\n end = start + div + (1 if index < mod else 0)\n split_kwds.append((function, iterable[start:end], batched, batch_size, types, index, disable_tqdm, desc))\n\n if len(iterable) != sum(len(i[1]) for i in split_kwds):\n raise ValueError(\n f"Error dividing inputs iterable among processes. 
"\n f"Total number of objects {len(iterable)}, "\n f"length: {sum(len(i[1]) for i in split_kwds)}"\n )\n\n logger.info(\n f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"\n )\n initargs, initializer = None, None\n if not disable_tqdm:\n initargs, initializer = (RLock(),), tqdm.set_lock\n with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:\n mapped = pool.map(single_map_nested_func, split_kwds)\n logger.info(f"Finished {num_proc} processes")\n mapped = [obj for proc_res in mapped for obj in proc_res]\n logger.info(f"Unpacked {len(mapped)} objects")\n\n return mapped\n\n\ndef _map_with_joblib(\n function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func\n):\n # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,\n # and it requires monkey-patching joblib internal classes which is subject to change\n import joblib\n\n with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):\n return joblib.Parallel()(\n joblib.delayed(single_map_nested_func)((function, obj, batched, batch_size, types, None, True, None))\n for obj in iterable\n )\n\n\n@experimental\n@contextlib.contextmanager\ndef parallel_backend(backend_name: str):\n """\n **Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization\n implemented by joblib.\n\n Args:\n backend_name (str): Name of backend for parallelization implementation, has to be supported by joblib.\n\n Example usage:\n ```py\n with parallel_backend('spark'):\n dataset = load_dataset(..., num_proc=2)\n ```\n """\n ParallelBackendConfig.backend_name = backend_name\n\n if backend_name == "spark":\n from joblibspark import register_spark\n\n register_spark()\n\n # TODO: call create_cache_and_write_probe if "download" in steps\n # TODO: raise NotImplementedError when Dataset.map etc is called\n\n try:\n yield\n finally:\n ParallelBackendConfig.backend_name = None\n
|
.venv\Lib\site-packages\datasets\parallel\parallel.py
|
parallel.py
|
Python
| 4,738 | 0.95 | 0.35 | 0.06383 |
node-utils
| 734 |
2024-09-18T21:53:06.819111
|
BSD-3-Clause
| false |
2de8b05effdcf0ddfaee42f43d2470d4
|
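Editor's note: the contiguous splitting in `_map_with_multiprocessing_pool` can be checked in isolation. A standalone re-computation of the `start`/`end` arithmetic for 10 items across 3 processes (the numbers are chosen only for illustration):

```python
# Recompute the contiguous slices used by _map_with_multiprocessing_pool.
iterable = list(range(10))
num_proc = 3
div, mod = divmod(len(iterable), num_proc)

splits = []
for index in range(num_proc):
    start = div * index + min(index, mod)
    end = start + div + (1 if index < mod else 0)
    splits.append(iterable[start:end])

print(splits)  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
```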
from .parallel import ParallelBackendConfig, parallel_backend, parallel_map\n
|
.venv\Lib\site-packages\datasets\parallel\__init__.py
|
__init__.py
|
Python
| 76 | 0.65 | 0 | 0 |
awesome-app
| 363 |
2025-02-04T16:28:38.221658
|
MIT
| false |
b6d27f543b320ae34ce5347c6fa2251e
|
\n\n
|
.venv\Lib\site-packages\datasets\parallel\__pycache__\parallel.cpython-313.pyc
|
parallel.cpython-313.pyc
|
Other
| 6,147 | 0.95 | 0.172414 | 0.024096 |
python-kit
| 242 |
2025-06-16T13:44:45.298184
|
GPL-3.0
| false |
6a19d401092a1869f920da2817436867
|
\n\n
|
.venv\Lib\site-packages\datasets\parallel\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 302 | 0.7 | 0 | 0 |
node-utils
| 237 |
2024-09-08T13:22:11.675385
|
MIT
| false |
8e67313ed48d25f63137ce2a71415852
|
from typing import Callable\n\n\ndef is_documented_by(function_with_docstring: Callable):\n """Decorator to share docstrings across common functions.\n\n Args:\n function_with_docstring (`Callable`): Name of the function with the docstring.\n """\n\n def wrapper(target_function):\n target_function.__doc__ = function_with_docstring.__doc__\n return target_function\n\n return wrapper\n
|
.venv\Lib\site-packages\datasets\utils\doc_utils.py
|
doc_utils.py
|
Python
| 407 | 0.85 | 0.2 | 0 |
python-kit
| 410 |
2024-05-08T04:45:50.797843
|
Apache-2.0
| false |
f5c75494ca79fb7da3f8a8b3cb16de7a
|
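Editor's note: a small sketch of the `is_documented_by` decorator; the helper names are made up for illustration:

```python
from datasets.utils.doc_utils import is_documented_by


def reference():
    """Shared docstring reused by related helpers."""


@is_documented_by(reference)
def mirrored():
    pass


print(mirrored.__doc__)  # "Shared docstring reused by related helpers."
```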
"""Contains utilities to flag a feature as "experimental" in datasets."""\n\nimport warnings\nfrom functools import wraps\nfrom typing import Callable\n\n\ndef experimental(fn: Callable) -> Callable:\n """Decorator to flag a feature as experimental.\n\n An experimental feature trigger a warning when used as it might be subject to breaking changes in the future.\n\n Args:\n fn (`Callable`):\n The function to flag as experimental.\n\n Returns:\n `Callable`: The decorated function.\n\n Example:\n\n ```python\n >>> from datasets.utils import experimental\n\n >>> @experimental\n ... def my_function():\n ... print("Hello world!")\n\n >>> my_function()\n UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future.\n Hello world!\n ```\n """\n\n @wraps(fn)\n def _inner_fn(*args, **kwargs):\n warnings.warn(\n (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),\n UserWarning,\n )\n return fn(*args, **kwargs)\n\n return _inner_fn\n
|
.venv\Lib\site-packages\datasets\utils\experimental.py
|
experimental.py
|
Python
| 1,097 | 0.85 | 0.116279 | 0 |
awesome-app
| 619 |
2024-04-17T12:39:53.161343
|
GPL-3.0
| false |
b7bdb6ec736944c75c87e8bbb4443d39
|
import bz2\nimport gzip\nimport lzma\nimport os\nimport shutil\nimport struct\nimport tarfile\nimport warnings\nimport zipfile\nfrom abc import ABC, abstractmethod\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nfrom .. import config\nfrom ._filelock import FileLock\nfrom .logging import get_logger\n\n\nlogger = get_logger(__name__)\n\n\nclass ExtractManager:\n def __init__(self, cache_dir: Optional[str] = None):\n self.extract_dir = (\n os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH\n )\n self.extractor = Extractor\n\n def _get_output_path(self, path: str) -> str:\n from .file_utils import hash_url_to_filename\n\n # Path where we extract compressed archives\n # We extract in the cache dir, and get the extracted path name by hashing the original path"\n abs_path = os.path.abspath(path)\n return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))\n\n def _do_extract(self, output_path: str, force_extract: bool) -> bool:\n return force_extract or (\n not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))\n )\n\n def extract(self, input_path: str, force_extract: bool = False) -> str:\n extractor_format = self.extractor.infer_extractor_format(input_path)\n if not extractor_format:\n return input_path\n output_path = self._get_output_path(input_path)\n if self._do_extract(output_path, force_extract):\n self.extractor.extract(input_path, output_path, extractor_format)\n return output_path\n\n\nclass BaseExtractor(ABC):\n @classmethod\n @abstractmethod\n def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool: ...\n\n @staticmethod\n @abstractmethod\n def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: ...\n\n\nclass MagicNumberBaseExtractor(BaseExtractor, ABC):\n magic_numbers: list[bytes] = []\n\n @staticmethod\n def read_magic_number(path: Union[Path, str], magic_number_length: int):\n with open(path, "rb") as f:\n return f.read(magic_number_length)\n\n @classmethod\n def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:\n if not magic_number:\n magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)\n try:\n magic_number = cls.read_magic_number(path, magic_number_length)\n except OSError:\n return False\n return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)\n\n\nclass TarExtractor(BaseExtractor):\n @classmethod\n def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:\n return tarfile.is_tarfile(path)\n\n @staticmethod\n def safemembers(members, output_path):\n """\n Fix for CVE-2007-4559\n Desc:\n Directory traversal vulnerability in the (1) extract and (2) extractall functions in the tarfile\n module in Python allows user-assisted remote attackers to overwrite arbitrary files via a .. 
(dot dot)\n sequence in filenames in a TAR archive, a related issue to CVE-2001-1267.\n See: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-4559\n From: https://stackoverflow.com/a/10077309\n """\n\n def resolved(path: str) -> str:\n return os.path.realpath(os.path.abspath(path))\n\n def badpath(path: str, base: str) -> bool:\n # joinpath will ignore base if path is absolute\n return not resolved(os.path.join(base, path)).startswith(base)\n\n def badlink(info, base: str) -> bool:\n # Links are interpreted relative to the directory containing the link\n tip = resolved(os.path.join(base, os.path.dirname(info.name)))\n return badpath(info.linkname, base=tip)\n\n base = resolved(output_path)\n\n for finfo in members:\n if badpath(finfo.name, base):\n logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")\n elif finfo.issym() and badlink(finfo, base):\n logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")\n elif finfo.islnk() and badlink(finfo, base):\n logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")\n else:\n yield finfo\n\n @staticmethod\n def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:\n os.makedirs(output_path, exist_ok=True)\n tar_file = tarfile.open(input_path)\n tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))\n tar_file.close()\n\n\nclass GzipExtractor(MagicNumberBaseExtractor):\n magic_numbers = [b"\x1f\x8b"]\n\n @staticmethod\n def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:\n with gzip.open(input_path, "rb") as gzip_file:\n with open(output_path, "wb") as extracted_file:\n shutil.copyfileobj(gzip_file, extracted_file)\n\n\nclass ZipExtractor(MagicNumberBaseExtractor):\n magic_numbers = [\n b"PK\x03\x04",\n b"PK\x05\x06", # empty archive\n b"PK\x07\x08", # spanned archive\n ]\n\n @classmethod\n def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:\n if super().is_extractable(path, magic_number=magic_number):\n return True\n try:\n # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.\n # From: https://github.com/python/cpython/pull/5053\n from zipfile import (\n _CD_SIGNATURE,\n _ECD_DISK_NUMBER,\n _ECD_DISK_START,\n _ECD_ENTRIES_TOTAL,\n _ECD_OFFSET,\n _ECD_SIZE,\n _EndRecData,\n sizeCentralDir,\n stringCentralDir,\n structCentralDir,\n )\n\n with open(path, "rb") as fp:\n endrec = _EndRecData(fp)\n if endrec:\n if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:\n return True # Empty zipfiles are still zipfiles\n elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:\n fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk\n if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:\n data = fp.read(sizeCentralDir) # CD is where we expect it to be\n if len(data) == sizeCentralDir:\n centdir = struct.unpack(structCentralDir, data) # CD is the right size\n if centdir[_CD_SIGNATURE] == stringCentralDir:\n return True # First central directory entry has correct magic number\n return False\n except Exception: # catch all errors in case future python versions change the zipfile internals\n return False\n\n @staticmethod\n def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:\n os.makedirs(output_path, exist_ok=True)\n with zipfile.ZipFile(input_path, "r") as zip_file:\n zip_file.extractall(output_path)\n 
zip_file.close()\n\n\nclass XzExtractor(MagicNumberBaseExtractor):\n magic_numbers = [b"\xfd\x37\x7a\x58\x5a\x00"]\n\n @staticmethod\n def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:\n with lzma.open(input_path) as compressed_file:\n with open(output_path, "wb") as extracted_file:\n shutil.copyfileobj(compressed_file, extracted_file)\n\n\nclass RarExtractor(MagicNumberBaseExtractor):\n magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID\n\n @staticmethod\n def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:\n if not config.RARFILE_AVAILABLE:\n raise ImportError("Please pip install rarfile")\n import rarfile\n\n os.makedirs(output_path, exist_ok=True)\n rf = rarfile.RarFile(input_path)\n rf.extractall(output_path)\n rf.close()\n\n\nclass ZstdExtractor(MagicNumberBaseExtractor):\n magic_numbers = [b"\x28\xb5\x2f\xfd"]\n\n @staticmethod\n def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:\n if not config.ZSTANDARD_AVAILABLE:\n raise ImportError("Please pip install zstandard")\n import zstandard as zstd\n\n dctx = zstd.ZstdDecompressor()\n with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:\n dctx.copy_stream(ifh, ofh)\n\n\nclass Bzip2Extractor(MagicNumberBaseExtractor):\n magic_numbers = [b"\x42\x5a\x68"]\n\n @staticmethod\n def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:\n with bz2.open(input_path, "rb") as compressed_file:\n with open(output_path, "wb") as extracted_file:\n shutil.copyfileobj(compressed_file, extracted_file)\n\n\nclass SevenZipExtractor(MagicNumberBaseExtractor):\n magic_numbers = [b"\x37\x7a\xbc\xaf\x27\x1c"]\n\n @staticmethod\n def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:\n if not config.PY7ZR_AVAILABLE:\n raise ImportError("Please pip install py7zr")\n import py7zr\n\n os.makedirs(output_path, exist_ok=True)\n with py7zr.SevenZipFile(input_path, "r") as archive:\n archive.extractall(output_path)\n\n\nclass Lz4Extractor(MagicNumberBaseExtractor):\n magic_numbers = [b"\x04\x22\x4d\x18"]\n\n @staticmethod\n def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:\n if not config.LZ4_AVAILABLE:\n raise ImportError("Please pip install lz4")\n import lz4.frame\n\n with lz4.frame.open(input_path, "rb") as compressed_file:\n with open(output_path, "wb") as extracted_file:\n shutil.copyfileobj(compressed_file, extracted_file)\n\n\nclass Extractor:\n # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)\n extractors: dict[str, type[BaseExtractor]] = {\n "tar": TarExtractor,\n "gzip": GzipExtractor,\n "zip": ZipExtractor,\n "xz": XzExtractor,\n "rar": RarExtractor,\n "zstd": ZstdExtractor,\n "bz2": Bzip2Extractor,\n "7z": SevenZipExtractor, # <Added version="2.4.0"/>\n "lz4": Lz4Extractor, # <Added version="2.4.0"/>\n }\n\n @classmethod\n def _get_magic_number_max_length(cls):\n return max(\n len(extractor_magic_number)\n for extractor in cls.extractors.values()\n if issubclass(extractor, MagicNumberBaseExtractor)\n for extractor_magic_number in extractor.magic_numbers\n )\n\n @staticmethod\n def _read_magic_number(path: Union[Path, str], magic_number_length: int):\n try:\n return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)\n except OSError:\n return b""\n\n @classmethod\n def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) 
-> bool:\n warnings.warn(\n "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "\n "Use 'infer_extractor_format' instead.",\n category=FutureWarning,\n )\n extractor_format = cls.infer_extractor_format(path)\n if extractor_format:\n return True if not return_extractor else (True, cls.extractors[extractor_format])\n return False if not return_extractor else (False, None)\n\n @classmethod\n def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]: # <Added version="2.4.0"/>\n magic_number_max_length = cls._get_magic_number_max_length()\n magic_number = cls._read_magic_number(path, magic_number_max_length)\n for extractor_format, extractor in cls.extractors.items():\n if extractor.is_extractable(path, magic_number=magic_number):\n return extractor_format\n\n @classmethod\n def extract(\n cls,\n input_path: Union[Path, str],\n output_path: Union[Path, str],\n extractor_format: str,\n ) -> None:\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n # Prevent parallel extractions\n lock_path = str(Path(output_path).with_suffix(".lock"))\n with FileLock(lock_path):\n shutil.rmtree(output_path, ignore_errors=True)\n extractor = cls.extractors[extractor_format]\n return extractor.extract(input_path, output_path)\n
|
.venv\Lib\site-packages\datasets\utils\extract.py
|
extract.py
|
Python
| 13,021 | 0.95 | 0.21988 | 0.02963 |
react-lib
| 458 |
2024-02-26T22:26:33.686795
|
BSD-3-Clause
| false |
8fa89e208454cf77e190a5795815ba0c
|
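Editor's note: a sketch of the magic-number detection and cache-based extraction above, assuming a throwaway gzip file and an illustrative cache directory:

```python
import gzip

from datasets.utils.extract import ExtractManager, Extractor

# Illustrative gzip payload.
with gzip.open("payload.txt.gz", "wb") as f:
    f.write(b"hello archives")

# Detection reads the longest known magic number and tries each extractor in order.
print(Extractor.infer_extractor_format("payload.txt.gz"))  # expected: "gzip"

# ExtractManager picks the output path by hashing the input path inside the cache dir.
manager = ExtractManager(cache_dir="./extract_cache")
extracted_path = manager.extract("payload.txt.gz")
with open(extracted_path, "rb") as f:
    print(f.read())  # expected: b"hello archives"
```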
# deprecated, please use the `filelock` package instead\n\nfrom filelock import ( # noqa: F401 # imported for backward compatibility TODO: remove in 3.0.0\n BaseFileLock,\n SoftFileLock,\n Timeout,\n UnixFileLock,\n WindowsFileLock,\n)\n\nfrom ._filelock import FileLock # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0\n
|
.venv\Lib\site-packages\datasets\utils\filelock.py
|
filelock.py
|
Python
| 352 | 0.95 | 0.181818 | 0.111111 |
vue-tools
| 4 |
2023-09-28T09:09:20.886429
|
MIT
| false |
4f7728792acbd9a7a8a5bdf0bd38af31
|
from functools import partial\n\nfrom huggingface_hub import hf_hub_url\nfrom huggingface_hub.utils import get_session, hf_raise_for_status\n\n\nhf_dataset_url = partial(hf_hub_url, repo_type="dataset")\n\n\ndef check_auth(hf_api, repo_id, token=None):\n headers = hf_api._build_hf_headers(token=token)\n path = f"{hf_api.endpoint}/api/datasets/{repo_id}/auth-check"\n r = get_session().get(path, headers=headers)\n hf_raise_for_status(r)\n
|
.venv\Lib\site-packages\datasets\utils\hub.py
|
hub.py
|
Python
| 438 | 0.85 | 0.071429 | 0 |
react-lib
| 1 |
2025-05-27T00:32:34.379058
|
GPL-3.0
| false |
56935fee68242dcc83d87690c39a1b7a
|
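Editor's note: `hf_dataset_url` is just `hf_hub_url` with `repo_type="dataset"` pre-bound. A sketch with an illustrative repo id and file path (no network call is needed to build the URL):

```python
from datasets.utils.hub import hf_dataset_url

# Illustrative repo id and filename.
url = hf_dataset_url("username/my-dataset", "data/train-00000-of-00001.parquet", revision="main")
print(url)
# expected: https://huggingface.co/datasets/username/my-dataset/resolve/main/data/train-00000-of-00001.parquet
```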
import enum\nimport os\nfrom typing import Optional\n\nfrom huggingface_hub.utils import insecure_hashlib\n\nfrom .. import config\nfrom ..exceptions import (\n ExpectedMoreDownloadedFilesError,\n ExpectedMoreSplitsError,\n NonMatchingChecksumError,\n NonMatchingSplitsSizesError,\n UnexpectedDownloadedFileError,\n UnexpectedSplitsError,\n)\nfrom .logging import get_logger\n\n\nlogger = get_logger(__name__)\n\n\nclass VerificationMode(enum.Enum):\n """`Enum` that specifies which verification checks to run.\n\n The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns\n when generating/downloading a dataset for the first time.\n\n The verification modes:\n\n | | Verification checks |\n |---------------------------|------------------------------------------------------------------------------ |\n | `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder |\n | | and the validity (number of files, checksums, etc.) of downloaded files |\n | `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files |\n | `NO_CHECKS` | None |\n\n """\n\n ALL_CHECKS = "all_checks"\n BASIC_CHECKS = "basic_checks"\n NO_CHECKS = "no_checks"\n\n\ndef verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):\n if expected_checksums is None:\n logger.info("Unable to verify checksums.")\n return\n if len(set(expected_checksums) - set(recorded_checksums)) > 0:\n raise ExpectedMoreDownloadedFilesError(str(set(expected_checksums) - set(recorded_checksums)))\n if len(set(recorded_checksums) - set(expected_checksums)) > 0:\n raise UnexpectedDownloadedFileError(str(set(recorded_checksums) - set(expected_checksums)))\n bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]\n for_verification_name = " for " + verification_name if verification_name is not None else ""\n if len(bad_urls) > 0:\n raise NonMatchingChecksumError(\n f"Checksums didn't match{for_verification_name}:\n"\n f"{bad_urls}\n"\n "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"\n )\n logger.info("All the checksums matched successfully" + for_verification_name)\n\n\ndef verify_splits(expected_splits: Optional[dict], recorded_splits: dict):\n if expected_splits is None:\n logger.info("Unable to verify splits sizes.")\n return\n if len(set(expected_splits) - set(recorded_splits)) > 0:\n raise ExpectedMoreSplitsError(str(set(expected_splits) - set(recorded_splits)))\n if len(set(recorded_splits) - set(expected_splits)) > 0:\n raise UnexpectedSplitsError(str(set(recorded_splits) - set(expected_splits)))\n bad_splits = [\n {"expected": expected_splits[name], "recorded": recorded_splits[name]}\n for name in expected_splits\n if expected_splits[name].num_examples != recorded_splits[name].num_examples\n ]\n if len(bad_splits) > 0:\n raise NonMatchingSplitsSizesError(str(bad_splits))\n logger.info("All the splits matched successfully.")\n\n\ndef get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:\n """Compute the file size and the sha256 checksum of a file"""\n if record_checksum:\n m = insecure_hashlib.sha256()\n with open(path, "rb") as f:\n for chunk in iter(lambda: f.read(1 << 20), b""):\n m.update(chunk)\n checksum = m.hexdigest()\n else:\n checksum = None\n return {"num_bytes": os.path.getsize(path), "checksum": checksum}\n\n\ndef is_small_dataset(dataset_size):\n """Check if `dataset_size` is smaller than 
`config.IN_MEMORY_MAX_SIZE`.\n\n Args:\n dataset_size (int): Dataset size in bytes.\n\n Returns:\n bool: Whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.\n """\n if dataset_size and config.IN_MEMORY_MAX_SIZE:\n return dataset_size < config.IN_MEMORY_MAX_SIZE\n else:\n return False\n
|
.venv\Lib\site-packages\datasets\utils\info_utils.py
|
info_utils.py
|
Python
| 4,330 | 0.85 | 0.226415 | 0 |
react-lib
| 335 |
2025-01-31T07:45:04.637876
|
GPL-3.0
| false |
dbf4e5a02461c5f9efe8c379ab24f5a3
|
# Copyright 2020 Optuna, Hugging Face\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Logging utilities."""\n\nimport logging\nimport os\nfrom logging import (\n CRITICAL, # NOQA\n DEBUG, # NOQA\n ERROR, # NOQA\n FATAL, # NOQA\n INFO, # NOQA\n NOTSET, # NOQA\n WARN, # NOQA\n WARNING, # NOQA\n)\nfrom typing import Optional\n\nfrom .tqdm import ( # noqa: F401 # imported for backward compatibility\n disable_progress_bar,\n enable_progress_bar,\n is_progress_bar_enabled,\n tqdm,\n)\n\n\nlog_levels = {\n "debug": logging.DEBUG,\n "info": logging.INFO,\n "warning": logging.WARNING,\n "error": logging.ERROR,\n "critical": logging.CRITICAL,\n}\n\n_default_log_level = logging.WARNING\n\n\ndef _get_default_logging_level():\n """\n If DATASETS_VERBOSITY env var is set to one of the valid choices return that as the new default level.\n If it is not - fall back to ``_default_log_level``\n """\n env_level_str = os.getenv("DATASETS_VERBOSITY", None)\n if env_level_str:\n if env_level_str in log_levels:\n return log_levels[env_level_str]\n else:\n logging.getLogger().warning(\n f"Unknown option DATASETS_VERBOSITY={env_level_str}, has to be one of: {', '.join(log_levels.keys())}"\n )\n return _default_log_level\n\n\ndef _get_library_name() -> str:\n return __name__.split(".")[0]\n\n\ndef _get_library_root_logger() -> logging.Logger:\n return logging.getLogger(_get_library_name())\n\n\ndef _configure_library_root_logger() -> None:\n # Apply our default configuration to the library root logger.\n library_root_logger = _get_library_root_logger()\n library_root_logger.addHandler(logging.StreamHandler())\n library_root_logger.setLevel(_get_default_logging_level())\n\n\ndef _reset_library_root_logger() -> None:\n library_root_logger = _get_library_root_logger()\n library_root_logger.setLevel(logging.NOTSET)\n\n\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\n """Return a logger with the specified name.\n This function can be used in dataset scripts.\n """\n if name is None:\n name = _get_library_name()\n return logging.getLogger(name)\n\n\ndef get_verbosity() -> int:\n """Return the current level for the HuggingFace datasets library's root logger.\n Returns:\n Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.\n\n <Tip>\n\n HuggingFace datasets library has following logging levels:\n - `datasets.logging.CRITICAL`, `datasets.logging.FATAL`\n - `datasets.logging.ERROR`\n - `datasets.logging.WARNING`, `datasets.logging.WARN`\n - `datasets.logging.INFO`\n - `datasets.logging.DEBUG`\n\n </Tip>\n """\n return _get_library_root_logger().getEffectiveLevel()\n\n\ndef set_verbosity(verbosity: int) -> None:\n """Set the level for the Hugging Face Datasets library's root logger.\n Args:\n verbosity:\n Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.\n """\n _get_library_root_logger().setLevel(verbosity)\n\n\ndef set_verbosity_info():\n """Set the level for the Hugging Face datasets library's root logger to `INFO`.\n\n This will display most of the logging 
information and tqdm bars.\n\n Shortcut to `datasets.logging.set_verbosity(datasets.logging.INFO)`.\n """\n return set_verbosity(INFO)\n\n\ndef set_verbosity_warning():\n """Set the level for the Hugging Face datasets library's root logger to `WARNING`.\n\n This will display only the warning and errors logging information and tqdm bars.\n\n Shortcut to `datasets.logging.set_verbosity(datasets.logging.WARNING)`.\n """\n return set_verbosity(WARNING)\n\n\ndef set_verbosity_debug():\n """Set the level for the Hugging Face datasets library's root logger to `DEBUG`.\n\n This will display all the logging information and tqdm bars.\n\n Shortcut to `datasets.logging.set_verbosity(datasets.logging.DEBUG)`.\n """\n return set_verbosity(DEBUG)\n\n\ndef set_verbosity_error():\n """Set the level for the Hugging Face datasets library's root logger to `ERROR`.\n\n This will display only the errors logging information and tqdm bars.\n\n Shortcut to `datasets.logging.set_verbosity(datasets.logging.ERROR)`.\n """\n return set_verbosity(ERROR)\n\n\ndef disable_propagation() -> None:\n """Disable propagation of the library log outputs.\n Note that log propagation is disabled by default.\n """\n _get_library_root_logger().propagate = False\n\n\ndef enable_propagation() -> None:\n """Enable propagation of the library log outputs.\n Please disable the Hugging Face datasets library's default handler to prevent double logging if the root logger has\n been configured.\n """\n _get_library_root_logger().propagate = True\n\n\n# Configure the library root logger at the module level (singleton-like)\n_configure_library_root_logger()\n
|
.venv\Lib\site-packages\datasets\utils\logging.py
|
logging.py
|
Python
| 5,382 | 0.95 | 0.151685 | 0.113636 |
python-kit
| 371 |
2025-03-29T12:11:18.529632
|
Apache-2.0
| false |
cbb648957dce0236940b9a4bee9df00b
|
import re\nimport textwrap\nfrom collections import Counter\nfrom itertools import groupby\nfrom operator import itemgetter\nfrom typing import Any, ClassVar, Optional\n\nimport yaml\nfrom huggingface_hub import DatasetCardData\n\nfrom ..config import METADATA_CONFIGS_FIELD\nfrom ..features import Features\nfrom ..info import DatasetInfo, DatasetInfosDict\nfrom ..naming import _split_re\nfrom ..utils.logging import get_logger\n\n\nlogger = get_logger(__name__)\n\n\nclass _NoDuplicateSafeLoader(yaml.SafeLoader):\n def _check_no_duplicates_on_constructed_node(self, node):\n keys = [self.constructed_objects[key_node] for key_node, _ in node.value]\n keys = [tuple(key) if isinstance(key, list) else key for key in keys]\n counter = Counter(keys)\n duplicate_keys = [key for key in counter if counter[key] > 1]\n if duplicate_keys:\n raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")\n\n def construct_mapping(self, node, deep=False):\n mapping = super().construct_mapping(node, deep=deep)\n self._check_no_duplicates_on_constructed_node(node)\n return mapping\n\n\ndef _split_yaml_from_readme(readme_content: str) -> tuple[Optional[str], str]:\n full_content = list(readme_content.splitlines())\n if full_content and full_content[0] == "---" and "---" in full_content[1:]:\n sep_idx = full_content[1:].index("---") + 1\n yamlblock = "\n".join(full_content[1:sep_idx])\n return yamlblock, "\n".join(full_content[sep_idx + 1 :])\n\n return None, "\n".join(full_content)\n\n\nclass MetadataConfigs(dict[str, dict[str, Any]]):\n """Should be in format {config_name: {**config_params}}."""\n\n FIELD_NAME: ClassVar[str] = METADATA_CONFIGS_FIELD\n\n @staticmethod\n def _raise_if_data_files_field_not_valid(metadata_config: dict):\n yaml_data_files = metadata_config.get("data_files")\n if yaml_data_files is not None:\n yaml_error_message = textwrap.dedent(\n f"""\n Expected data_files in YAML to be either a string or a list of strings\n or a list of dicts with two keys: 'split' and 'path', but got {yaml_data_files}\n Examples of data_files in YAML:\n\n data_files: data.csv\n\n data_files: data/*.png\n\n data_files:\n - part0/*\n - part1/*\n\n data_files:\n - split: train\n path: train/*\n - split: test\n path: test/*\n\n data_files:\n - split: train\n path:\n - train/part1/*\n - train/part2/*\n - split: test\n path: test/*\n\n PS: some symbols like dashes '-' are not allowed in split names\n """\n )\n if not isinstance(yaml_data_files, (list, str)):\n raise ValueError(yaml_error_message)\n if isinstance(yaml_data_files, list):\n for yaml_data_files_item in yaml_data_files:\n if (\n not isinstance(yaml_data_files_item, (str, dict))\n or isinstance(yaml_data_files_item, dict)\n and not (\n len(yaml_data_files_item) == 2\n and "split" in yaml_data_files_item\n and re.match(_split_re, yaml_data_files_item["split"])\n and isinstance(yaml_data_files_item.get("path"), (str, list))\n )\n ):\n raise ValueError(yaml_error_message)\n\n @classmethod\n def _from_exported_parquet_files_and_dataset_infos(\n cls,\n parquet_commit_hash: str,\n exported_parquet_files: list[dict[str, Any]],\n dataset_infos: DatasetInfosDict,\n ) -> "MetadataConfigs":\n metadata_configs = {\n config_name: {\n "data_files": [\n {\n "split": split_name,\n "path": [\n parquet_file["url"].replace("refs%2Fconvert%2Fparquet", parquet_commit_hash)\n for parquet_file in parquet_files_for_split\n ],\n }\n for split_name, parquet_files_for_split in groupby(parquet_files_for_config, itemgetter("split"))\n ],\n "version": str(dataset_infos.get(config_name, 
DatasetInfo()).version or "0.0.0"),\n }\n for config_name, parquet_files_for_config in groupby(exported_parquet_files, itemgetter("config"))\n }\n if dataset_infos:\n # Preserve order of configs and splits\n metadata_configs = {\n config_name: {\n "data_files": [\n data_file\n for split_name in dataset_info.splits\n for data_file in metadata_configs[config_name]["data_files"]\n if data_file["split"] == split_name\n ],\n "version": metadata_configs[config_name]["version"],\n }\n for config_name, dataset_info in dataset_infos.items()\n }\n return cls(metadata_configs)\n\n @classmethod\n def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs":\n if dataset_card_data.get(cls.FIELD_NAME):\n metadata_configs = dataset_card_data[cls.FIELD_NAME]\n if not isinstance(metadata_configs, list):\n raise ValueError(f"Expected {cls.FIELD_NAME} to be a list, but got '{metadata_configs}'")\n for metadata_config in metadata_configs:\n if "config_name" not in metadata_config:\n raise ValueError(\n f"Each config must include `config_name` field with a string name of a config, "\n f"but got {metadata_config}. "\n )\n cls._raise_if_data_files_field_not_valid(metadata_config)\n return cls(\n {\n config.pop("config_name"): {\n param: value if param != "features" else Features._from_yaml_list(value)\n for param, value in config.items()\n }\n for metadata_config in metadata_configs\n if (config := metadata_config.copy())\n }\n )\n return cls()\n\n def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:\n if self:\n for metadata_config in self.values():\n self._raise_if_data_files_field_not_valid(metadata_config)\n current_metadata_configs = self.from_dataset_card_data(dataset_card_data)\n total_metadata_configs = dict(sorted({**current_metadata_configs, **self}.items()))\n for config_name, config_metadata in total_metadata_configs.items():\n config_metadata.pop("config_name", None)\n dataset_card_data[self.FIELD_NAME] = [\n {"config_name": config_name, **config_metadata}\n for config_name, config_metadata in total_metadata_configs.items()\n ]\n\n def get_default_config_name(self) -> Optional[str]:\n default_config_name = None\n for config_name, metadata_config in self.items():\n if len(self) == 1 or config_name == "default" or metadata_config.get("default"):\n if default_config_name is None:\n default_config_name = config_name\n else:\n raise ValueError(\n f"Dataset has several default configs: '{default_config_name}' and '{config_name}'."\n )\n return default_config_name\n\n\n# DEPRECATED - just here to support old versions of evaluate like 0.2.2\n# To support new tasks on the Hugging Face Hub, please open a PR for this file:\n# https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts\nknown_task_ids = {\n "image-classification": [],\n "translation": [],\n "image-segmentation": [],\n "fill-mask": [],\n "automatic-speech-recognition": [],\n "token-classification": [],\n "sentence-similarity": [],\n "audio-classification": [],\n "question-answering": [],\n "summarization": [],\n "zero-shot-classification": [],\n "table-to-text": [],\n "feature-extraction": [],\n "other": [],\n "multiple-choice": [],\n "text-classification": [],\n "text-to-image": [],\n "text2text-generation": [],\n "zero-shot-image-classification": [],\n "tabular-classification": [],\n "tabular-regression": [],\n "image-to-image": [],\n "tabular-to-text": [],\n "unconditional-image-generation": [],\n "text-retrieval": [],\n "text-to-speech": [],\n "object-detection": 
[],\n "audio-to-audio": [],\n "text-generation": [],\n "conversational": [],\n "table-question-answering": [],\n "visual-question-answering": [],\n "image-to-text": [],\n "reinforcement-learning": [],\n "voice-activity-detection": [],\n "time-series-forecasting": [],\n "document-question-answering": [],\n}\n
|
.venv\Lib\site-packages\datasets\utils\metadata.py
|
metadata.py
|
Python
| 9,367 | 0.95 | 0.197425 | 0.019324 |
vue-tools
| 175 |
2024-11-08T08:30:43.833907
|
Apache-2.0
| false |
a7d73b64420f271b7a534676b1867a5c
|
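A minimal usage sketch for the metadata helpers in the record above, assuming the file imports as datasets.utils.metadata (inferred from the record's path); it only exercises _split_yaml_from_readme and the data_files validation shown in the source, and the README text is made up for illustration.

from datasets.utils.metadata import MetadataConfigs, _split_yaml_from_readme

# A README with YAML front matter delimited by "---" lines
readme = "\n".join([
    "---",
    "configs:",
    "- config_name: default",
    "  data_files: data/*.csv",
    "---",
    "# My dataset",
])

yaml_block, body = _split_yaml_from_readme(readme)
# yaml_block -> "configs:\n- config_name: default\n  data_files: data/*.csv"
# body       -> "# My dataset"

# data_files may be a string, a list of patterns, or a list of
# {"split": ..., "path": ...} dicts; anything else raises ValueError
MetadataConfigs._raise_if_data_files_field_not_valid(
    {"data_files": [{"split": "train", "path": "train/*"}]}
)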
from importlib import import_module\n\nfrom .logging import get_logger\n\n\nlogger = get_logger(__name__)\n\n\nclass _PatchedModuleObj:\n """Set all the modules components as attributes of the _PatchedModuleObj object."""\n\n def __init__(self, module, attrs=None):\n attrs = attrs or []\n if module is not None:\n for key in module.__dict__:\n if key in attrs or not key.startswith("__"):\n setattr(self, key, getattr(module, key))\n self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module\n\n\nclass patch_submodule:\n """\n Patch a submodule attribute of an object, by keeping all other submodules intact at all levels.\n\n Example::\n\n >>> import importlib\n >>> from datasets.load import dataset_module_factory\n >>> from datasets.streaming import patch_submodule, xjoin\n >>>\n >>> dataset_module = dataset_module_factory("snli")\n >>> snli_module = importlib.import_module(dataset_module.module_path)\n >>> patcher = patch_submodule(snli_module, "os.path.join", xjoin)\n >>> patcher.start()\n >>> assert snli_module.os.path.join is xjoin\n """\n\n _active_patches = []\n\n def __init__(self, obj, target: str, new, attrs=None):\n self.obj = obj\n self.target = target\n self.new = new\n self.key = target.split(".")[0]\n self.original = {}\n self.attrs = attrs or []\n\n def __enter__(self):\n *submodules, target_attr = self.target.split(".")\n\n # Patch modules:\n # it's used to patch attributes of submodules like "os.path.join";\n # in this case we need to patch "os" and "os.path"\n\n for i in range(len(submodules)):\n try:\n submodule = import_module(".".join(submodules[: i + 1]))\n except ModuleNotFoundError:\n continue\n # We iterate over all the globals in self.obj in case we find "os" or "os.path"\n for attr in self.obj.__dir__():\n obj_attr = getattr(self.obj, attr)\n # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".\n # This allows to patch renamed modules like "from os import path as ospath".\n if obj_attr is submodule or (\n isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule\n ):\n self.original[attr] = obj_attr\n # patch at top level\n setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))\n patched = getattr(self.obj, attr)\n # construct lower levels patches\n for key in submodules[i + 1 :]:\n setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))\n patched = getattr(patched, key)\n # finally set the target attribute\n setattr(patched, target_attr, self.new)\n\n # Patch attribute itself:\n # it's used for builtins like "open",\n # and also to patch "os.path.join" we may also need to patch "join"\n # itself if it was imported as "from os.path import join".\n\n if submodules: # if it's an attribute of a submodule like "os.path.join"\n try:\n attr_value = getattr(import_module(".".join(submodules)), target_attr)\n except (AttributeError, ModuleNotFoundError):\n return\n # We iterate over all the globals in self.obj in case we find "os.path.join"\n for attr in self.obj.__dir__():\n # We don't check for the name of the global, but rather if its value *is* "os.path.join".\n # This allows to patch renamed attributes like "from os.path import join as pjoin".\n if getattr(self.obj, attr) is attr_value:\n self.original[attr] = getattr(self.obj, attr)\n setattr(self.obj, attr, self.new)\n elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"\n self.original[target_attr] = 
globals()["__builtins__"][target_attr]\n setattr(self.obj, target_attr, self.new)\n else:\n raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")\n\n def __exit__(self, *exc_info):\n for attr in list(self.original):\n setattr(self.obj, attr, self.original.pop(attr))\n\n def start(self):\n """Activate a patch."""\n self.__enter__()\n self._active_patches.append(self)\n\n def stop(self):\n """Stop an active patch."""\n try:\n self._active_patches.remove(self)\n except ValueError:\n # If the patch hasn't been started this will fail\n return None\n\n return self.__exit__()\n
|
.venv\Lib\site-packages\datasets\utils\patching.py
|
patching.py
|
Python
| 4,955 | 0.95 | 0.260504 | 0.183673 |
node-utils
| 438 |
2023-07-23T12:05:49.894627
|
Apache-2.0
| false |
5ca94d2c74556c0fb36f6191a3c2eeb7
|
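A self-contained sketch of how patch_submodule from the record above behaves, assuming it imports as datasets.utils.patching (per the record's path); the toy module and fake_join replacement are hypothetical and exist only for illustration.

import os
import types

from datasets.utils.patching import patch_submodule

# Hypothetical module that did `import os` at the top level
toy = types.ModuleType("toy")
toy.os = os

def fake_join(*parts):
    # stand-in for os.path.join while the patch is active
    return "::".join(parts)

with patch_submodule(toy, "os.path.join", fake_join):
    # only the view through `toy` is patched, the real os module is untouched
    assert toy.os.path.join("a", "b") == "a::b"
    assert os.path.join("a", "b") != "a::b"

# on exit the original `os` attribute of the module is restored
assert toy.os.path.join("a", "b") == os.path.join("a", "b")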
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n"""Some python utils function and classes."""\n\nimport copy\nimport functools\nimport itertools\nimport multiprocessing.pool\nimport os\nimport queue\nimport re\nimport types\nimport warnings\nfrom collections.abc import Iterable\nfrom contextlib import contextmanager\nfrom dataclasses import fields, is_dataclass\nfrom multiprocessing import Manager\nfrom pathlib import Path\nfrom queue import Empty\nfrom shutil import disk_usage\nfrom typing import Any, Callable, Optional, TypeVar, Union\nfrom urllib.parse import urlparse\n\nimport multiprocess\nimport multiprocess.pool\nimport numpy as np\nfrom tqdm.auto import tqdm\n\nfrom .. import config\nfrom ..parallel import parallel_map\nfrom . import logging\nfrom . import tqdm as hf_tqdm\nfrom ._dill import ( # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0\n Pickler,\n dump,\n dumps,\n pklregister,\n)\nfrom ._filelock import FileLock\n\n\ntry: # pragma: no branch\n from typing import Final\n\n import typing_extensions as _typing_extensions\n from typing_extensions import Literal\nexcept ImportError:\n _typing_extensions = Literal = Final = None\n\n\nlogger = logging.get_logger(__name__)\n\n\n# NOTE: When used on an instance method, the cache is shared across all\n# instances and IS NOT per-instance.\n# See\n# https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance\n# For @property methods, use @memoized_property below.\nmemoize = functools.lru_cache\n\n\ndef size_str(size_in_bytes):\n """Returns a human readable size string.\n\n If size_in_bytes is None, then returns "Unknown size".\n\n For example `size_str(1.5 * datasets.units.GiB) == "1.50 GiB"`.\n\n Args:\n size_in_bytes: `int` or `None`, the size, in bytes, that we want to\n format as a human-readable size string.\n """\n if not size_in_bytes:\n return "Unknown size"\n\n _NAME_LIST = [("PiB", 2**50), ("TiB", 2**40), ("GiB", 2**30), ("MiB", 2**20), ("KiB", 2**10)]\n\n size_in_bytes = float(size_in_bytes)\n for name, size_bytes in _NAME_LIST:\n value = size_in_bytes / size_bytes\n if value >= 1.0:\n return f"{value:.2f} {name}"\n return f"{int(size_in_bytes)} bytes"\n\n\ndef convert_file_size_to_int(size: Union[int, str]) -> int:\n """\n Converts a size expressed as a string with digits an unit (like `"50MB"`) to an integer (in bytes).\n\n Args:\n size (`int` or `str`): The size to convert. 
Will be directly returned if an `int`.\n\n Example:\n\n ```py\n >>> convert_file_size_to_int("1MiB")\n 1048576\n ```\n """\n if isinstance(size, int):\n return size\n if size.upper().endswith("PIB"):\n return int(size[:-3]) * (2**50)\n if size.upper().endswith("TIB"):\n return int(size[:-3]) * (2**40)\n if size.upper().endswith("GIB"):\n return int(size[:-3]) * (2**30)\n if size.upper().endswith("MIB"):\n return int(size[:-3]) * (2**20)\n if size.upper().endswith("KIB"):\n return int(size[:-3]) * (2**10)\n if size.upper().endswith("PB"):\n int_size = int(size[:-2]) * (10**15)\n return int_size // 8 if size.endswith("b") else int_size\n if size.upper().endswith("TB"):\n int_size = int(size[:-2]) * (10**12)\n return int_size // 8 if size.endswith("b") else int_size\n if size.upper().endswith("GB"):\n int_size = int(size[:-2]) * (10**9)\n return int_size // 8 if size.endswith("b") else int_size\n if size.upper().endswith("MB"):\n int_size = int(size[:-2]) * (10**6)\n return int_size // 8 if size.endswith("b") else int_size\n if size.upper().endswith("KB"):\n int_size = int(size[:-2]) * (10**3)\n return int_size // 8 if size.endswith("b") else int_size\n raise ValueError(f"`size={size}` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")\n\n\ndef glob_pattern_to_regex(pattern):\n # partially taken from fsspec:\n # https://github.com/fsspec/filesystem_spec/blob/697d0f8133d8a5fbc3926e4761d7ecd51337ce50/fsspec/asyn.py#L735\n return (\n pattern.replace("\\", r"\\")\n .replace(".", r"\.")\n .replace("*", ".*")\n .replace("+", r"\+")\n .replace("//", "/")\n .replace("(", r"\(")\n .replace(")", r"\)")\n .replace("|", r"\|")\n .replace("^", r"\^")\n .replace("$", r"\$")\n .rstrip("/")\n .replace("?", ".")\n )\n\n\ndef string_to_dict(string: str, pattern: str) -> Optional[dict[str, str]]:\n """Un-format a string using a python f-string pattern.\n From https://stackoverflow.com/a/36838374\n\n Example::\n\n >>> p = 'hello, my name is {name} and I am a {age} year old {what}'\n >>> s = p.format(name='cody', age=18, what='quarterback')\n >>> s\n 'hello, my name is cody and I am a 18 year old quarterback'\n >>> string_to_dict(s, p)\n {'age': '18', 'name': 'cody', 'what': 'quarterback'}\n\n Args:\n string (str): input string\n pattern (str): pattern formatted like a python f-string\n\n Returns:\n Optional[dict[str, str]]: dictionary of variable -> value, retrieved from the input using the pattern, or\n `None` if the string does not match the pattern.\n """\n pattern = re.sub(r"{([^:}]+)(?::[^}]+)?}", r"{\1}", pattern) # remove format specifiers, e.g. 
{rank:05d} -> {rank}\n regex = re.sub(r"{(.+?)}", r"(?P<_\1>.+)", pattern)\n result = re.search(regex, string)\n if result is None:\n return None\n values = list(result.groups())\n keys = re.findall(r"{(.+?)}", pattern)\n _dict = dict(zip(keys, values))\n return _dict\n\n\ndef asdict(obj):\n """Convert an object to its dictionary representation recursively.\n\n <Added version="2.4.0"/>\n """\n\n # Implementation based on https://docs.python.org/3/library/dataclasses.html#dataclasses.asdict\n\n def _is_dataclass_instance(obj):\n # https://docs.python.org/3/library/dataclasses.html#dataclasses.is_dataclass\n return is_dataclass(obj) and not isinstance(obj, type)\n\n def _asdict_inner(obj):\n if _is_dataclass_instance(obj):\n result = {}\n for f in fields(obj):\n value = _asdict_inner(getattr(obj, f.name))\n if not f.init or value != f.default or f.metadata.get("include_in_asdict_even_if_is_default", False):\n result[f.name] = value\n return result\n elif isinstance(obj, tuple) and hasattr(obj, "_fields"):\n # obj is a namedtuple\n return type(obj)(*[_asdict_inner(v) for v in obj])\n elif isinstance(obj, (list, tuple)):\n # Assume we can create an object of this type by passing in a\n # generator (which is not true for namedtuples, handled\n # above).\n return type(obj)(_asdict_inner(v) for v in obj)\n elif isinstance(obj, dict):\n return {_asdict_inner(k): _asdict_inner(v) for k, v in obj.items()}\n else:\n return copy.deepcopy(obj)\n\n if not isinstance(obj, dict) and not _is_dataclass_instance(obj):\n raise TypeError(f"{obj} is not a dict or a dataclass")\n\n return _asdict_inner(obj)\n\n\n@contextmanager\ndef temporary_assignment(obj, attr, value):\n """Temporarily assign obj.attr to value."""\n original = getattr(obj, attr, None)\n setattr(obj, attr, value)\n try:\n yield\n finally:\n setattr(obj, attr, original)\n\n\n@contextmanager\ndef temp_seed(seed: int, set_pytorch=False, set_tensorflow=False):\n """Temporarily set the random seed. 
This works for python numpy, pytorch and tensorflow."""\n np_state = np.random.get_state()\n np.random.seed(seed)\n\n if set_pytorch and config.TORCH_AVAILABLE:\n import torch\n\n torch_state = torch.random.get_rng_state()\n torch.random.manual_seed(seed)\n\n if torch.cuda.is_available():\n torch_cuda_states = torch.cuda.get_rng_state_all()\n torch.cuda.manual_seed_all(seed)\n\n if set_tensorflow and config.TF_AVAILABLE:\n import tensorflow as tf\n from tensorflow.python.eager import context as tfpycontext\n\n tf_state = tf.random.get_global_generator()\n temp_gen = tf.random.Generator.from_seed(seed)\n tf.random.set_global_generator(temp_gen)\n\n if not tf.executing_eagerly():\n raise ValueError("Setting random seed for TensorFlow is only available in eager mode")\n\n tf_context = tfpycontext.context() # eager mode context\n tf_seed = tf_context._seed\n tf_rng_initialized = hasattr(tf_context, "_rng")\n if tf_rng_initialized:\n tf_rng = tf_context._rng\n tf_context._set_global_seed(seed)\n\n try:\n yield\n finally:\n np.random.set_state(np_state)\n\n if set_pytorch and config.TORCH_AVAILABLE:\n torch.random.set_rng_state(torch_state)\n if torch.cuda.is_available():\n torch.cuda.set_rng_state_all(torch_cuda_states)\n\n if set_tensorflow and config.TF_AVAILABLE:\n tf.random.set_global_generator(tf_state)\n\n tf_context._seed = tf_seed\n if tf_rng_initialized:\n tf_context._rng = tf_rng\n else:\n delattr(tf_context, "_rng")\n\n\ndef unique_values(values):\n """Iterate over iterable and return only unique values in order."""\n seen = set()\n for value in values:\n if value not in seen:\n seen.add(value)\n yield value\n\n\ndef no_op_if_value_is_null(func):\n """If the value is None, return None, else call `func`."""\n\n def wrapper(value):\n return func(value) if value is not None else None\n\n return wrapper\n\n\ndef first_non_null_value(iterable):\n """Return the index and the value of the first non-null value in the iterable. If all values are None, return -1 as index."""\n for i, value in enumerate(iterable):\n if value is not None:\n return i, value\n return -1, None\n\n\ndef first_non_null_non_empty_value(iterable):\n """Return the index and the value of the first non-null non-empty value in the iterable. If all values are None or empty, return -1 as index."""\n for i, value in enumerate(iterable):\n if value is not None and not (isinstance(value, (dict, list)) and len(value) == 0):\n return i, value\n return -1, None\n\n\ndef zip_dict(*dicts):\n """Iterate over items of dictionaries grouped by their keys."""\n for key in unique_values(itertools.chain(*dicts)): # set merge all keys\n # Will raise KeyError if the dict don't have the same keys\n yield key, tuple(d[key] for d in dicts)\n\n\nclass NonMutableDict(dict):\n """Dict where keys can only be added but not modified.\n\n Will raise an error if the user try to overwrite one key. The error message\n can be customized during construction. 
It will be formatted using {key} for\n the overwritten key.\n """\n\n def __init__(self, *args, **kwargs):\n self._error_msg = kwargs.pop(\n "error_msg",\n "Try to overwrite existing key: {key}",\n )\n if kwargs:\n raise ValueError("NonMutableDict cannot be initialized with kwargs.")\n super().__init__(*args, **kwargs)\n\n def __setitem__(self, key, value):\n if key in self:\n raise ValueError(self._error_msg.format(key=key))\n return super().__setitem__(key, value)\n\n def update(self, other):\n if any(k in self for k in other):\n raise ValueError(self._error_msg.format(key=set(self) & set(other)))\n return super().update(other)\n\n\nclass classproperty(property): # pylint: disable=invalid-name\n """Descriptor to be used as decorator for @classmethods."""\n\n def __get__(self, obj, objtype=None):\n return self.fget.__get__(None, objtype)()\n\n\ndef _single_map_nested(args):\n """Apply a function recursively to each element of a nested data struct."""\n function, data_struct, batched, batch_size, types, rank, disable_tqdm, desc = args\n\n # Singleton first to spare some computation\n if not isinstance(data_struct, dict) and not isinstance(data_struct, types):\n if batched:\n return function([data_struct])[0]\n else:\n return function(data_struct)\n if (\n batched\n and not isinstance(data_struct, dict)\n and isinstance(data_struct, types)\n and all(not isinstance(v, (dict, types)) for v in data_struct)\n ):\n return [mapped_item for batch in iter_batched(data_struct, batch_size) for mapped_item in function(batch)]\n\n # Reduce logging to keep things readable in multiprocessing with tqdm\n if rank is not None and logging.get_verbosity() < logging.WARNING:\n logging.set_verbosity_warning()\n # Print at least one thing to fix tqdm in notebooks in multiprocessing\n # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308\n if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__):\n print(" ", end="", flush=True)\n\n # Loop over single examples or batches and write to buffer/file if examples are to be updated\n pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct\n pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc\n with hf_tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar:\n if isinstance(data_struct, dict):\n return {\n k: _single_map_nested((function, v, batched, batch_size, types, None, True, None)) for k, v in pbar\n }\n else:\n mapped = [_single_map_nested((function, v, batched, batch_size, types, None, True, None)) for v in pbar]\n if isinstance(data_struct, list):\n return mapped\n elif isinstance(data_struct, tuple):\n return tuple(mapped)\n else:\n return np.array(mapped)\n\n\ndef map_nested(\n function: Callable[[Any], Any],\n data_struct: Any,\n dict_only: bool = False,\n map_list: bool = True,\n map_tuple: bool = False,\n map_numpy: bool = False,\n num_proc: Optional[int] = None,\n parallel_min_length: int = 2,\n batched: bool = False,\n batch_size: Optional[int] = 1000,\n types: Optional[tuple] = None,\n disable_tqdm: bool = True,\n desc: Optional[str] = None,\n) -> Any:\n """Apply a function recursively to each element of a nested data struct.\n\n Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to\n `parallel_min_length`.\n\n <Changed version="2.5.0">\n\n Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to 
``len(iterable)``.\n\n Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and\n multiprocessing is used.\n\n </Changed>\n\n Args:\n function (`Callable`): Function to be applied to `data_struct`.\n data_struct (`Any`): Data structure to apply `function` to.\n dict_only (`bool`, default `False`): Whether only apply `function` recursively to `dict` values in\n `data_struct`.\n map_list (`bool`, default `True`): Whether also apply `function` recursively to `list` elements (besides `dict`\n values).\n map_tuple (`bool`, default `False`): Whether also apply `function` recursively to `tuple` elements (besides\n `dict` values).\n map_numpy (`bool, default `False`): Whether also apply `function` recursively to `numpy.array` elements (besides\n `dict` values).\n num_proc (`int`, *optional*): Number of processes.\n The level in the data struct used for multiprocessing is the first level that has smaller sub-structs,\n starting from the root.\n parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel\n processing.\n <Added version="2.5.0"/>\n batched (`bool`, defaults to `False`):\n Provide batch of items to `function`.\n <Added version="2.19.0"/>\n batch_size (`int`, *optional*, defaults to `1000`):\n Number of items per batch provided to `function` if `batched=True`.\n If `batch_size <= 0` or `batch_size == None`, provide the full iterable as a single batch to `function`.\n <Added version="2.19.0"/>\n types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their\n elements.\n disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar.\n desc (`str`, *optional*): Prefix for the tqdm progressbar.\n\n Returns:\n `Any`\n """\n if types is None:\n types = []\n if not dict_only:\n if map_list:\n types.append(list)\n if map_tuple:\n types.append(tuple)\n if map_numpy:\n types.append(np.ndarray)\n types = tuple(types)\n\n # Singleton\n if not isinstance(data_struct, dict) and not isinstance(data_struct, types):\n if batched:\n data_struct = [data_struct]\n mapped = function(data_struct)\n if batched:\n mapped = mapped[0]\n return mapped\n\n iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct\n\n if num_proc is None:\n num_proc = 1\n if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable):\n mapped = [\n map_nested(\n function=function,\n data_struct=obj,\n num_proc=num_proc,\n parallel_min_length=parallel_min_length,\n batched=batched,\n batch_size=batch_size,\n types=types,\n )\n for obj in iterable\n ]\n elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length:\n if batched:\n if batch_size is None or batch_size <= 0:\n batch_size = max(len(iterable) // num_proc + int(len(iterable) % num_proc > 0), 1)\n iterable = list(iter_batched(iterable, batch_size))\n mapped = [\n _single_map_nested((function, obj, batched, batch_size, types, None, True, None))\n for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc)\n ]\n if batched:\n mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch]\n else:\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n message=".* is experimental and might be subject to breaking changes in the future\\.$",\n category=UserWarning,\n )\n if batched:\n if batch_size is None or batch_size <= 0:\n batch_size = len(iterable) // num_proc + int(len(iterable) % num_proc > 0)\n iterable = 
list(iter_batched(iterable, batch_size))\n mapped = parallel_map(\n function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, _single_map_nested\n )\n if batched:\n mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch]\n\n if isinstance(data_struct, dict):\n return dict(zip(data_struct.keys(), mapped))\n else:\n if isinstance(data_struct, list):\n return mapped\n elif isinstance(data_struct, tuple):\n return tuple(mapped)\n else:\n return np.array(mapped)\n\n\nclass NestedDataStructure:\n def __init__(self, data=None):\n self.data = data if data is not None else []\n\n def flatten(self, data=None):\n data = data if data is not None else self.data\n if isinstance(data, dict):\n return self.flatten(list(data.values()))\n elif isinstance(data, (list, tuple)):\n return [flattened for item in data for flattened in self.flatten(item)]\n else:\n return [data]\n\n\ndef has_sufficient_disk_space(needed_bytes, directory="."):\n try:\n free_bytes = disk_usage(os.path.abspath(directory)).free\n except OSError:\n return True\n return needed_bytes < free_bytes\n\n\ndef _convert_github_url(url_path: str) -> tuple[str, Optional[str]]:\n """Convert a link to a file on a github repo in a link to the raw github object."""\n parsed = urlparse(url_path)\n sub_directory = None\n if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":\n if "blob" in url_path:\n if not url_path.endswith(".py"):\n raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'")\n url_path = url_path.replace("blob", "raw") # Point to the raw file\n else:\n # Parse github url to point to zip\n github_path = parsed.path[1:]\n repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")\n repo_owner, repo_name = repo_info.split("/")\n url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip"\n sub_directory = f"{repo_name}-{branch}"\n return url_path, sub_directory\n\n\ndef lock_importable_file(importable_local_file: str) -> FileLock:\n # Check the directory with a unique name in our dataset folder\n # path is: ./datasets/dataset_name/hash_from_code/script.py\n # we use a hash as subdirectory_name to be able to have multiple versions of a dataset processing file together\n importable_directory_path = str(Path(importable_local_file).resolve().parent.parent)\n lock_path = importable_directory_path + ".lock"\n return FileLock(lock_path)\n\n\ndef get_imports(file_path: str) -> tuple[str, str, str, str]:\n """Find whether we should import or clone additional files for a given processing script.\n And list the import.\n\n We allow:\n - library dependencies,\n - local dependencies and\n - external dependencies whose url is specified with a comment starting from "# From:' followed by the raw url to a file, an archive or a github repository.\n external dependencies will be downloaded (and extracted if needed in the dataset folder).\n We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.\n\n Note that only direct import in the dataset processing script will be handled\n We don't recursively explore the additional import to download further files.\n\n Example::\n\n import tensorflow\n import .c4_utils\n import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset\n """\n lines = []\n with open(file_path, encoding="utf-8") as f:\n 
lines.extend(f.readlines())\n\n logger.debug(f"Checking {file_path} for additional imports.")\n imports: list[tuple[str, str, str, Optional[str]]] = []\n is_in_docstring = False\n for line in lines:\n docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)\n\n if len(docstr_start_match) == 1:\n # flip True <=> False only if doctstring\n # starts at line without finishing\n is_in_docstring = not is_in_docstring\n\n if is_in_docstring:\n # import statements in doctstrings should\n # not be added as required dependencies\n continue\n\n match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)\n if match is None:\n match = re.match(\n r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",\n line,\n flags=re.MULTILINE,\n )\n if match is None:\n continue\n if match.group(1):\n # The import starts with a '.', we will download the relevant file\n if any(imp[1] == match.group(2) for imp in imports):\n # We already have this import\n continue\n if match.group(3):\n # The import has a comment with 'From:', we'll retrieve it from the given url\n url_path = match.group(3)\n url_path, sub_directory = _convert_github_url(url_path)\n imports.append(("external", match.group(2), url_path, sub_directory))\n elif match.group(2):\n # The import should be at the same place as the file\n imports.append(("internal", match.group(2), match.group(2), None))\n else:\n if match.group(3):\n # The import has a comment with `From: git+https:...`, asks user to pip install from git.\n url_path = match.group(3)\n imports.append(("library", match.group(2), url_path, None))\n else:\n imports.append(("library", match.group(2), match.group(2), None))\n\n return imports\n\n\ndef copyfunc(func):\n result = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__)\n result.__kwdefaults__ = func.__kwdefaults__\n return result\n\n\nY = TypeVar("Y")\n\n\ndef _write_generator_to_queue(queue: queue.Queue, func: Callable[..., Iterable[Y]], kwargs: dict) -> int:\n for i, result in enumerate(func(**kwargs)):\n queue.put(result)\n return i\n\n\ndef _get_pool_pid(pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool]) -> set[int]:\n return {f.pid for f in pool._pool}\n\n\ndef iflatmap_unordered(\n pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool],\n func: Callable[..., Iterable[Y]],\n *,\n kwargs_iterable: Iterable[dict],\n) -> Iterable[Y]:\n initial_pool_pid = _get_pool_pid(pool)\n pool_changed = False\n manager_cls = Manager if isinstance(pool, multiprocessing.pool.Pool) else multiprocess.Manager\n with manager_cls() as manager:\n queue = manager.Queue()\n async_results = [\n pool.apply_async(_write_generator_to_queue, (queue, func, kwargs)) for kwargs in kwargs_iterable\n ]\n try:\n while True:\n try:\n yield queue.get(timeout=0.05)\n except Empty:\n if all(async_result.ready() for async_result in async_results) and queue.empty():\n break\n if _get_pool_pid(pool) != initial_pool_pid:\n pool_changed = True\n # One of the subprocesses has died. 
We should not wait forever.\n raise RuntimeError(\n "One of the subprocesses has abruptly died during map operation."\n "To debug the error, disable multiprocessing."\n )\n finally:\n if not pool_changed:\n # we get the result in case there's an error to raise\n [async_result.get(timeout=0.05) for async_result in async_results]\n\n\nT = TypeVar("T")\n\n\ndef iter_batched(iterable: Iterable[T], n: int) -> Iterable[list[T]]:\n if n < 1:\n raise ValueError(f"Invalid batch size {n}")\n batch = []\n for item in iterable:\n batch.append(item)\n if len(batch) == n:\n yield batch\n batch = []\n if batch:\n yield batch\n
|
.venv\Lib\site-packages\datasets\utils\py_utils.py
|
py_utils.py
|
Python
| 28,088 | 0.95 | 0.287634 | 0.081037 |
node-utils
| 460 |
2024-01-15T20:27:35.177982
|
BSD-3-Clause
| false |
8ffdbe7fd26af6b7fa39c825f805de63
|
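A few usage examples for helpers defined in the record above, assuming the file imports as datasets.utils.py_utils (per the record's path); the expected values follow from the code and docstrings shown.

from datasets.utils.py_utils import (
    convert_file_size_to_int,
    iter_batched,
    map_nested,
    size_str,
    string_to_dict,
)

# Human-readable sizes and the reverse conversion
assert size_str(1.5 * 2**30) == "1.50 GiB"
assert convert_file_size_to_int("1MiB") == 2**20

# Un-format a string against an f-string-like pattern (format specifiers are ignored)
assert string_to_dict("train-00001-of-00008", "{split}-{shard:05d}-of-{num_shards:05d}") == {
    "split": "train",
    "shard": "00001",
    "num_shards": "00008",
}

# Batch an iterable into fixed-size chunks; the last batch may be shorter
assert list(iter_batched(range(5), 2)) == [[0, 1], [2, 3], [4]]

# Apply a function recursively to every leaf of a nested structure
assert map_nested(lambda x: x * 2, {"a": [1, 2], "b": 3}) == {"a": [2, 4], "b": 6}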
import numpy as np\n\n\ndef _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:\n """Return the number of possible shards according to the input gen_kwargs"""\n # Having lists of different sizes makes sharding ambigious, raise an error in this case\n # until we decide how to define sharding without ambiguity for users\n lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}\n if len(set(lists_lengths.values())) > 1:\n raise RuntimeError(\n "Sharding is ambiguous for this dataset: "\n + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"\n + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())\n + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "\n + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."\n )\n max_length = max(lists_lengths.values(), default=0)\n return max(1, max_length)\n\n\ndef _distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:\n """\n Get the range of shard indices per job.\n If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard.\n The shards indices order is preserved: e.g. all the first shards are given the first job.\n Moreover all the jobs are given approximately the same number of shards.\n\n Example:\n\n ```python\n >>> _distribute_shards(2, max_num_jobs=4)\n [range(0, 1), range(1, 2)]\n >>> _distribute_shards(10, max_num_jobs=3)\n [range(0, 4), range(4, 7), range(7, 10)]\n ```\n """\n shards_indices_per_group = []\n for group_idx in range(max_num_jobs):\n num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))\n if num_shards_to_add == 0:\n break\n start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0\n shard_indices = range(start, start + num_shards_to_add)\n shards_indices_per_group.append(shard_indices)\n return shards_indices_per_group\n\n\ndef _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> list[dict]:\n """Split the gen_kwargs into `max_num_job` gen_kwargs"""\n # Having lists of different sizes makes sharding ambigious, raise an error in this case\n num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)\n if num_shards == 1:\n return [dict(gen_kwargs)]\n else:\n shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)\n return [\n {\n key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]\n if isinstance(value, list)\n else value\n for key, value in gen_kwargs.items()\n }\n for group_idx in range(len(shard_indices_per_group))\n ]\n\n\ndef _merge_gen_kwargs(gen_kwargs_list: list[dict]) -> dict:\n return {\n key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]\n if isinstance(gen_kwargs_list[0][key], list)\n else gen_kwargs_list[0][key]\n for key in gen_kwargs_list[0]\n }\n\n\ndef _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:\n """Return a shuffled copy of the input gen_kwargs"""\n # We must shuffle all the lists, and lists of the same size must have the same shuffling.\n # This way entangled lists of (shard, shard_metadata) are still in the right order.\n\n # First, let's generate the shuffled indices per list size\n list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}\n indices_per_size = {}\n for size in list_sizes:\n indices_per_size[size] = 
list(range(size))\n rng.shuffle(indices_per_size[size])\n # Now let's copy the gen_kwargs and shuffle the lists based on their sizes\n shuffled_kwargs = dict(gen_kwargs)\n for key, value in shuffled_kwargs.items():\n if isinstance(value, list):\n shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]\n return shuffled_kwargs\n
|
.venv\Lib\site-packages\datasets\utils\sharding.py
|
sharding.py
|
Python
| 4,215 | 0.95 | 0.326087 | 0.088608 |
react-lib
| 778 |
2025-06-22T22:38:51.543802
|
BSD-3-Clause
| false |
c04975e8d52b004273f9c1894a3809e3
|
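A small sketch of the shard-distribution helpers in the record above, assuming they import as datasets.utils.sharding (per the record's path); the gen_kwargs dict is made up for illustration.

from datasets.utils.sharding import _distribute_shards, _split_gen_kwargs

# 10 shards spread over at most 3 jobs; earlier jobs receive the extra shards
assert _distribute_shards(10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]

# Only list-valued gen_kwargs are split; scalar values are copied into every job
gen_kwargs = {"files": ["a", "b", "c", "d"], "metadata": "shared"}
assert _split_gen_kwargs(gen_kwargs, max_num_jobs=2) == [
    {"files": ["a", "b"], "metadata": "shared"},
    {"files": ["c", "d"], "metadata": "shared"},
]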
import numpy as np\n\n\ndef approximate_mode(class_counts, n_draws, rng):\n """Computes approximate mode of multivariate hypergeometric.\n This is an approximation to the mode of the multivariate\n hypergeometric given by class_counts and n_draws.\n It shouldn't be off by more than one.\n It is the mostly likely outcome of drawing n_draws many\n samples from the population given by class_counts.\n Args\n ----------\n class_counts : ndarray of int\n Population per class.\n n_draws : int\n Number of draws (samples to draw) from the overall population.\n rng : random state\n Used to break ties.\n Returns\n -------\n sampled_classes : ndarray of int\n Number of samples drawn from each class.\n np.sum(sampled_classes) == n_draws\n\n """\n # this computes a bad approximation to the mode of the\n # multivariate hypergeometric given by class_counts and n_draws\n continuous = n_draws * class_counts / class_counts.sum()\n # floored means we don't overshoot n_samples, but probably undershoot\n floored = np.floor(continuous)\n # we add samples according to how much "left over" probability\n # they had, until we arrive at n_samples\n need_to_add = int(n_draws - floored.sum())\n if need_to_add > 0:\n remainder = continuous - floored\n values = np.sort(np.unique(remainder))[::-1]\n # add according to remainder, but break ties\n # randomly to avoid biases\n for value in values:\n (inds,) = np.where(remainder == value)\n # if we need_to_add less than what's in inds\n # we draw randomly from them.\n # if we need to add more, we add them all and\n # go to the next value\n add_now = min(len(inds), need_to_add)\n inds = rng.choice(inds, size=add_now, replace=False)\n floored[inds] += 1\n need_to_add -= add_now\n if need_to_add == 0:\n break\n return floored.astype(np.int64)\n\n\ndef stratified_shuffle_split_generate_indices(y, n_train, n_test, rng, n_splits=10):\n """\n\n Provides train/test indices to split data in train/test sets.\n It's reference is taken from StratifiedShuffleSplit implementation\n of scikit-learn library.\n\n Args\n ----------\n\n n_train : int,\n represents the absolute number of train samples.\n\n n_test : int,\n represents the absolute number of test samples.\n\n random_state : int or RandomState instance, default=None\n Controls the randomness of the training and testing indices produced.\n Pass an int for reproducible output across multiple function calls.\n\n n_splits : int, default=10\n Number of re-shuffling & splitting iterations.\n """\n classes, y_indices = np.unique(y, return_inverse=True)\n n_classes = classes.shape[0]\n class_counts = np.bincount(y_indices)\n if np.min(class_counts) < 2:\n raise ValueError("Minimum class count error")\n if n_train < n_classes:\n raise ValueError(\n "The train_size = %d should be greater or equal to the number of classes = %d" % (n_train, n_classes)\n )\n if n_test < n_classes:\n raise ValueError(\n "The test_size = %d should be greater or equal to the number of classes = %d" % (n_test, n_classes)\n )\n class_indices = np.split(np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1])\n for _ in range(n_splits):\n n_i = approximate_mode(class_counts, n_train, rng)\n class_counts_remaining = class_counts - n_i\n t_i = approximate_mode(class_counts_remaining, n_test, rng)\n\n train = []\n test = []\n\n for i in range(n_classes):\n permutation = rng.permutation(class_counts[i])\n perm_indices_class_i = class_indices[i].take(permutation, mode="clip")\n train.extend(perm_indices_class_i[: n_i[i]])\n 
test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]])\n train = rng.permutation(train)\n test = rng.permutation(test)\n\n yield train, test\n
|
.venv\Lib\site-packages\datasets\utils\stratify.py
|
stratify.py
|
Python
| 4,085 | 0.95 | 0.158879 | 0.11828 |
awesome-app
| 209 |
2024-11-08T22:05:37.947285
|
BSD-3-Clause
| false |
f2ca9aa9ab6ff2f65bde281367fc6c54
|
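A minimal sketch of stratified_shuffle_split_generate_indices from the record above, assuming it imports as datasets.utils.stratify (per the record's path); the labels and split sizes are made up for illustration.

import numpy as np

from datasets.utils.stratify import stratified_shuffle_split_generate_indices

# Six samples, two balanced classes; ask for a stratified 4/2 split
y = np.array([0, 0, 0, 1, 1, 1])
rng = np.random.default_rng(seed=0)

splits = stratified_shuffle_split_generate_indices(y, n_train=4, n_test=2, rng=rng)
train_idx, test_idx = next(splits)  # the generator yields n_splits (train, test) pairs

assert len(train_idx) == 4 and len(test_idx) == 2
# both classes are represented on each side of the split
assert {y[i] for i in train_idx} == {0, 1}
assert {y[i] for i in test_idx} == {0, 1}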
"""Utility helpers to handle progress bars in `datasets`.\n\nExample:\n 1. Use `datasets.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`.\n 2. To disable progress bars, either use `disable_progress_bars()` helper or set the\n environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` to 1.\n 3. To re-enable progress bars, use `enable_progress_bars()`.\n 4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`.\n\nNOTE: Environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` has the priority.\n\nExample:\n ```py\n from datasets.utils import (\n are_progress_bars_disabled,\n disable_progress_bars,\n enable_progress_bars,\n tqdm,\n )\n\n # Disable progress bars globally\n disable_progress_bars()\n\n # Use as normal `tqdm`\n for _ in tqdm(range(5)):\n do_something()\n\n # Still not showing progress bars, as `disable=False` is overwritten to `True`.\n for _ in tqdm(range(5), disable=False):\n do_something()\n\n are_progress_bars_disabled() # True\n\n # Re-enable progress bars globally\n enable_progress_bars()\n\n # Progress bar will be shown !\n for _ in tqdm(range(5)):\n do_something()\n ```\n"""\n\nimport warnings\n\nfrom tqdm.auto import tqdm as old_tqdm\n\nfrom ..config import HF_DATASETS_DISABLE_PROGRESS_BARS\n\n\n# `HF_DATASETS_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_datasets_progress_bars_disabled`\n# is a `bool`. If `HF_DATASETS_DISABLE_PROGRESS_BARS` is set to True or False, it has priority.\n# If `HF_DATASETS_DISABLE_PROGRESS_BARS` is None, it means the user have not set the\n# environment variable and is free to enable/disable progress bars programmatically.\n# TL;DR: env variable has priority over code.\n#\n# By default, progress bars are enabled.\n_hf_datasets_progress_bars_disabled: bool = HF_DATASETS_DISABLE_PROGRESS_BARS or False\n\n\ndef disable_progress_bars() -> None:\n """\n Disable globally progress bars used in `datasets` except if `HF_DATASETS_DISABLE_PROGRESS_BAR` environment\n variable has been set.\n\n Use [`~utils.enable_progress_bars`] to re-enable them.\n """\n if HF_DATASETS_DISABLE_PROGRESS_BARS is False:\n warnings.warn(\n "Cannot disable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=0` is set and has"\n " priority."\n )\n return\n global _hf_datasets_progress_bars_disabled\n _hf_datasets_progress_bars_disabled = True\n\n\ndef enable_progress_bars() -> None:\n """\n Enable globally progress bars used in `datasets` except if `HF_DATASETS_DISABLE_PROGRESS_BAR` environment\n variable has been set.\n\n Use [`~utils.disable_progress_bars`] to disable them.\n """\n if HF_DATASETS_DISABLE_PROGRESS_BARS is True:\n warnings.warn(\n "Cannot enable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=1` is set and has"\n " priority."\n )\n return\n global _hf_datasets_progress_bars_disabled\n _hf_datasets_progress_bars_disabled = False\n\n\ndef are_progress_bars_disabled() -> bool:\n """Return whether progress bars are globally disabled or not.\n\n Progress bars used in `datasets` can be enable or disabled globally using [`~utils.enable_progress_bars`]\n and [`~utils.disable_progress_bars`] or by setting `HF_DATASETS_DISABLE_PROGRESS_BAR` as environment variable.\n """\n global _hf_datasets_progress_bars_disabled\n return _hf_datasets_progress_bars_disabled\n\n\nclass tqdm(old_tqdm):\n """\n Class to override `disable` argument in case progress bars are globally disabled.\n\n Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.\n """\n\n def __init__(self, 
*args, **kwargs):\n if are_progress_bars_disabled():\n kwargs["disable"] = True\n super().__init__(*args, **kwargs)\n\n def __delattr__(self, attr: str) -> None:\n """Fix for https://github.com/huggingface/datasets/issues/6066"""\n try:\n super().__delattr__(attr)\n except AttributeError:\n if attr != "_lock":\n raise\n\n\n# backward compatibility\nenable_progress_bar = enable_progress_bars\ndisable_progress_bar = disable_progress_bars\n\n\ndef is_progress_bar_enabled():\n return not are_progress_bars_disabled()\n
|
.venv\Lib\site-packages\datasets\utils\tqdm.py
|
tqdm.py
|
Python
| 4,303 | 0.95 | 0.145038 | 0.131313 |
vue-tools
| 297 |
2024-07-14T02:21:17.921517
|
Apache-2.0
| false |
77b3b4a5abbb8bee55244770afcaef52
|
from collections.abc import Iterable, Iterator\n\n\nclass tracked_str(str):\n origins = {}\n\n def set_origin(self, origin: str):\n if super().__repr__() not in self.origins:\n self.origins[super().__repr__()] = origin\n\n def get_origin(self):\n return self.origins.get(super().__repr__(), str(self))\n\n def __repr__(self) -> str:\n if super().__repr__() not in self.origins or self.origins[super().__repr__()] == self:\n return super().__repr__()\n else:\n return f"{str(self)} (origin={self.origins[super().__repr__()]})"\n\n\nclass tracked_list(list):\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.last_item = None\n\n def __iter__(self) -> Iterator:\n for x in super().__iter__():\n self.last_item = x\n yield x\n self.last_item = None\n\n def __repr__(self) -> str:\n if self.last_item is None:\n return super().__repr__()\n else:\n return f"{self.__class__.__name__}(current={self.last_item})"\n\n\nclass TrackedIterableFromGenerator(Iterable):\n """Utility class to create an iterable from a generator function, in order to reset the generator when needed."""\n\n def __init__(self, generator, *args):\n super().__init__()\n self.generator = generator\n self.args = args\n self.last_item = None\n\n def __iter__(self):\n for x in self.generator(*self.args):\n self.last_item = x\n yield x\n self.last_item = None\n\n def __repr__(self) -> str:\n if self.last_item is None:\n return super().__repr__()\n else:\n return f"{self.__class__.__name__}(current={self.last_item})"\n\n def __reduce__(self):\n return (self.__class__, (self.generator, *self.args))\n
|
.venv\Lib\site-packages\datasets\utils\track.py
|
track.py
|
Python
| 1,838 | 0.85 | 0.344262 | 0 |
node-utils
| 727 |
2024-04-23T20:03:56.902912
|
GPL-3.0
| false |
38eac53be1db4892b54fa649bd278547
|
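A short sketch of the tracking helpers in the record above, assuming they import as datasets.utils.track (per the record's path); the file path and origin URL are made up for illustration.

from datasets.utils.track import TrackedIterableFromGenerator, tracked_str

# tracked_str remembers where a value came from and reports it in its repr
path = tracked_str("data/train.csv")
path.set_origin("hf://datasets/org/repo/data/train.csv")
assert repr(path) == "data/train.csv (origin=hf://datasets/org/repo/data/train.csv)"

# TrackedIterableFromGenerator rebuilds the generator on every iteration,
# so it can be consumed more than once
numbers = TrackedIterableFromGenerator(range, 3)
assert list(numbers) == [0, 1, 2]
assert list(numbers) == [0, 1, 2]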
import os\nfrom typing import TypeVar, Union\n\n\nT = TypeVar("T")\n\nListLike = Union[list[T], tuple[T, ...]]\nNestedDataStructureLike = Union[T, list[T], dict[str, T]]\nPathLike = Union[str, bytes, os.PathLike]\n
|
.venv\Lib\site-packages\datasets\utils\typing.py
|
typing.py
|
Python
| 205 | 0.85 | 0 | 0 |
vue-tools
| 651 |
2024-10-30T10:49:56.427454
|
MIT
| false |
37c97f866fda3b7b415c0ebcc49d2679
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n"""Version utils."""\n\nimport dataclasses\nimport re\nfrom dataclasses import dataclass\nfrom functools import total_ordering\nfrom typing import Optional, Union\n\n\n_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")\n\n\n@total_ordering\n@dataclass\nclass Version:\n """Dataset version `MAJOR.MINOR.PATCH`.\n\n Args:\n version_str (`str`):\n The dataset version.\n description (`str`):\n A description of what is new in this version.\n major (`str`):\n minor (`str`):\n patch (`str`):\n\n Example:\n\n ```py\n >>> VERSION = datasets.Version("1.0.0")\n ```\n """\n\n version_str: str\n description: Optional[str] = None\n major: Optional[Union[str, int]] = None\n minor: Optional[Union[str, int]] = None\n patch: Optional[Union[str, int]] = None\n\n def __post_init__(self):\n self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)\n\n def __repr__(self):\n return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"\n\n @property\n def tuple(self):\n return self.major, self.minor, self.patch\n\n def _validate_operand(self, other):\n if isinstance(other, str):\n return Version(other)\n elif isinstance(other, Version):\n return other\n raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")\n\n def __eq__(self, other):\n try:\n other = self._validate_operand(other)\n except (TypeError, ValueError):\n return False\n else:\n return self.tuple == other.tuple\n\n def __lt__(self, other):\n other = self._validate_operand(other)\n return self.tuple < other.tuple\n\n def __hash__(self):\n return hash(_version_tuple_to_str(self.tuple))\n\n @classmethod\n def from_dict(cls, dic):\n field_names = {f.name for f in dataclasses.fields(cls)}\n return cls(**{k: v for k, v in dic.items() if k in field_names})\n\n def _to_yaml_string(self) -> str:\n return self.version_str\n\n\ndef _str_to_version_tuple(version_str):\n """Return the tuple (major, minor, patch) version extracted from the str."""\n res = _VERSION_REG.match(version_str)\n if not res:\n raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")\n return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])\n\n\ndef _version_tuple_to_str(version_tuple):\n """Return the str version from the version tuple (major, minor, patch)."""\n return ".".join(str(v) for v in version_tuple)\n
|
.venv\Lib\site-packages\datasets\utils\version.py
|
version.py
|
Python
| 3,281 | 0.95 | 0.198113 | 0.168675 |
node-utils
| 842 |
2023-12-28T16:05:55.694932
|
GPL-3.0
| false |
7010052c584b0e9d1d666f7670287137
|
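A brief sketch of the Version dataclass in the record above, importing it from datasets.utils.version per the record's path (its own docstring shows it is also reachable as datasets.Version).

from datasets.utils.version import Version

v = Version("2.19.0")
assert (v.major, v.minor, v.patch) == (2, 19, 0)
assert v == "2.19.0"                      # strings are coerced before comparison
assert Version("2.19.0") < Version("2.20.0")
assert repr(v) == "2.19.0"

# Anything that is not MAJOR.MINOR.PATCH is rejected
try:
    Version("2.19")
except ValueError:
    pass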
from typing import Any, Optional, Union\n\nfrom huggingface_hub.utils import get_session\n\nfrom .. import config\nfrom ..exceptions import DatasetsError\nfrom .file_utils import (\n get_authentication_headers_for_url,\n)\nfrom .logging import get_logger\n\n\nlogger = get_logger(__name__)\n\n\nclass DatasetViewerError(DatasetsError):\n """Dataset viewer error.\n\n Raised when trying to use the dataset viewer HTTP API and when trying to access:\n - a missing dataset, or\n - a private/gated dataset and the user is not authenticated.\n - unavailable /parquet or /info responses\n """\n\n\ndef get_exported_parquet_files(\n dataset: str, commit_hash: str, token: Optional[Union[str, bool]]\n) -> list[dict[str, Any]]:\n """\n Get the dataset exported parquet files\n Docs: https://huggingface.co/docs/datasets-server/parquet\n """\n dataset_viewer_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="\n try:\n parquet_data_files_response = get_session().get(\n url=dataset_viewer_parquet_url + dataset,\n headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),\n timeout=100.0,\n )\n parquet_data_files_response.raise_for_status()\n if "X-Revision" in parquet_data_files_response.headers:\n if parquet_data_files_response.headers["X-Revision"] == commit_hash or commit_hash is None:\n parquet_data_files_response_json = parquet_data_files_response.json()\n if (\n parquet_data_files_response_json.get("partial") is False\n and not parquet_data_files_response_json.get("pending", True)\n and not parquet_data_files_response_json.get("failed", True)\n and "parquet_files" in parquet_data_files_response_json\n ):\n return parquet_data_files_response_json["parquet_files"]\n else:\n logger.debug(f"Parquet export for {dataset} is not completely ready yet.")\n else:\n logger.debug(\n f"Parquet export for {dataset} is available but outdated (commit_hash='{parquet_data_files_response.headers['X-Revision']}')"\n )\n except Exception as e: # noqa catch any exception of the dataset viewer API and consider the parquet export doesn't exist\n logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")\n raise DatasetViewerError("No exported Parquet files available.")\n\n\ndef get_exported_dataset_infos(\n dataset: str, commit_hash: str, token: Optional[Union[str, bool]]\n) -> dict[str, dict[str, Any]]:\n """\n Get the dataset information, can be useful to get e.g. 
the dataset features.\n Docs: https://huggingface.co/docs/datasets-server/info\n """\n dataset_viewer_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="\n try:\n info_response = get_session().get(\n url=dataset_viewer_info_url + dataset,\n headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),\n timeout=100.0,\n )\n info_response.raise_for_status()\n if "X-Revision" in info_response.headers:\n if info_response.headers["X-Revision"] == commit_hash or commit_hash is None:\n info_response = info_response.json()\n if (\n info_response.get("partial") is False\n and not info_response.get("pending", True)\n and not info_response.get("failed", True)\n and "dataset_info" in info_response\n ):\n return info_response["dataset_info"]\n else:\n logger.debug(f"Dataset info for {dataset} is not completely ready yet.")\n else:\n logger.debug(\n f"Dataset info for {dataset} is available but outdated (commit_hash='{info_response.headers['X-Revision']}')"\n )\n except Exception as e: # noqa catch any exception of the dataset viewer API and consider the dataset info doesn't exist\n logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")\n raise DatasetViewerError("No exported dataset infos available.")\n
|
.venv\Lib\site-packages\datasets\utils\_dataset_viewer.py
|
_dataset_viewer.py
|
Python
| 4,397 | 0.95 | 0.2 | 0 |
react-lib
| 14 |
2023-07-20T22:00:31.244779
|
MIT
| false |
6d7e823b4d7fb0e06170dfc39c66f269
|
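A hedged sketch of the dataset viewer helpers in the record above, assuming network access to the Hugging Face dataset viewer and that the module imports as datasets.utils._dataset_viewer (per the record's path); "squad" is only an example dataset name.

from datasets.utils._dataset_viewer import (
    DatasetViewerError,
    get_exported_dataset_infos,
    get_exported_parquet_files,
)

try:
    # commit_hash=None accepts whatever revision the viewer has exported
    parquet_files = get_exported_parquet_files("squad", commit_hash=None, token=None)
    infos = get_exported_dataset_infos("squad", commit_hash=None, token=None)
    print(parquet_files[0]["url"], list(infos))
except DatasetViewerError:
    # raised when no Parquet export or dataset info is available (any request failure included)
    print("no viewer export available")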
# Copyright 2023 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Extends `dill` to support pickling more types and produce more consistent dumps."""\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom types import CodeType, FunctionType\n\nimport dill\nfrom packaging import version\n\nfrom .. import config\n\n\nclass Pickler(dill.Pickler):\n dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())\n _legacy_no_dict_keys_sorting = False\n\n def save(self, obj, save_persistent_id=True):\n obj_type = type(obj)\n if obj_type not in self.dispatch:\n if "regex" in sys.modules:\n import regex # type: ignore\n\n if obj_type is regex.Pattern:\n pklregister(obj_type)(_save_regexPattern)\n if "spacy" in sys.modules:\n import spacy # type: ignore\n\n if issubclass(obj_type, spacy.Language):\n pklregister(obj_type)(_save_spacyLanguage)\n if "tiktoken" in sys.modules:\n import tiktoken # type: ignore\n\n if obj_type is tiktoken.Encoding:\n pklregister(obj_type)(_save_tiktokenEncoding)\n if "torch" in sys.modules:\n import torch # type: ignore\n\n if issubclass(obj_type, torch.Tensor):\n pklregister(obj_type)(_save_torchTensor)\n\n if obj_type is torch.Generator:\n pklregister(obj_type)(_save_torchGenerator)\n\n # Unwrap `torch.compile`-ed modules\n if issubclass(obj_type, torch.nn.Module):\n obj = getattr(obj, "_orig_mod", obj)\n if "transformers" in sys.modules:\n import transformers # type: ignore\n\n if issubclass(obj_type, transformers.PreTrainedTokenizerBase):\n pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase)\n\n # Unwrap `torch.compile`-ed functions\n if obj_type is FunctionType:\n obj = getattr(obj, "_torchdynamo_orig_callable", obj)\n dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id)\n\n def _batch_setitems(self, items):\n if self._legacy_no_dict_keys_sorting:\n return super()._batch_setitems(items)\n # Ignore the order of keys in a dict\n try:\n # Faster, but fails for unorderable elements\n items = sorted(items)\n except Exception: # TypeError, decimal.InvalidOperation, etc.\n from datasets.fingerprint import Hasher\n\n items = sorted(items, key=lambda x: Hasher.hash(x[0]))\n dill.Pickler._batch_setitems(self, items)\n\n def memoize(self, obj):\n # Don't memoize strings since two identical strings can have different Python ids\n if type(obj) is not str: # noqa: E721\n dill.Pickler.memoize(self, obj)\n\n\ndef pklregister(t):\n """Register a custom reducer for the type."""\n\n def proxy(func):\n Pickler.dispatch[t] = func\n return func\n\n return proxy\n\n\ndef dump(obj, file):\n """Pickle an object to a file."""\n Pickler(file, recurse=True).dump(obj)\n\n\ndef dumps(obj):\n """Pickle an object to a string."""\n file = BytesIO()\n dump(obj, file)\n return file.getvalue()\n\n\nif config.DILL_VERSION < version.parse("0.3.6"):\n\n def log(pickler, msg):\n dill._dill.log.info(msg)\n\nelif config.DILL_VERSION.release[:3] in [\n version.parse("0.3.6").release,\n version.parse("0.3.7").release,\n 
version.parse("0.3.8").release,\n]:\n\n def log(pickler, msg):\n dill._dill.logger.trace(pickler, msg)\n\n\n@pklregister(set)\ndef _save_set(pickler, obj):\n log(pickler, f"Se: {obj}")\n try:\n # Faster, but fails for unorderable elements\n args = (sorted(obj),)\n except Exception: # TypeError, decimal.InvalidOperation, etc.\n from datasets.fingerprint import Hasher\n\n args = (sorted(obj, key=Hasher.hash),)\n\n pickler.save_reduce(set, args, obj=obj)\n log(pickler, "# Se")\n\n\ndef _save_regexPattern(pickler, obj):\n import regex # type: ignore\n\n log(pickler, f"Re: {obj}")\n args = (obj.pattern, obj.flags)\n pickler.save_reduce(regex.compile, args, obj=obj)\n log(pickler, "# Re")\n\n\ndef _save_tiktokenEncoding(pickler, obj):\n import tiktoken # type: ignore\n\n log(pickler, f"Enc: {obj}")\n args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens)\n pickler.save_reduce(tiktoken.Encoding, args, obj=obj)\n log(pickler, "# Enc")\n\n\ndef _save_torchTensor(pickler, obj):\n import torch # type: ignore\n\n # `torch.from_numpy` is not picklable in `torch>=1.11.0`\n def create_torchTensor(np_array, dtype=None):\n tensor = torch.from_numpy(np_array)\n if dtype:\n tensor = tensor.type(dtype)\n return tensor\n\n log(pickler, f"To: {obj}")\n if obj.dtype == torch.bfloat16:\n args = (obj.detach().to(torch.float).cpu().numpy(), torch.bfloat16)\n else:\n args = (obj.detach().cpu().numpy(),)\n pickler.save_reduce(create_torchTensor, args, obj=obj)\n log(pickler, "# To")\n\n\ndef _save_torchGenerator(pickler, obj):\n import torch # type: ignore\n\n def create_torchGenerator(state):\n generator = torch.Generator()\n generator.set_state(state)\n return generator\n\n log(pickler, f"Ge: {obj}")\n args = (obj.get_state(),)\n pickler.save_reduce(create_torchGenerator, args, obj=obj)\n log(pickler, "# Ge")\n\n\ndef _save_spacyLanguage(pickler, obj):\n import spacy # type: ignore\n\n def create_spacyLanguage(config, bytes):\n lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"])\n lang_inst = lang_cls.from_config(config)\n return lang_inst.from_bytes(bytes)\n\n log(pickler, f"Sp: {obj}")\n args = (obj.config, obj.to_bytes())\n pickler.save_reduce(create_spacyLanguage, args, obj=obj)\n log(pickler, "# Sp")\n\n\ndef _save_transformersPreTrainedTokenizerBase(pickler, obj):\n log(pickler, f"Tok: {obj}")\n # Ignore the `cache` attribute\n state = obj.__dict__\n if "cache" in state and isinstance(state["cache"], dict):\n state["cache"] = {}\n pickler.save_reduce(type(obj), (), state=state, obj=obj)\n log(pickler, "# Tok")\n\n\nif config.DILL_VERSION < version.parse("0.3.6"):\n\n @pklregister(CodeType)\n def _save_code(pickler, obj):\n """\n From dill._dill.save_code\n This is a modified version that removes the origin (filename + line no.)\n of functions created in notebooks or shells for example.\n """\n dill._dill.log.info(f"Co: {obj}")\n # The filename of a function is the .py file where it is defined.\n # Filenames of functions created in notebooks or shells start with '<'\n # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell\n # Filenames of functions created in ipykernel the filename\n # look like f"{tempdir}/ipykernel_{id1}/{id2}.py"\n # Moreover lambda functions have a special name: '<lambda>'\n # ex: (lambda x: x).__code__.co_name == "<lambda>" # True\n #\n # For the hashing mechanism we ignore where the function has been defined\n # More specifically:\n # - we ignore the filename of special functions (filename starts with '<')\n # - we always ignore the line 
number\n # - we only use the base name of the file instead of the whole path,\n # to be robust in case a script is moved for example.\n #\n # Only those two lines are different from the original implementation:\n co_filename = (\n ""\n if obj.co_filename.startswith("<")\n or (\n len(obj.co_filename.split(os.path.sep)) > 1\n and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")\n )\n or obj.co_name == "<lambda>"\n else os.path.basename(obj.co_filename)\n )\n co_firstlineno = 1\n # The rest is the same as in the original dill implementation\n if dill._dill.PY3:\n if hasattr(obj, "co_posonlyargcount"):\n args = (\n obj.co_argcount,\n obj.co_posonlyargcount,\n obj.co_kwonlyargcount,\n obj.co_nlocals,\n obj.co_stacksize,\n obj.co_flags,\n obj.co_code,\n obj.co_consts,\n obj.co_names,\n obj.co_varnames,\n co_filename,\n obj.co_name,\n co_firstlineno,\n obj.co_lnotab,\n obj.co_freevars,\n obj.co_cellvars,\n )\n else:\n args = (\n obj.co_argcount,\n obj.co_kwonlyargcount,\n obj.co_nlocals,\n obj.co_stacksize,\n obj.co_flags,\n obj.co_code,\n obj.co_consts,\n obj.co_names,\n obj.co_varnames,\n co_filename,\n obj.co_name,\n co_firstlineno,\n obj.co_lnotab,\n obj.co_freevars,\n obj.co_cellvars,\n )\n else:\n args = (\n obj.co_argcount,\n obj.co_nlocals,\n obj.co_stacksize,\n obj.co_flags,\n obj.co_code,\n obj.co_consts,\n obj.co_names,\n obj.co_varnames,\n co_filename,\n obj.co_name,\n co_firstlineno,\n obj.co_lnotab,\n obj.co_freevars,\n obj.co_cellvars,\n )\n pickler.save_reduce(CodeType, args, obj=obj)\n dill._dill.log.info("# Co")\n return\n\nelif config.DILL_VERSION.release[:3] in [\n version.parse("0.3.6").release,\n version.parse("0.3.7").release,\n version.parse("0.3.8").release,\n]:\n # From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104\n @pklregister(CodeType)\n def save_code(pickler, obj):\n dill._dill.logger.trace(pickler, "Co: %s", obj)\n\n ############################################################################################################\n # Modification here for huggingface/datasets\n # The filename of a function is the .py file where it is defined.\n # Filenames of functions created in notebooks or shells start with '<'\n # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell\n # Filenames of functions created in ipykernel the filename\n # look like f"{tempdir}/ipykernel_{id1}/{id2}.py"\n # Moreover lambda functions have a special name: '<lambda>'\n # ex: (lambda x: x).__code__.co_name == "<lambda>" # True\n #\n # For the hashing mechanism we ignore where the function has been defined\n # More specifically:\n # - we ignore the filename of special functions (filename starts with '<')\n # - we always ignore the line number\n # - we only use the base name of the file instead of the whole path,\n # to be robust in case a script is moved for example.\n #\n # Only those two lines are different from the original implementation:\n co_filename = (\n ""\n if obj.co_filename.startswith("<")\n or (\n len(obj.co_filename.split(os.path.sep)) > 1\n and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")\n )\n or obj.co_name == "<lambda>"\n else os.path.basename(obj.co_filename)\n )\n co_firstlineno = 1\n # The rest is the same as in the original dill implementation, except for the replacements:\n # - obj.co_filename => co_filename\n # - obj.co_firstlineno => co_firstlineno\n ############################################################################################################\n\n if hasattr(obj, 
"co_endlinetable"): # python 3.11a (20 args)\n args = (\n obj.co_lnotab, # for < python 3.10 [not counted in args]\n obj.co_argcount,\n obj.co_posonlyargcount,\n obj.co_kwonlyargcount,\n obj.co_nlocals,\n obj.co_stacksize,\n obj.co_flags,\n obj.co_code,\n obj.co_consts,\n obj.co_names,\n obj.co_varnames,\n co_filename, # Modification for huggingface/datasets ############################################\n obj.co_name,\n obj.co_qualname,\n co_firstlineno, # Modification for huggingface/datasets #########################################\n obj.co_linetable,\n obj.co_endlinetable,\n obj.co_columntable,\n obj.co_exceptiontable,\n obj.co_freevars,\n obj.co_cellvars,\n )\n elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args)\n args = (\n obj.co_lnotab, # for < python 3.10 [not counted in args]\n obj.co_argcount,\n obj.co_posonlyargcount,\n obj.co_kwonlyargcount,\n obj.co_nlocals,\n obj.co_stacksize,\n obj.co_flags,\n obj.co_code,\n obj.co_consts,\n obj.co_names,\n obj.co_varnames,\n co_filename, # Modification for huggingface/datasets ############################################\n obj.co_name,\n obj.co_qualname,\n co_firstlineno, # Modification for huggingface/datasets #########################################\n obj.co_linetable,\n obj.co_exceptiontable,\n obj.co_freevars,\n obj.co_cellvars,\n )\n elif hasattr(obj, "co_linetable"): # python 3.10 (16 args)\n args = (\n obj.co_lnotab, # for < python 3.10 [not counted in args]\n obj.co_argcount,\n obj.co_posonlyargcount,\n obj.co_kwonlyargcount,\n obj.co_nlocals,\n obj.co_stacksize,\n obj.co_flags,\n obj.co_code,\n obj.co_consts,\n obj.co_names,\n obj.co_varnames,\n co_filename, # Modification for huggingface/datasets ############################################\n obj.co_name,\n co_firstlineno, # Modification for huggingface/datasets #########################################\n obj.co_linetable,\n obj.co_freevars,\n obj.co_cellvars,\n )\n elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args)\n args = (\n obj.co_argcount,\n obj.co_posonlyargcount,\n obj.co_kwonlyargcount,\n obj.co_nlocals,\n obj.co_stacksize,\n obj.co_flags,\n obj.co_code,\n obj.co_consts,\n obj.co_names,\n obj.co_varnames,\n co_filename, # Modification for huggingface/datasets ############################################\n obj.co_name,\n co_firstlineno, # Modification for huggingface/datasets #########################################\n obj.co_lnotab,\n obj.co_freevars,\n obj.co_cellvars,\n )\n else: # python 3.7 (15 args)\n args = (\n obj.co_argcount,\n obj.co_kwonlyargcount,\n obj.co_nlocals,\n obj.co_stacksize,\n obj.co_flags,\n obj.co_code,\n obj.co_consts,\n obj.co_names,\n obj.co_varnames,\n co_filename, # Modification for huggingface/datasets ############################################\n obj.co_name,\n co_firstlineno, # Modification for huggingface/datasets #########################################\n obj.co_lnotab,\n obj.co_freevars,\n obj.co_cellvars,\n )\n\n pickler.save_reduce(dill._dill._create_code, args, obj=obj)\n dill._dill.logger.trace(pickler, "# Co")\n return\n
|
.venv\Lib\site-packages\datasets\utils\_dill.py
|
_dill.py
|
Python
| 17,136 | 0.95 | 0.172043 | 0.15099 |
react-lib
| 165 |
2024-07-28T14:08:08.097214
|
GPL-3.0
| false |
a4cff26892e3441a6f7b15d6969fee6f
|
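The `_dill.py` content above builds a deterministic `Pickler` (dict keys and sets are serialized in sorted order, strings are not memoized) plus `dump`/`dumps` helpers. A minimal sketch of exercising those helpers, assuming the `datasets` package shown in this row is installed; the two dictionaries are illustrative only:

```python
# Minimal sketch (assumes the `datasets` package from this row is installed).
# `dumps` is defined in datasets/utils/_dill.py above; the sample dicts are made up.
from datasets.utils._dill import dumps

a = {"x": 1, "y": 2}
b = {"y": 2, "x": 1}  # same mapping, different insertion order

# Because Pickler._batch_setitems sorts items, both dumps should be byte-identical.
assert dumps(a) == dumps(b)

# Sets go through the registered _save_set reducer, which also sorts elements.
assert dumps({3, 1, 2}) == dumps({2, 3, 1})
print("deterministic dump prefix:", dumps(a).hex()[:16], "...")
```

This order-insensitivity is what makes the dumps usable as cache fingerprints: logically equal containers hash to the same bytes regardless of construction order.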
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom . import tqdm as _tqdm # _tqdm is the module\nfrom .experimental import experimental\nfrom .info_utils import VerificationMode\nfrom .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled\nfrom .tqdm import (\n are_progress_bars_disabled,\n disable_progress_bars,\n enable_progress_bars,\n tqdm,\n)\nfrom .version import Version\n
|
.venv\Lib\site-packages\datasets\utils\__init__.py
|
__init__.py
|
Python
| 999 | 0.95 | 0.04 | 0.541667 |
react-lib
| 357 |
2024-11-29T10:58:19.285600
|
GPL-3.0
| false |
5ca42946cb0fd4f45c59758a2babdb28
|
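This `__init__.py` mostly re-exports helpers (progress-bar toggles, `tqdm`, `Version`, `experimental`, `VerificationMode`). A small usage sketch of the re-exported progress-bar controls, assuming `datasets` is installed:

```python
# Sketch of the progress-bar helpers re-exported by datasets.utils (from .tqdm).
from datasets.utils import (
    are_progress_bars_disabled,
    disable_progress_bars,
    enable_progress_bars,
)

disable_progress_bars()            # silence datasets' tqdm bars globally
assert are_progress_bars_disabled()

enable_progress_bars()             # turn them back on
assert not are_progress_bars_disabled()
```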
{\n "language": [\n "found",\n "crowdsourced",\n "expert-generated",\n "machine-generated",\n "other"\n ],\n "annotations": [\n "found",\n "crowdsourced",\n "expert-generated",\n "machine-generated",\n "no-annotation",\n "other"\n ]\n}\n
|
.venv\Lib\site-packages\datasets\utils\resources\creators.json
|
creators.json
|
JSON
| 257 | 0.7 | 0 | 0 |
node-utils
| 170 |
2024-03-09T08:43:00.482550
|
Apache-2.0
| false |
e7bc442ebd7062d9314aa2b751d67cae
|
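`creators.json` enumerates the accepted creator tags for the `language` and `annotations` origins. A hedged sketch of reading this packaged resource and checking a candidate tag against it; the loading code is an illustration, not necessarily the validation path the library itself uses:

```python
# Illustration only: load creators.json from the installed `datasets` package
# and test membership of a candidate tag. The helper name is made up.
import json
from importlib import resources

creators = json.loads(
    (resources.files("datasets.utils.resources") / "creators.json").read_text()
)

def is_valid_creator_tag(tag: str, kind: str = "annotations") -> bool:
    """Return True if `tag` is one of the allowed tags for `kind` ('language' or 'annotations')."""
    return tag in creators.get(kind, [])

print(is_valid_creator_tag("crowdsourced"))   # True
print(is_valid_creator_tag("synthetic"))      # False, not in the list
```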
{\n "monolingual": "contains a single language",\n "multilingual": "contains multiple languages",\n "translation": "contains translated or aligned text",\n "other": "other type of language distribution"\n}\n
|
.venv\Lib\site-packages\datasets\utils\resources\multilingualities.json
|
multilingualities.json
|
JSON
| 205 | 0.7 | 0 | 0 |
node-utils
| 888 |
2024-09-21T22:39:21.590377
|
Apache-2.0
| false |
f4c67c7a1c0c630672ea0bff0b72f15b
|
name: "" # Filename comes here\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null # meaning it should not be checked.\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Dataset Structure"\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Data Instances"\n allow_empty: false\n allow_empty_text: true\n subsections: null\n - name: "Data Fields"\n allow_empty: false\n allow_empty_text: true\n subsections: null\n - name: "Data Splits"\n allow_empty: false\n allow_empty_text: true\n subsections: null\n - name: "Dataset Creation"\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Curation Rationale"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Source Data"\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Initial Data Collection and Normalization"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Who are the source language producers?"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Annotations"\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Annotation process"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Who are the annotators?"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Personal and Sensitive Information"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Considerations for Using the Data"\n allow_empty: true\n allow_empty_text: true\n subsections:\n - name: "Social Impact of Dataset"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Discussion of Biases"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Other Known Limitations"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Additional Information"\n allow_empty: true\n allow_empty_text: true\n subsections:\n - name: "Dataset Curators"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Licensing Information"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: "Citation Information"\n allow_empty: false\n allow_empty_text: true\n subsections: null\n - name: "Contributions"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n
|
.venv\Lib\site-packages\datasets\utils\resources\readme_structure.yaml
|
readme_structure.yaml
|
YAML
| 3,877 | 0.8 | 0.017241 | 0 |
python-kit
| 280 |
2025-03-02T19:47:22.375332
|
Apache-2.0
| false |
9760c66299752d0d20a99b18ebd150ff
|
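`readme_structure.yaml` declares the expected heading tree for dataset cards (`allow_empty`, `allow_empty_text`, nested `subsections`, with `null` meaning the subtree is not checked further). A minimal sketch of walking this spec to list the section names and which ones may not be empty; it is an illustration, not the validator `datasets` actually ships:

```python
# Illustration only: flatten the heading spec into (depth, name, allow_empty) rows.
# Assumes PyYAML is available and the YAML above is saved locally as readme_structure.yaml.
import yaml

def walk(section, depth=0):
    yield depth, section.get("name"), section.get("allow_empty", True)
    for sub in section.get("subsections") or []:   # `null` stops the recursion
        yield from walk(sub, depth + 1)

with open("readme_structure.yaml") as f:
    spec = yaml.safe_load(f)

for depth, name, allow_empty in walk(spec):
    marker = "" if allow_empty else "(required)"
    print("  " * depth + f"- {name or '<filename>'} {marker}".rstrip())
```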
[\n "unknown",\n "n<1K",\n "1K<n<10K",\n "10K<n<100K",\n "100K<n<1M",\n "1M<n<10M",\n "10M<n<100M",\n "100M<n<1B",\n "1B<n<10B",\n "10B<n<100B",\n "100B<n<1T",\n "n>1T"\n]\n
|
.venv\Lib\site-packages\datasets\utils\resources\size_categories.json
|
size_categories.json
|
JSON
| 171 | 0.7 | 0 | 0 |
python-kit
| 733 |
2024-04-05T14:43:25.750949
|
GPL-3.0
| false |
31e4973e2def155cb0d39d0066fc7d46
|
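`size_categories.json` lists the bucket labels used for the `size_categories` metadata tag. A small hedged helper that maps an example count onto one of those buckets; the thresholds are read off the labels themselves and the function name is hypothetical:

```python
# Hypothetical helper: map a number of examples to the matching size_categories bucket.
# The bucket labels mirror size_categories.json; the function itself is illustrative.
def size_category(n: int) -> str:
    thresholds = [
        (1_000, "n<1K"), (10_000, "1K<n<10K"), (100_000, "10K<n<100K"),
        (1_000_000, "100K<n<1M"), (10_000_000, "1M<n<10M"),
        (100_000_000, "10M<n<100M"), (1_000_000_000, "100M<n<1B"),
        (10_000_000_000, "1B<n<10B"), (100_000_000_000, "10B<n<100B"),
        (1_000_000_000_000, "100B<n<1T"),
    ]
    for upper, label in thresholds:
        if n < upper:
            return label
    return "n>1T"

print(size_category(4_200))        # "1K<n<10K"
print(size_category(2_500_000))    # "1M<n<10M"
```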
\n\n
|
.venv\Lib\site-packages\datasets\utils\resources\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 199 | 0.7 | 0 | 0 |
vue-tools
| 933 |
2023-11-29T19:25:35.113555
|
GPL-3.0
| false |
bb8ececf16e685ec86547d61dee8c622
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\deprecation_utils.cpython-313.pyc
|
deprecation_utils.cpython-313.pyc
|
Other
| 5,266 | 0.95 | 0.106383 | 0 |
python-kit
| 937 |
2024-07-28T22:40:22.189618
|
MIT
| false |
f4d87ee362fef226da458a79581d96a9
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\doc_utils.cpython-313.pyc
|
doc_utils.cpython-313.pyc
|
Other
| 763 | 0.85 | 0.125 | 0 |
react-lib
| 944 |
2024-10-03T07:11:42.303057
|
GPL-3.0
| false |
0bb7bb230e43c67dac68d8a8d4e4c514
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\experimental.cpython-313.pyc
|
experimental.cpython-313.pyc
|
Other
| 1,555 | 0.95 | 0.096774 | 0 |
vue-tools
| 122 |
2024-01-17T22:43:07.857893
|
BSD-3-Clause
| false |
76fc683d49b32a2e957487ff7cee6179
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\extract.cpython-313.pyc
|
extract.cpython-313.pyc
|
Other
| 21,492 | 0.95 | 0.004587 | 0.02439 |
vue-tools
| 62 |
2024-05-14T18:40:57.102829
|
GPL-3.0
| false |
16da48b17f2611555642b61f6767a931
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\filelock.cpython-313.pyc
|
filelock.cpython-313.pyc
|
Other
| 384 | 0.7 | 0 | 0 |
python-kit
| 168 |
2024-05-19T20:44:14.569260
|
GPL-3.0
| false |
5402bcbf69fed6a3db0f5349192da74b
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\file_utils.cpython-313.pyc
|
file_utils.cpython-313.pyc
|
Other
| 67,134 | 0.75 | 0.068613 | 0.013378 |
vue-tools
| 534 |
2025-02-09T20:34:11.164668
|
MIT
| false |
7a1a65b91fa9d9f921fba2d2cf8e772e
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\hub.cpython-313.pyc
|
hub.cpython-313.pyc
|
Other
| 876 | 0.7 | 0 | 0 |
node-utils
| 239 |
2024-03-06T10:15:49.214642
|
MIT
| false |
49f0d3a72bdae10ef1708f09b728a9bf
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\info_utils.cpython-313.pyc
|
info_utils.cpython-313.pyc
|
Other
| 6,086 | 0.8 | 0.039474 | 0 |
node-utils
| 779 |
2023-07-18T16:58:19.024516
|
Apache-2.0
| false |
2768decd066b7c28c72f074bfb6f44dd
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\logging.cpython-313.pyc
|
logging.cpython-313.pyc
|
Other
| 6,371 | 0.95 | 0.103896 | 0 |
vue-tools
| 934 |
2023-11-11T09:47:05.022731
|
MIT
| false |
4483e940a60197d1e2094105ee4e8e91
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\metadata.cpython-313.pyc
|
metadata.cpython-313.pyc
|
Other
| 11,751 | 0.8 | 0 | 0 |
python-kit
| 449 |
2023-07-23T06:34:36.290699
|
MIT
| false |
50a45468e1fff7a37040680d624c0847
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\patching.cpython-313.pyc
|
patching.cpython-313.pyc
|
Other
| 5,788 | 0.95 | 0 | 0 |
react-lib
| 558 |
2024-08-26T22:58:42.599669
|
Apache-2.0
| false |
d1ae49823c4e14188b8a8bb6164464a7
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\py_utils.cpython-313.pyc
|
py_utils.cpython-313.pyc
|
Other
| 37,658 | 0.95 | 0.080605 | 0.005571 |
python-kit
| 774 |
2024-01-23T15:06:50.107810
|
GPL-3.0
| false |
139935fc426e726c5f88ed355c1851fa
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\sharding.cpython-313.pyc
|
sharding.cpython-313.pyc
|
Other
| 5,473 | 0.8 | 0.027027 | 0 |
awesome-app
| 957 |
2024-04-04T08:05:56.109727
|
MIT
| false |
e84a68fd6ee106b324d33552db13293d
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\stratify.cpython-313.pyc
|
stratify.cpython-313.pyc
|
Other
| 4,422 | 0.95 | 0.070423 | 0 |
node-utils
| 444 |
2024-02-19T16:38:17.368345
|
MIT
| false |
3e5f9d545558e791bb154c47a0165586
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\tf_utils.cpython-313.pyc
|
tf_utils.cpython-313.pyc
|
Other
| 25,332 | 0.95 | 0.041494 | 0 |
python-kit
| 189 |
2025-01-04T08:31:57.453551
|
GPL-3.0
| false |
e42abe3ea2decda6906866df285b9421
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\tqdm.cpython-313.pyc
|
tqdm.cpython-313.pyc
|
Other
| 4,549 | 0.95 | 0.064516 | 0.064935 |
react-lib
| 690 |
2023-09-23T17:12:32.432312
|
GPL-3.0
| false |
f59e4f681010d35a345bd05220813e62
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\track.cpython-313.pyc
|
track.cpython-313.pyc
|
Other
| 4,352 | 0.95 | 0.083333 | 0 |
node-utils
| 794 |
2024-12-01T07:39:42.174180
|
BSD-3-Clause
| false |
142b339a571db001d6760751d0dc97fb
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\typing.cpython-313.pyc
|
typing.cpython-313.pyc
|
Other
| 547 | 0.7 | 0 | 0 |
node-utils
| 680 |
2023-11-29T00:49:42.706692
|
GPL-3.0
| false |
5f3c1a5704b84d5f1432dbe8d9362669
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\version.cpython-313.pyc
|
version.cpython-313.pyc
|
Other
| 5,477 | 0.8 | 0 | 0 |
vue-tools
| 901 |
2024-12-28T12:02:11.596016
|
MIT
| false |
af315c00a1c95bd46ab1ac48d96c94a6
|
\n\n
|
.venv\Lib\site-packages\datasets\utils\__pycache__\_dataset_viewer.cpython-313.pyc
|
_dataset_viewer.cpython-313.pyc
|
Other
| 5,071 | 0.8 | 0.074074 | 0 |
python-kit
| 804 |
2024-04-07T15:36:37.625106
|
BSD-3-Clause
| false |
b0526d9f07645cfe4443ff32d38ae566
|