content (string, 1-103k chars, nullable) | path (string, 8-216 chars) | filename (string, 2-179 chars) | language (string, 15 classes) | size_bytes (int64, 2-189k) | quality_score (float64, 0.5-0.95) | complexity (float64, 0-1) | documentation_ratio (float64, 0-1) | repository (string, 5 classes) | stars (int64, 0-1k) | created_date (date, 2023-07-10 19:21:08 to 2025-07-09 19:11:45) | license (string, 4 classes) | is_test (bool, 2 classes) | file_hash (string, 32 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
GLOB_EDGE_CASES_TESTS = {\n "argnames": ("path", "recursive", "maxdepth", "expected"),\n "argvalues": [\n ("fil?1", False, None, ["file1"]),\n ("fil?1", True, None, ["file1"]),\n ("file[1-2]", False, None, ["file1", "file2"]),\n ("file[1-2]", True, None, ["file1", "file2"]),\n ("*", False, None, ["file1", "file2"]),\n (\n "*",\n True,\n None,\n [\n "file1",\n "file2",\n "subdir0/subfile1",\n "subdir0/subfile2",\n "subdir0/nesteddir/nestedfile",\n "subdir1/subfile1",\n "subdir1/subfile2",\n "subdir1/nesteddir/nestedfile",\n ],\n ),\n ("*", True, 1, ["file1", "file2"]),\n (\n "*",\n True,\n 2,\n [\n "file1",\n "file2",\n "subdir0/subfile1",\n "subdir0/subfile2",\n "subdir1/subfile1",\n "subdir1/subfile2",\n ],\n ),\n ("*1", False, None, ["file1"]),\n (\n "*1",\n True,\n None,\n [\n "file1",\n "subdir1/subfile1",\n "subdir1/subfile2",\n "subdir1/nesteddir/nestedfile",\n ],\n ),\n ("*1", True, 2, ["file1", "subdir1/subfile1", "subdir1/subfile2"]),\n (\n "**",\n False,\n None,\n [\n "file1",\n "file2",\n "subdir0/subfile1",\n "subdir0/subfile2",\n "subdir0/nesteddir/nestedfile",\n "subdir1/subfile1",\n "subdir1/subfile2",\n "subdir1/nesteddir/nestedfile",\n ],\n ),\n (\n "**",\n True,\n None,\n [\n "file1",\n "file2",\n "subdir0/subfile1",\n "subdir0/subfile2",\n "subdir0/nesteddir/nestedfile",\n "subdir1/subfile1",\n "subdir1/subfile2",\n "subdir1/nesteddir/nestedfile",\n ],\n ),\n ("**", True, 1, ["file1", "file2"]),\n (\n "**",\n True,\n 2,\n [\n "file1",\n "file2",\n "subdir0/subfile1",\n "subdir0/subfile2",\n "subdir0/nesteddir/nestedfile",\n "subdir1/subfile1",\n "subdir1/subfile2",\n "subdir1/nesteddir/nestedfile",\n ],\n ),\n (\n "**",\n False,\n 2,\n [\n "file1",\n "file2",\n "subdir0/subfile1",\n "subdir0/subfile2",\n "subdir1/subfile1",\n "subdir1/subfile2",\n ],\n ),\n ("**/*1", False, None, ["file1", "subdir0/subfile1", "subdir1/subfile1"]),\n (\n "**/*1",\n True,\n None,\n [\n "file1",\n "subdir0/subfile1",\n "subdir1/subfile1",\n "subdir1/subfile2",\n "subdir1/nesteddir/nestedfile",\n ],\n ),\n ("**/*1", True, 1, ["file1"]),\n (\n "**/*1",\n True,\n 2,\n ["file1", "subdir0/subfile1", "subdir1/subfile1", "subdir1/subfile2"],\n ),\n ("**/*1", False, 2, ["file1", "subdir0/subfile1", "subdir1/subfile1"]),\n ("**/subdir0", False, None, []),\n ("**/subdir0", True, None, ["subfile1", "subfile2", "nesteddir/nestedfile"]),\n ("**/subdir0/nested*", False, 2, []),\n ("**/subdir0/nested*", True, 2, ["nestedfile"]),\n ("subdir[1-2]", False, None, []),\n ("subdir[1-2]", True, None, ["subfile1", "subfile2", "nesteddir/nestedfile"]),\n ("subdir[1-2]", True, 2, ["subfile1", "subfile2"]),\n ("subdir[0-1]", False, None, []),\n (\n "subdir[0-1]",\n True,\n None,\n [\n "subdir0/subfile1",\n "subdir0/subfile2",\n "subdir0/nesteddir/nestedfile",\n "subdir1/subfile1",\n "subdir1/subfile2",\n "subdir1/nesteddir/nestedfile",\n ],\n ),\n (\n "subdir[0-1]/*fil[e]*",\n False,\n None,\n [\n "subdir0/subfile1",\n "subdir0/subfile2",\n "subdir1/subfile1",\n "subdir1/subfile2",\n ],\n ),\n (\n "subdir[0-1]/*fil[e]*",\n True,\n None,\n [\n "subdir0/subfile1",\n "subdir0/subfile2",\n "subdir1/subfile1",\n "subdir1/subfile2",\n ],\n ),\n ],\n}\n
| .venv\Lib\site-packages\fsspec\tests\abstract\common.py | common.py | Python | 4,973 | 0.8 | 0 | 0 | node-utils | 621 | 2023-10-06T16:19:25.002219 | Apache-2.0 | true | a142d111eeb64e2d8a65382629be6e18 |
from hashlib import md5\nfrom itertools import product\n\nimport pytest\n\nfrom fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS\n\n\nclass AbstractCopyTests:\n def test_copy_file_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n fs_target,\n supports_empty_directories,\n ):\n # Copy scenario 1a\n source = fs_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n if not supports_empty_directories:\n # Force target directory to exist by adding a dummy file\n fs.touch(fs_join(target, "dummy"))\n assert fs.isdir(target)\n\n target_file2 = fs_join(target, "file2")\n target_subfile1 = fs_join(target, "subfile1")\n\n # Copy from source directory\n fs.cp(fs_join(source, "file2"), target)\n assert fs.isfile(target_file2)\n\n # Copy from sub directory\n fs.cp(fs_join(source, "subdir", "subfile1"), target)\n assert fs.isfile(target_subfile1)\n\n # Remove copied files\n fs.rm([target_file2, target_subfile1])\n assert not fs.exists(target_file2)\n assert not fs.exists(target_subfile1)\n\n # Repeat with trailing slash on target\n fs.cp(fs_join(source, "file2"), target + "/")\n assert fs.isdir(target)\n assert fs.isfile(target_file2)\n\n fs.cp(fs_join(source, "subdir", "subfile1"), target + "/")\n assert fs.isfile(target_subfile1)\n\n def test_copy_file_to_new_directory(\n self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target\n ):\n # Copy scenario 1b\n source = fs_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n\n fs.cp(\n fs_join(source, "subdir", "subfile1"), fs_join(target, "newdir/")\n ) # Note trailing slash\n assert fs.isdir(target)\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n\n def test_copy_file_to_file_in_existing_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n fs_target,\n supports_empty_directories,\n ):\n # Copy scenario 1c\n source = fs_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n if not supports_empty_directories:\n # Force target directory to exist by adding a dummy file\n fs.touch(fs_join(target, "dummy"))\n assert fs.isdir(target)\n\n fs.cp(fs_join(source, "subdir", "subfile1"), fs_join(target, "newfile"))\n assert fs.isfile(fs_join(target, "newfile"))\n\n def test_copy_file_to_file_in_new_directory(\n self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target\n ):\n # Copy scenario 1d\n source = fs_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n\n fs.cp(\n fs_join(source, "subdir", "subfile1"), fs_join(target, "newdir", "newfile")\n )\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "newfile"))\n\n def test_copy_directory_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n fs_target,\n supports_empty_directories,\n ):\n # Copy scenario 1e\n source = fs_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n if not supports_empty_directories:\n # Force target directory to exist by adding a dummy file\n dummy = fs_join(target, "dummy")\n fs.touch(dummy)\n assert fs.isdir(target)\n\n for source_slash, target_slash in zip([False, True], [False, True]):\n s = fs_join(source, "subdir")\n if source_slash:\n s += "/"\n t = target + "/" if target_slash else target\n\n # Without recursive does nothing\n fs.cp(s, t)\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n # With recursive\n fs.cp(s, t, recursive=True)\n if source_slash:\n assert 
fs.isfile(fs_join(target, "subfile1"))\n assert fs.isfile(fs_join(target, "subfile2"))\n assert fs.isdir(fs_join(target, "nesteddir"))\n assert fs.isfile(fs_join(target, "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(\n [\n fs_join(target, "subfile1"),\n fs_join(target, "subfile2"),\n fs_join(target, "nesteddir"),\n ],\n recursive=True,\n )\n else:\n assert fs.isdir(fs_join(target, "subdir"))\n assert fs.isfile(fs_join(target, "subdir", "subfile1"))\n assert fs.isfile(fs_join(target, "subdir", "subfile2"))\n assert fs.isdir(fs_join(target, "subdir", "nesteddir"))\n assert fs.isfile(fs_join(target, "subdir", "nesteddir", "nestedfile"))\n\n fs.rm(fs_join(target, "subdir"), recursive=True)\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n # Limit recursive by maxdepth\n fs.cp(s, t, recursive=True, maxdepth=1)\n if source_slash:\n assert fs.isfile(fs_join(target, "subfile1"))\n assert fs.isfile(fs_join(target, "subfile2"))\n assert not fs.exists(fs_join(target, "nesteddir"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(\n [\n fs_join(target, "subfile1"),\n fs_join(target, "subfile2"),\n ],\n recursive=True,\n )\n else:\n assert fs.isdir(fs_join(target, "subdir"))\n assert fs.isfile(fs_join(target, "subdir", "subfile1"))\n assert fs.isfile(fs_join(target, "subdir", "subfile2"))\n assert not fs.exists(fs_join(target, "subdir", "nesteddir"))\n\n fs.rm(fs_join(target, "subdir"), recursive=True)\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n def test_copy_directory_to_new_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n fs_target,\n supports_empty_directories,\n ):\n # Copy scenario 1f\n source = fs_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n\n for source_slash, target_slash in zip([False, True], [False, True]):\n s = fs_join(source, "subdir")\n if source_slash:\n s += "/"\n t = fs_join(target, "newdir")\n if target_slash:\n t += "/"\n\n # Without recursive does nothing\n fs.cp(s, t)\n if supports_empty_directories:\n assert fs.ls(target) == []\n else:\n with pytest.raises(FileNotFoundError):\n fs.ls(target)\n\n # With recursive\n fs.cp(s, t, recursive=True)\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n assert fs.isfile(fs_join(target, "newdir", "subfile2"))\n assert fs.isdir(fs_join(target, "newdir", "nesteddir"))\n assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(fs_join(target, "newdir"), recursive=True)\n assert not fs.exists(fs_join(target, "newdir"))\n\n # Limit recursive by maxdepth\n fs.cp(s, t, recursive=True, maxdepth=1)\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n assert fs.isfile(fs_join(target, "newdir", "subfile2"))\n assert not fs.exists(fs_join(target, "newdir", "nesteddir"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(fs_join(target, "newdir"), recursive=True)\n assert not fs.exists(fs_join(target, "newdir"))\n\n def test_copy_glob_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n fs_target,\n supports_empty_directories,\n ):\n # Copy scenario 1g\n source = fs_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n if not supports_empty_directories:\n # Force target directory to exist by adding a dummy file\n dummy = 
fs_join(target, "dummy")\n fs.touch(dummy)\n assert fs.isdir(target)\n\n for target_slash in [False, True]:\n t = target + "/" if target_slash else target\n\n # Without recursive\n fs.cp(fs_join(source, "subdir", "*"), t)\n assert fs.isfile(fs_join(target, "subfile1"))\n assert fs.isfile(fs_join(target, "subfile2"))\n assert not fs.isdir(fs_join(target, "nesteddir"))\n assert not fs.exists(fs_join(target, "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(\n [\n fs_join(target, "subfile1"),\n fs_join(target, "subfile2"),\n ],\n recursive=True,\n )\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n # With recursive\n for glob, recursive in zip(["*", "**"], [True, False]):\n fs.cp(fs_join(source, "subdir", glob), t, recursive=recursive)\n assert fs.isfile(fs_join(target, "subfile1"))\n assert fs.isfile(fs_join(target, "subfile2"))\n assert fs.isdir(fs_join(target, "nesteddir"))\n assert fs.isfile(fs_join(target, "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(\n [\n fs_join(target, "subfile1"),\n fs_join(target, "subfile2"),\n fs_join(target, "nesteddir"),\n ],\n recursive=True,\n )\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n # Limit recursive by maxdepth\n fs.cp(\n fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1\n )\n assert fs.isfile(fs_join(target, "subfile1"))\n assert fs.isfile(fs_join(target, "subfile2"))\n assert not fs.exists(fs_join(target, "nesteddir"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(\n [\n fs_join(target, "subfile1"),\n fs_join(target, "subfile2"),\n ],\n recursive=True,\n )\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n def test_copy_glob_to_new_directory(\n self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target\n ):\n # Copy scenario 1h\n source = fs_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n\n for target_slash in [False, True]:\n t = fs_join(target, "newdir")\n if target_slash:\n t += "/"\n\n # Without recursive\n fs.cp(fs_join(source, "subdir", "*"), t)\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n assert fs.isfile(fs_join(target, "newdir", "subfile2"))\n assert not fs.exists(fs_join(target, "newdir", "nesteddir"))\n assert not fs.exists(fs_join(target, "newdir", "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n assert not fs.exists(fs_join(target, "newdir", "subdir"))\n\n fs.rm(fs_join(target, "newdir"), recursive=True)\n assert not fs.exists(fs_join(target, "newdir"))\n\n # With recursive\n for glob, recursive in zip(["*", "**"], [True, False]):\n fs.cp(fs_join(source, "subdir", glob), t, recursive=recursive)\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n assert fs.isfile(fs_join(target, "newdir", "subfile2"))\n assert fs.isdir(fs_join(target, "newdir", "nesteddir"))\n assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n assert not fs.exists(fs_join(target, "newdir", "subdir"))\n\n fs.rm(fs_join(target, "newdir"), recursive=True)\n assert not fs.exists(fs_join(target, "newdir"))\n\n # Limit recursive by maxdepth\n fs.cp(\n fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1\n )\n assert fs.isdir(fs_join(target, "newdir"))\n assert 
fs.isfile(fs_join(target, "newdir", "subfile1"))\n assert fs.isfile(fs_join(target, "newdir", "subfile2"))\n assert not fs.exists(fs_join(target, "newdir", "nesteddir"))\n assert not fs.exists(fs_join(target, "subdir"))\n assert not fs.exists(fs_join(target, "newdir", "subdir"))\n\n fs.rm(fs_join(target, "newdir"), recursive=True)\n assert not fs.exists(fs_join(target, "newdir"))\n\n @pytest.mark.parametrize(\n GLOB_EDGE_CASES_TESTS["argnames"],\n GLOB_EDGE_CASES_TESTS["argvalues"],\n )\n def test_copy_glob_edge_cases(\n self,\n path,\n recursive,\n maxdepth,\n expected,\n fs,\n fs_join,\n fs_glob_edge_cases_files,\n fs_target,\n fs_sanitize_path,\n ):\n # Copy scenario 1g\n source = fs_glob_edge_cases_files\n\n target = fs_target\n\n for new_dir, target_slash in product([True, False], [True, False]):\n fs.mkdir(target)\n\n t = fs_join(target, "newdir") if new_dir else target\n t = t + "/" if target_slash else t\n\n fs.copy(fs_join(source, path), t, recursive=recursive, maxdepth=maxdepth)\n\n output = fs.find(target)\n if new_dir:\n prefixed_expected = [\n fs_sanitize_path(fs_join(target, "newdir", p)) for p in expected\n ]\n else:\n prefixed_expected = [\n fs_sanitize_path(fs_join(target, p)) for p in expected\n ]\n assert sorted(output) == sorted(prefixed_expected)\n\n try:\n fs.rm(target, recursive=True)\n except FileNotFoundError:\n pass\n\n def test_copy_list_of_files_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n fs_target,\n supports_empty_directories,\n ):\n # Copy scenario 2a\n source = fs_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n if not supports_empty_directories:\n # Force target directory to exist by adding a dummy file\n dummy = fs_join(target, "dummy")\n fs.touch(dummy)\n assert fs.isdir(target)\n\n source_files = [\n fs_join(source, "file1"),\n fs_join(source, "file2"),\n fs_join(source, "subdir", "subfile1"),\n ]\n\n for target_slash in [False, True]:\n t = target + "/" if target_slash else target\n\n fs.cp(source_files, t)\n assert fs.isfile(fs_join(target, "file1"))\n assert fs.isfile(fs_join(target, "file2"))\n assert fs.isfile(fs_join(target, "subfile1"))\n\n fs.rm(\n [\n fs_join(target, "file1"),\n fs_join(target, "file2"),\n fs_join(target, "subfile1"),\n ],\n recursive=True,\n )\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n def test_copy_list_of_files_to_new_directory(\n self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target\n ):\n # Copy scenario 2b\n source = fs_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n\n source_files = [\n fs_join(source, "file1"),\n fs_join(source, "file2"),\n fs_join(source, "subdir", "subfile1"),\n ]\n\n fs.cp(source_files, fs_join(target, "newdir") + "/") # Note trailing slash\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "file1"))\n assert fs.isfile(fs_join(target, "newdir", "file2"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n\n def test_copy_two_files_new_directory(\n self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target\n ):\n # This is a duplicate of test_copy_list_of_files_to_new_directory and\n # can eventually be removed.\n source = fs_bulk_operations_scenario_0\n\n target = fs_target\n assert not fs.exists(target)\n fs.cp([fs_join(source, "file1"), fs_join(source, "file2")], target)\n\n assert fs.isdir(target)\n assert fs.isfile(fs_join(target, "file1"))\n assert fs.isfile(fs_join(target, "file2"))\n\n def 
test_copy_directory_without_files_with_same_name_prefix(\n self,\n fs,\n fs_join,\n fs_target,\n fs_dir_and_file_with_same_name_prefix,\n supports_empty_directories,\n ):\n # Create the test dirs\n source = fs_dir_and_file_with_same_name_prefix\n target = fs_target\n\n # Test without glob\n fs.cp(fs_join(source, "subdir"), target, recursive=True)\n\n assert fs.isfile(fs_join(target, "subfile.txt"))\n assert not fs.isfile(fs_join(target, "subdir.txt"))\n\n fs.rm([fs_join(target, "subfile.txt")])\n if supports_empty_directories:\n assert fs.ls(target) == []\n else:\n assert not fs.exists(target)\n\n # Test with glob\n fs.cp(fs_join(source, "subdir*"), target, recursive=True)\n\n assert fs.isdir(fs_join(target, "subdir"))\n assert fs.isfile(fs_join(target, "subdir", "subfile.txt"))\n assert fs.isfile(fs_join(target, "subdir.txt"))\n\n def test_copy_with_source_and_destination_as_list(\n self, fs, fs_target, fs_join, fs_10_files_with_hashed_names\n ):\n # Create the test dir\n source = fs_10_files_with_hashed_names\n target = fs_target\n\n # Create list of files for source and destination\n source_files = []\n destination_files = []\n for i in range(10):\n hashed_i = md5(str(i).encode("utf-8")).hexdigest()\n source_files.append(fs_join(source, f"{hashed_i}.txt"))\n destination_files.append(fs_join(target, f"{hashed_i}.txt"))\n\n # Copy and assert order was kept\n fs.copy(path1=source_files, path2=destination_files)\n\n for i in range(10):\n file_content = fs.cat(destination_files[i]).decode("utf-8")\n assert file_content == str(i)\n
| .venv\Lib\site-packages\fsspec\tests\abstract\copy.py | copy.py | Python | 19,967 | 0.95 | 0.098743 | 0.084926 | python-kit | 798 | 2024-02-29T07:31:21.998464 | GPL-3.0 | true | 2430cdc4047e6270b5b51796fd64b7bb |
from hashlib import md5\nfrom itertools import product\n\nimport pytest\n\nfrom fsspec.implementations.local import make_path_posix\nfrom fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS\n\n\nclass AbstractGetTests:\n def test_get_file_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n local_fs,\n local_join,\n local_target,\n ):\n # Copy scenario 1a\n source = fs_bulk_operations_scenario_0\n\n target = local_target\n local_fs.mkdir(target)\n assert local_fs.isdir(target)\n\n target_file2 = local_join(target, "file2")\n target_subfile1 = local_join(target, "subfile1")\n\n # Copy from source directory\n fs.get(fs_join(source, "file2"), target)\n assert local_fs.isfile(target_file2)\n\n # Copy from sub directory\n fs.get(fs_join(source, "subdir", "subfile1"), target)\n assert local_fs.isfile(target_subfile1)\n\n # Remove copied files\n local_fs.rm([target_file2, target_subfile1])\n assert not local_fs.exists(target_file2)\n assert not local_fs.exists(target_subfile1)\n\n # Repeat with trailing slash on target\n fs.get(fs_join(source, "file2"), target + "/")\n assert local_fs.isdir(target)\n assert local_fs.isfile(target_file2)\n\n fs.get(fs_join(source, "subdir", "subfile1"), target + "/")\n assert local_fs.isfile(target_subfile1)\n\n def test_get_file_to_new_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n local_fs,\n local_join,\n local_target,\n ):\n # Copy scenario 1b\n source = fs_bulk_operations_scenario_0\n\n target = local_target\n local_fs.mkdir(target)\n\n fs.get(\n fs_join(source, "subdir", "subfile1"), local_join(target, "newdir/")\n ) # Note trailing slash\n\n assert local_fs.isdir(target)\n assert local_fs.isdir(local_join(target, "newdir"))\n assert local_fs.isfile(local_join(target, "newdir", "subfile1"))\n\n def test_get_file_to_file_in_existing_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n local_fs,\n local_join,\n local_target,\n ):\n # Copy scenario 1c\n source = fs_bulk_operations_scenario_0\n\n target = local_target\n local_fs.mkdir(target)\n\n fs.get(fs_join(source, "subdir", "subfile1"), local_join(target, "newfile"))\n assert local_fs.isfile(local_join(target, "newfile"))\n\n def test_get_file_to_file_in_new_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n local_fs,\n local_join,\n local_target,\n ):\n # Copy scenario 1d\n source = fs_bulk_operations_scenario_0\n\n target = local_target\n local_fs.mkdir(target)\n\n fs.get(\n fs_join(source, "subdir", "subfile1"),\n local_join(target, "newdir", "newfile"),\n )\n assert local_fs.isdir(local_join(target, "newdir"))\n assert local_fs.isfile(local_join(target, "newdir", "newfile"))\n\n def test_get_directory_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n local_fs,\n local_join,\n local_target,\n ):\n # Copy scenario 1e\n source = fs_bulk_operations_scenario_0\n\n target = local_target\n local_fs.mkdir(target)\n assert local_fs.isdir(target)\n\n for source_slash, target_slash in zip([False, True], [False, True]):\n s = fs_join(source, "subdir")\n if source_slash:\n s += "/"\n t = target + "/" if target_slash else target\n\n # Without recursive does nothing\n fs.get(s, t)\n assert local_fs.ls(target) == []\n\n # With recursive\n fs.get(s, t, recursive=True)\n if source_slash:\n assert local_fs.isfile(local_join(target, "subfile1"))\n assert local_fs.isfile(local_join(target, "subfile2"))\n assert local_fs.isdir(local_join(target, "nesteddir"))\n assert 
local_fs.isfile(local_join(target, "nesteddir", "nestedfile"))\n assert not local_fs.exists(local_join(target, "subdir"))\n\n local_fs.rm(\n [\n local_join(target, "subfile1"),\n local_join(target, "subfile2"),\n local_join(target, "nesteddir"),\n ],\n recursive=True,\n )\n else:\n assert local_fs.isdir(local_join(target, "subdir"))\n assert local_fs.isfile(local_join(target, "subdir", "subfile1"))\n assert local_fs.isfile(local_join(target, "subdir", "subfile2"))\n assert local_fs.isdir(local_join(target, "subdir", "nesteddir"))\n assert local_fs.isfile(\n local_join(target, "subdir", "nesteddir", "nestedfile")\n )\n\n local_fs.rm(local_join(target, "subdir"), recursive=True)\n assert local_fs.ls(target) == []\n\n # Limit recursive by maxdepth\n fs.get(s, t, recursive=True, maxdepth=1)\n if source_slash:\n assert local_fs.isfile(local_join(target, "subfile1"))\n assert local_fs.isfile(local_join(target, "subfile2"))\n assert not local_fs.exists(local_join(target, "nesteddir"))\n assert not local_fs.exists(local_join(target, "subdir"))\n\n local_fs.rm(\n [\n local_join(target, "subfile1"),\n local_join(target, "subfile2"),\n ],\n recursive=True,\n )\n else:\n assert local_fs.isdir(local_join(target, "subdir"))\n assert local_fs.isfile(local_join(target, "subdir", "subfile1"))\n assert local_fs.isfile(local_join(target, "subdir", "subfile2"))\n assert not local_fs.exists(local_join(target, "subdir", "nesteddir"))\n\n local_fs.rm(local_join(target, "subdir"), recursive=True)\n assert local_fs.ls(target) == []\n\n def test_get_directory_to_new_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n local_fs,\n local_join,\n local_target,\n ):\n # Copy scenario 1f\n source = fs_bulk_operations_scenario_0\n\n target = local_target\n local_fs.mkdir(target)\n\n for source_slash, target_slash in zip([False, True], [False, True]):\n s = fs_join(source, "subdir")\n if source_slash:\n s += "/"\n t = local_join(target, "newdir")\n if target_slash:\n t += "/"\n\n # Without recursive does nothing\n fs.get(s, t)\n assert local_fs.ls(target) == []\n\n # With recursive\n fs.get(s, t, recursive=True)\n assert local_fs.isdir(local_join(target, "newdir"))\n assert local_fs.isfile(local_join(target, "newdir", "subfile1"))\n assert local_fs.isfile(local_join(target, "newdir", "subfile2"))\n assert local_fs.isdir(local_join(target, "newdir", "nesteddir"))\n assert local_fs.isfile(\n local_join(target, "newdir", "nesteddir", "nestedfile")\n )\n assert not local_fs.exists(local_join(target, "subdir"))\n\n local_fs.rm(local_join(target, "newdir"), recursive=True)\n assert local_fs.ls(target) == []\n\n # Limit recursive by maxdepth\n fs.get(s, t, recursive=True, maxdepth=1)\n assert local_fs.isdir(local_join(target, "newdir"))\n assert local_fs.isfile(local_join(target, "newdir", "subfile1"))\n assert local_fs.isfile(local_join(target, "newdir", "subfile2"))\n assert not local_fs.exists(local_join(target, "newdir", "nesteddir"))\n assert not local_fs.exists(local_join(target, "subdir"))\n\n local_fs.rm(local_join(target, "newdir"), recursive=True)\n assert not local_fs.exists(local_join(target, "newdir"))\n\n def test_get_glob_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n local_fs,\n local_join,\n local_target,\n ):\n # Copy scenario 1g\n source = fs_bulk_operations_scenario_0\n\n target = local_target\n local_fs.mkdir(target)\n\n for target_slash in [False, True]:\n t = target + "/" if target_slash else target\n\n # Without recursive\n fs.get(fs_join(source, 
"subdir", "*"), t)\n assert local_fs.isfile(local_join(target, "subfile1"))\n assert local_fs.isfile(local_join(target, "subfile2"))\n assert not local_fs.isdir(local_join(target, "nesteddir"))\n assert not local_fs.exists(local_join(target, "nesteddir", "nestedfile"))\n assert not local_fs.exists(local_join(target, "subdir"))\n\n local_fs.rm(\n [\n local_join(target, "subfile1"),\n local_join(target, "subfile2"),\n ],\n recursive=True,\n )\n assert local_fs.ls(target) == []\n\n # With recursive\n for glob, recursive in zip(["*", "**"], [True, False]):\n fs.get(fs_join(source, "subdir", glob), t, recursive=recursive)\n assert local_fs.isfile(local_join(target, "subfile1"))\n assert local_fs.isfile(local_join(target, "subfile2"))\n assert local_fs.isdir(local_join(target, "nesteddir"))\n assert local_fs.isfile(local_join(target, "nesteddir", "nestedfile"))\n assert not local_fs.exists(local_join(target, "subdir"))\n\n local_fs.rm(\n [\n local_join(target, "subfile1"),\n local_join(target, "subfile2"),\n local_join(target, "nesteddir"),\n ],\n recursive=True,\n )\n assert local_fs.ls(target) == []\n\n # Limit recursive by maxdepth\n fs.get(\n fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1\n )\n assert local_fs.isfile(local_join(target, "subfile1"))\n assert local_fs.isfile(local_join(target, "subfile2"))\n assert not local_fs.exists(local_join(target, "nesteddir"))\n assert not local_fs.exists(local_join(target, "subdir"))\n\n local_fs.rm(\n [\n local_join(target, "subfile1"),\n local_join(target, "subfile2"),\n ],\n recursive=True,\n )\n assert local_fs.ls(target) == []\n\n def test_get_glob_to_new_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n local_fs,\n local_join,\n local_target,\n ):\n # Copy scenario 1h\n source = fs_bulk_operations_scenario_0\n\n target = local_target\n local_fs.mkdir(target)\n\n for target_slash in [False, True]:\n t = fs_join(target, "newdir")\n if target_slash:\n t += "/"\n\n # Without recursive\n fs.get(fs_join(source, "subdir", "*"), t)\n assert local_fs.isdir(local_join(target, "newdir"))\n assert local_fs.isfile(local_join(target, "newdir", "subfile1"))\n assert local_fs.isfile(local_join(target, "newdir", "subfile2"))\n assert not local_fs.exists(local_join(target, "newdir", "nesteddir"))\n assert not local_fs.exists(\n local_join(target, "newdir", "nesteddir", "nestedfile")\n )\n assert not local_fs.exists(local_join(target, "subdir"))\n assert not local_fs.exists(local_join(target, "newdir", "subdir"))\n\n local_fs.rm(local_join(target, "newdir"), recursive=True)\n assert local_fs.ls(target) == []\n\n # With recursive\n for glob, recursive in zip(["*", "**"], [True, False]):\n fs.get(fs_join(source, "subdir", glob), t, recursive=recursive)\n assert local_fs.isdir(local_join(target, "newdir"))\n assert local_fs.isfile(local_join(target, "newdir", "subfile1"))\n assert local_fs.isfile(local_join(target, "newdir", "subfile2"))\n assert local_fs.isdir(local_join(target, "newdir", "nesteddir"))\n assert local_fs.isfile(\n local_join(target, "newdir", "nesteddir", "nestedfile")\n )\n assert not local_fs.exists(local_join(target, "subdir"))\n assert not local_fs.exists(local_join(target, "newdir", "subdir"))\n\n local_fs.rm(local_join(target, "newdir"), recursive=True)\n assert not local_fs.exists(local_join(target, "newdir"))\n\n # Limit recursive by maxdepth\n fs.get(\n fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1\n )\n assert local_fs.isdir(local_join(target, "newdir"))\n assert 
local_fs.isfile(local_join(target, "newdir", "subfile1"))\n assert local_fs.isfile(local_join(target, "newdir", "subfile2"))\n assert not local_fs.exists(local_join(target, "newdir", "nesteddir"))\n assert not local_fs.exists(local_join(target, "subdir"))\n assert not local_fs.exists(local_join(target, "newdir", "subdir"))\n\n local_fs.rm(local_fs.ls(target, detail=False), recursive=True)\n assert not local_fs.exists(local_join(target, "newdir"))\n\n @pytest.mark.parametrize(\n GLOB_EDGE_CASES_TESTS["argnames"],\n GLOB_EDGE_CASES_TESTS["argvalues"],\n )\n def test_get_glob_edge_cases(\n self,\n path,\n recursive,\n maxdepth,\n expected,\n fs,\n fs_join,\n fs_glob_edge_cases_files,\n local_fs,\n local_join,\n local_target,\n ):\n # Copy scenario 1g\n source = fs_glob_edge_cases_files\n\n target = local_target\n\n for new_dir, target_slash in product([True, False], [True, False]):\n local_fs.mkdir(target)\n\n t = local_join(target, "newdir") if new_dir else target\n t = t + "/" if target_slash else t\n\n fs.get(fs_join(source, path), t, recursive=recursive, maxdepth=maxdepth)\n\n output = local_fs.find(target)\n if new_dir:\n prefixed_expected = [\n make_path_posix(local_join(target, "newdir", p)) for p in expected\n ]\n else:\n prefixed_expected = [\n make_path_posix(local_join(target, p)) for p in expected\n ]\n assert sorted(output) == sorted(prefixed_expected)\n\n try:\n local_fs.rm(target, recursive=True)\n except FileNotFoundError:\n pass\n\n def test_get_list_of_files_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n local_fs,\n local_join,\n local_target,\n ):\n # Copy scenario 2a\n source = fs_bulk_operations_scenario_0\n\n target = local_target\n local_fs.mkdir(target)\n\n source_files = [\n fs_join(source, "file1"),\n fs_join(source, "file2"),\n fs_join(source, "subdir", "subfile1"),\n ]\n\n for target_slash in [False, True]:\n t = target + "/" if target_slash else target\n\n fs.get(source_files, t)\n assert local_fs.isfile(local_join(target, "file1"))\n assert local_fs.isfile(local_join(target, "file2"))\n assert local_fs.isfile(local_join(target, "subfile1"))\n\n local_fs.rm(\n [\n local_join(target, "file1"),\n local_join(target, "file2"),\n local_join(target, "subfile1"),\n ],\n recursive=True,\n )\n assert local_fs.ls(target) == []\n\n def test_get_list_of_files_to_new_directory(\n self,\n fs,\n fs_join,\n fs_bulk_operations_scenario_0,\n local_fs,\n local_join,\n local_target,\n ):\n # Copy scenario 2b\n source = fs_bulk_operations_scenario_0\n\n target = local_target\n local_fs.mkdir(target)\n\n source_files = [\n fs_join(source, "file1"),\n fs_join(source, "file2"),\n fs_join(source, "subdir", "subfile1"),\n ]\n\n fs.get(source_files, local_join(target, "newdir") + "/") # Note trailing slash\n assert local_fs.isdir(local_join(target, "newdir"))\n assert local_fs.isfile(local_join(target, "newdir", "file1"))\n assert local_fs.isfile(local_join(target, "newdir", "file2"))\n assert local_fs.isfile(local_join(target, "newdir", "subfile1"))\n\n def test_get_directory_recursive(\n self, fs, fs_join, fs_path, local_fs, local_join, local_target\n ):\n # https://github.com/fsspec/filesystem_spec/issues/1062\n # Recursive cp/get/put of source directory into non-existent target directory.\n src = fs_join(fs_path, "src")\n src_file = fs_join(src, "file")\n fs.mkdir(src)\n fs.touch(src_file)\n\n target = local_target\n\n # get without slash\n assert not local_fs.exists(target)\n for loop in range(2):\n fs.get(src, target, recursive=True)\n assert 
local_fs.isdir(target)\n\n if loop == 0:\n assert local_fs.isfile(local_join(target, "file"))\n assert not local_fs.exists(local_join(target, "src"))\n else:\n assert local_fs.isfile(local_join(target, "file"))\n assert local_fs.isdir(local_join(target, "src"))\n assert local_fs.isfile(local_join(target, "src", "file"))\n\n local_fs.rm(target, recursive=True)\n\n # get with slash\n assert not local_fs.exists(target)\n for loop in range(2):\n fs.get(src + "/", target, recursive=True)\n assert local_fs.isdir(target)\n assert local_fs.isfile(local_join(target, "file"))\n assert not local_fs.exists(local_join(target, "src"))\n\n def test_get_directory_without_files_with_same_name_prefix(\n self,\n fs,\n fs_join,\n local_fs,\n local_join,\n local_target,\n fs_dir_and_file_with_same_name_prefix,\n ):\n # Create the test dirs\n source = fs_dir_and_file_with_same_name_prefix\n target = local_target\n\n # Test without glob\n fs.get(fs_join(source, "subdir"), target, recursive=True)\n\n assert local_fs.isfile(local_join(target, "subfile.txt"))\n assert not local_fs.isfile(local_join(target, "subdir.txt"))\n\n local_fs.rm([local_join(target, "subfile.txt")])\n assert local_fs.ls(target) == []\n\n # Test with glob\n fs.get(fs_join(source, "subdir*"), target, recursive=True)\n\n assert local_fs.isdir(local_join(target, "subdir"))\n assert local_fs.isfile(local_join(target, "subdir", "subfile.txt"))\n assert local_fs.isfile(local_join(target, "subdir.txt"))\n\n def test_get_with_source_and_destination_as_list(\n self,\n fs,\n fs_join,\n local_fs,\n local_join,\n local_target,\n fs_10_files_with_hashed_names,\n ):\n # Create the test dir\n source = fs_10_files_with_hashed_names\n target = local_target\n\n # Create list of files for source and destination\n source_files = []\n destination_files = []\n for i in range(10):\n hashed_i = md5(str(i).encode("utf-8")).hexdigest()\n source_files.append(fs_join(source, f"{hashed_i}.txt"))\n destination_files.append(\n make_path_posix(local_join(target, f"{hashed_i}.txt"))\n )\n\n # Copy and assert order was kept\n fs.get(rpath=source_files, lpath=destination_files)\n\n for i in range(10):\n file_content = local_fs.cat(destination_files[i]).decode("utf-8")\n assert file_content == str(i)\n
| .venv\Lib\site-packages\fsspec\tests\abstract\get.py | get.py | Python | 20,755 | 0.95 | 0.074957 | 0.074447 | react-lib | 710 | 2024-06-11T03:36:20.698356 | BSD-3-Clause | true | ae1e4e15ba9487848d131f4ebef16ccd |
import pytest\n\n\nclass AbstractOpenTests:\n def test_open_exclusive(self, fs, fs_target):\n with fs.open(fs_target, "wb") as f:\n f.write(b"data")\n with fs.open(fs_target, "rb") as f:\n assert f.read() == b"data"\n with pytest.raises(FileExistsError):\n fs.open(fs_target, "xb")\n
| .venv\Lib\site-packages\fsspec\tests\abstract\open.py | open.py | Python | 329 | 0.85 | 0.181818 | 0 | node-utils | 706 | 2024-08-24T00:27:49.223414 | Apache-2.0 | true | b139f16692bea90141ca6f99ca3fc8e2 |
import pytest\n\n\nclass AbstractPipeTests:\n def test_pipe_exclusive(self, fs, fs_target):\n fs.pipe_file(fs_target, b"data")\n assert fs.cat_file(fs_target) == b"data"\n with pytest.raises(FileExistsError):\n fs.pipe_file(fs_target, b"data", mode="create")\n fs.pipe_file(fs_target, b"new data", mode="overwrite")\n assert fs.cat_file(fs_target) == b"new data"\n
| .venv\Lib\site-packages\fsspec\tests\abstract\pipe.py | pipe.py | Python | 402 | 0.85 | 0.181818 | 0 | python-kit | 576 | 2025-05-29T12:13:38.543115 | GPL-3.0 | true | 351a492c5455f0a8f3d8b7f238a0ff94 |
from hashlib import md5\nfrom itertools import product\n\nimport pytest\n\nfrom fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS\n\n\nclass AbstractPutTests:\n def test_put_file_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_target,\n local_join,\n local_bulk_operations_scenario_0,\n supports_empty_directories,\n ):\n # Copy scenario 1a\n source = local_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n if not supports_empty_directories:\n # Force target directory to exist by adding a dummy file\n fs.touch(fs_join(target, "dummy"))\n assert fs.isdir(target)\n\n target_file2 = fs_join(target, "file2")\n target_subfile1 = fs_join(target, "subfile1")\n\n # Copy from source directory\n fs.put(local_join(source, "file2"), target)\n assert fs.isfile(target_file2)\n\n # Copy from sub directory\n fs.put(local_join(source, "subdir", "subfile1"), target)\n assert fs.isfile(target_subfile1)\n\n # Remove copied files\n fs.rm([target_file2, target_subfile1])\n assert not fs.exists(target_file2)\n assert not fs.exists(target_subfile1)\n\n # Repeat with trailing slash on target\n fs.put(local_join(source, "file2"), target + "/")\n assert fs.isdir(target)\n assert fs.isfile(target_file2)\n\n fs.put(local_join(source, "subdir", "subfile1"), target + "/")\n assert fs.isfile(target_subfile1)\n\n def test_put_file_to_new_directory(\n self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0\n ):\n # Copy scenario 1b\n source = local_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n\n fs.put(\n local_join(source, "subdir", "subfile1"), fs_join(target, "newdir/")\n ) # Note trailing slash\n assert fs.isdir(target)\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n\n def test_put_file_to_file_in_existing_directory(\n self,\n fs,\n fs_join,\n fs_target,\n local_join,\n supports_empty_directories,\n local_bulk_operations_scenario_0,\n ):\n # Copy scenario 1c\n source = local_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n if not supports_empty_directories:\n # Force target directory to exist by adding a dummy file\n fs.touch(fs_join(target, "dummy"))\n assert fs.isdir(target)\n\n fs.put(local_join(source, "subdir", "subfile1"), fs_join(target, "newfile"))\n assert fs.isfile(fs_join(target, "newfile"))\n\n def test_put_file_to_file_in_new_directory(\n self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0\n ):\n # Copy scenario 1d\n source = local_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n\n fs.put(\n local_join(source, "subdir", "subfile1"),\n fs_join(target, "newdir", "newfile"),\n )\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "newfile"))\n\n def test_put_directory_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_target,\n local_bulk_operations_scenario_0,\n supports_empty_directories,\n ):\n # Copy scenario 1e\n source = local_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n if not supports_empty_directories:\n # Force target directory to exist by adding a dummy file\n dummy = fs_join(target, "dummy")\n fs.touch(dummy)\n assert fs.isdir(target)\n\n for source_slash, target_slash in zip([False, True], [False, True]):\n s = fs_join(source, "subdir")\n if source_slash:\n s += "/"\n t = target + "/" if target_slash else target\n\n # Without recursive does nothing\n fs.put(s, t)\n assert fs.ls(target, detail=False) == (\n [] if 
supports_empty_directories else [dummy]\n )\n\n # With recursive\n fs.put(s, t, recursive=True)\n if source_slash:\n assert fs.isfile(fs_join(target, "subfile1"))\n assert fs.isfile(fs_join(target, "subfile2"))\n assert fs.isdir(fs_join(target, "nesteddir"))\n assert fs.isfile(fs_join(target, "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(\n [\n fs_join(target, "subfile1"),\n fs_join(target, "subfile2"),\n fs_join(target, "nesteddir"),\n ],\n recursive=True,\n )\n else:\n assert fs.isdir(fs_join(target, "subdir"))\n assert fs.isfile(fs_join(target, "subdir", "subfile1"))\n assert fs.isfile(fs_join(target, "subdir", "subfile2"))\n assert fs.isdir(fs_join(target, "subdir", "nesteddir"))\n assert fs.isfile(fs_join(target, "subdir", "nesteddir", "nestedfile"))\n\n fs.rm(fs_join(target, "subdir"), recursive=True)\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n # Limit recursive by maxdepth\n fs.put(s, t, recursive=True, maxdepth=1)\n if source_slash:\n assert fs.isfile(fs_join(target, "subfile1"))\n assert fs.isfile(fs_join(target, "subfile2"))\n assert not fs.exists(fs_join(target, "nesteddir"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(\n [\n fs_join(target, "subfile1"),\n fs_join(target, "subfile2"),\n ],\n recursive=True,\n )\n else:\n assert fs.isdir(fs_join(target, "subdir"))\n assert fs.isfile(fs_join(target, "subdir", "subfile1"))\n assert fs.isfile(fs_join(target, "subdir", "subfile2"))\n assert not fs.exists(fs_join(target, "subdir", "nesteddir"))\n\n fs.rm(fs_join(target, "subdir"), recursive=True)\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n def test_put_directory_to_new_directory(\n self,\n fs,\n fs_join,\n fs_target,\n local_bulk_operations_scenario_0,\n supports_empty_directories,\n ):\n # Copy scenario 1f\n source = local_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n\n for source_slash, target_slash in zip([False, True], [False, True]):\n s = fs_join(source, "subdir")\n if source_slash:\n s += "/"\n t = fs_join(target, "newdir")\n if target_slash:\n t += "/"\n\n # Without recursive does nothing\n fs.put(s, t)\n if supports_empty_directories:\n assert fs.ls(target) == []\n else:\n with pytest.raises(FileNotFoundError):\n fs.ls(target)\n\n # With recursive\n fs.put(s, t, recursive=True)\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n assert fs.isfile(fs_join(target, "newdir", "subfile2"))\n assert fs.isdir(fs_join(target, "newdir", "nesteddir"))\n assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(fs_join(target, "newdir"), recursive=True)\n assert not fs.exists(fs_join(target, "newdir"))\n\n # Limit recursive by maxdepth\n fs.put(s, t, recursive=True, maxdepth=1)\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n assert fs.isfile(fs_join(target, "newdir", "subfile2"))\n assert not fs.exists(fs_join(target, "newdir", "nesteddir"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(fs_join(target, "newdir"), recursive=True)\n assert not fs.exists(fs_join(target, "newdir"))\n\n def test_put_glob_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_target,\n local_join,\n supports_empty_directories,\n local_bulk_operations_scenario_0,\n ):\n # Copy scenario 1g\n source = 
local_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n if not supports_empty_directories:\n # Force target directory to exist by adding a dummy file\n dummy = fs_join(target, "dummy")\n fs.touch(dummy)\n assert fs.isdir(target)\n\n for target_slash in [False, True]:\n t = target + "/" if target_slash else target\n\n # Without recursive\n fs.put(local_join(source, "subdir", "*"), t)\n assert fs.isfile(fs_join(target, "subfile1"))\n assert fs.isfile(fs_join(target, "subfile2"))\n assert not fs.isdir(fs_join(target, "nesteddir"))\n assert not fs.exists(fs_join(target, "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(\n [\n fs_join(target, "subfile1"),\n fs_join(target, "subfile2"),\n ],\n recursive=True,\n )\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n # With recursive\n for glob, recursive in zip(["*", "**"], [True, False]):\n fs.put(local_join(source, "subdir", glob), t, recursive=recursive)\n assert fs.isfile(fs_join(target, "subfile1"))\n assert fs.isfile(fs_join(target, "subfile2"))\n assert fs.isdir(fs_join(target, "nesteddir"))\n assert fs.isfile(fs_join(target, "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(\n [\n fs_join(target, "subfile1"),\n fs_join(target, "subfile2"),\n fs_join(target, "nesteddir"),\n ],\n recursive=True,\n )\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n # Limit recursive by maxdepth\n fs.put(\n local_join(source, "subdir", glob),\n t,\n recursive=recursive,\n maxdepth=1,\n )\n assert fs.isfile(fs_join(target, "subfile1"))\n assert fs.isfile(fs_join(target, "subfile2"))\n assert not fs.exists(fs_join(target, "nesteddir"))\n assert not fs.exists(fs_join(target, "subdir"))\n\n fs.rm(\n [\n fs_join(target, "subfile1"),\n fs_join(target, "subfile2"),\n ],\n recursive=True,\n )\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n def test_put_glob_to_new_directory(\n self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0\n ):\n # Copy scenario 1h\n source = local_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n\n for target_slash in [False, True]:\n t = fs_join(target, "newdir")\n if target_slash:\n t += "/"\n\n # Without recursive\n fs.put(local_join(source, "subdir", "*"), t)\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n assert fs.isfile(fs_join(target, "newdir", "subfile2"))\n assert not fs.exists(fs_join(target, "newdir", "nesteddir"))\n assert not fs.exists(fs_join(target, "newdir", "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n assert not fs.exists(fs_join(target, "newdir", "subdir"))\n\n fs.rm(fs_join(target, "newdir"), recursive=True)\n assert not fs.exists(fs_join(target, "newdir"))\n\n # With recursive\n for glob, recursive in zip(["*", "**"], [True, False]):\n fs.put(local_join(source, "subdir", glob), t, recursive=recursive)\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n assert fs.isfile(fs_join(target, "newdir", "subfile2"))\n assert fs.isdir(fs_join(target, "newdir", "nesteddir"))\n assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile"))\n assert not fs.exists(fs_join(target, "subdir"))\n assert not fs.exists(fs_join(target, "newdir", "subdir"))\n\n fs.rm(fs_join(target, "newdir"), recursive=True)\n 
assert not fs.exists(fs_join(target, "newdir"))\n\n # Limit recursive by maxdepth\n fs.put(\n local_join(source, "subdir", glob),\n t,\n recursive=recursive,\n maxdepth=1,\n )\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n assert fs.isfile(fs_join(target, "newdir", "subfile2"))\n assert not fs.exists(fs_join(target, "newdir", "nesteddir"))\n assert not fs.exists(fs_join(target, "subdir"))\n assert not fs.exists(fs_join(target, "newdir", "subdir"))\n\n fs.rm(fs_join(target, "newdir"), recursive=True)\n assert not fs.exists(fs_join(target, "newdir"))\n\n @pytest.mark.parametrize(\n GLOB_EDGE_CASES_TESTS["argnames"],\n GLOB_EDGE_CASES_TESTS["argvalues"],\n )\n def test_put_glob_edge_cases(\n self,\n path,\n recursive,\n maxdepth,\n expected,\n fs,\n fs_join,\n fs_target,\n local_glob_edge_cases_files,\n local_join,\n fs_sanitize_path,\n ):\n # Copy scenario 1g\n source = local_glob_edge_cases_files\n\n target = fs_target\n\n for new_dir, target_slash in product([True, False], [True, False]):\n fs.mkdir(target)\n\n t = fs_join(target, "newdir") if new_dir else target\n t = t + "/" if target_slash else t\n\n fs.put(local_join(source, path), t, recursive=recursive, maxdepth=maxdepth)\n\n output = fs.find(target)\n if new_dir:\n prefixed_expected = [\n fs_sanitize_path(fs_join(target, "newdir", p)) for p in expected\n ]\n else:\n prefixed_expected = [\n fs_sanitize_path(fs_join(target, p)) for p in expected\n ]\n assert sorted(output) == sorted(prefixed_expected)\n\n try:\n fs.rm(target, recursive=True)\n except FileNotFoundError:\n pass\n\n def test_put_list_of_files_to_existing_directory(\n self,\n fs,\n fs_join,\n fs_target,\n local_join,\n local_bulk_operations_scenario_0,\n supports_empty_directories,\n ):\n # Copy scenario 2a\n source = local_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n if not supports_empty_directories:\n # Force target directory to exist by adding a dummy file\n dummy = fs_join(target, "dummy")\n fs.touch(dummy)\n assert fs.isdir(target)\n\n source_files = [\n local_join(source, "file1"),\n local_join(source, "file2"),\n local_join(source, "subdir", "subfile1"),\n ]\n\n for target_slash in [False, True]:\n t = target + "/" if target_slash else target\n\n fs.put(source_files, t)\n assert fs.isfile(fs_join(target, "file1"))\n assert fs.isfile(fs_join(target, "file2"))\n assert fs.isfile(fs_join(target, "subfile1"))\n\n fs.rm(\n [\n fs_join(target, "file1"),\n fs_join(target, "file2"),\n fs_join(target, "subfile1"),\n ],\n recursive=True,\n )\n assert fs.ls(target, detail=False) == (\n [] if supports_empty_directories else [dummy]\n )\n\n def test_put_list_of_files_to_new_directory(\n self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0\n ):\n # Copy scenario 2b\n source = local_bulk_operations_scenario_0\n\n target = fs_target\n fs.mkdir(target)\n\n source_files = [\n local_join(source, "file1"),\n local_join(source, "file2"),\n local_join(source, "subdir", "subfile1"),\n ]\n\n fs.put(source_files, fs_join(target, "newdir") + "/") # Note trailing slash\n assert fs.isdir(fs_join(target, "newdir"))\n assert fs.isfile(fs_join(target, "newdir", "file1"))\n assert fs.isfile(fs_join(target, "newdir", "file2"))\n assert fs.isfile(fs_join(target, "newdir", "subfile1"))\n\n def test_put_directory_recursive(\n self, fs, fs_join, fs_target, local_fs, local_join, local_path\n ):\n # https://github.com/fsspec/filesystem_spec/issues/1062\n # Recursive cp/get/put of source directory 
into non-existent target directory.\n src = local_join(local_path, "src")\n src_file = local_join(src, "file")\n local_fs.mkdir(src)\n local_fs.touch(src_file)\n\n target = fs_target\n\n # put without slash\n assert not fs.exists(target)\n for loop in range(2):\n fs.put(src, target, recursive=True)\n assert fs.isdir(target)\n\n if loop == 0:\n assert fs.isfile(fs_join(target, "file"))\n assert not fs.exists(fs_join(target, "src"))\n else:\n assert fs.isfile(fs_join(target, "file"))\n assert fs.isdir(fs_join(target, "src"))\n assert fs.isfile(fs_join(target, "src", "file"))\n\n fs.rm(target, recursive=True)\n\n # put with slash\n assert not fs.exists(target)\n for loop in range(2):\n fs.put(src + "/", target, recursive=True)\n assert fs.isdir(target)\n assert fs.isfile(fs_join(target, "file"))\n assert not fs.exists(fs_join(target, "src"))\n\n def test_put_directory_without_files_with_same_name_prefix(\n self,\n fs,\n fs_join,\n fs_target,\n local_join,\n local_dir_and_file_with_same_name_prefix,\n supports_empty_directories,\n ):\n # Create the test dirs\n source = local_dir_and_file_with_same_name_prefix\n target = fs_target\n\n # Test without glob\n fs.put(local_join(source, "subdir"), fs_target, recursive=True)\n\n assert fs.isfile(fs_join(fs_target, "subfile.txt"))\n assert not fs.isfile(fs_join(fs_target, "subdir.txt"))\n\n fs.rm([fs_join(target, "subfile.txt")])\n if supports_empty_directories:\n assert fs.ls(target) == []\n else:\n assert not fs.exists(target)\n\n # Test with glob\n fs.put(local_join(source, "subdir*"), fs_target, recursive=True)\n\n assert fs.isdir(fs_join(fs_target, "subdir"))\n assert fs.isfile(fs_join(fs_target, "subdir", "subfile.txt"))\n assert fs.isfile(fs_join(fs_target, "subdir.txt"))\n\n def test_copy_with_source_and_destination_as_list(\n self, fs, fs_target, fs_join, local_join, local_10_files_with_hashed_names\n ):\n # Create the test dir\n source = local_10_files_with_hashed_names\n target = fs_target\n\n # Create list of files for source and destination\n source_files = []\n destination_files = []\n for i in range(10):\n hashed_i = md5(str(i).encode("utf-8")).hexdigest()\n source_files.append(local_join(source, f"{hashed_i}.txt"))\n destination_files.append(fs_join(target, f"{hashed_i}.txt"))\n\n # Copy and assert order was kept\n fs.put(lpath=source_files, rpath=destination_files)\n\n for i in range(10):\n file_content = fs.cat(destination_files[i]).decode("utf-8")\n assert file_content == str(i)\n
| .venv\Lib\site-packages\fsspec\tests\abstract\put.py | put.py | Python | 21,201 | 0.95 | 0.098139 | 0.083665 | awesome-app | 97 | 2023-07-21T06:37:10.188118 | Apache-2.0 | true | 32d8f8329b88b8405ef98de5f7418e22 |
\n\n | .venv\Lib\site-packages\fsspec\tests\abstract\__pycache__\common.cpython-313.pyc | common.cpython-313.pyc | Other | 2,144 | 0.8 | 0 | 0.038961 | python-kit | 1 | 2023-11-22T14:16:47.112179 | GPL-3.0 | true | fb1802ff005eb3e54bff695a12d7a6d1 |
\n\n | .venv\Lib\site-packages\fsspec\tests\abstract\__pycache__\copy.cpython-313.pyc | copy.cpython-313.pyc | Other | 22,444 | 0.8 | 0 | 0 | node-utils | 974 | 2023-12-15T04:00:43.767539 | GPL-3.0 | true | 0c0fe48c929bcd52b63fd21467191bc8 |
\n\n | .venv\Lib\site-packages\fsspec\tests\abstract\__pycache__\get.cpython-313.pyc | get.cpython-313.pyc | Other | 22,178 | 0.8 | 0 | 0.040816 | python-kit | 960 | 2024-01-09T01:28:46.460489 | Apache-2.0 | true | fe3d90bf9076b1a56c029b373c356da9 |
\n\n | .venv\Lib\site-packages\fsspec\tests\abstract\__pycache__\mv.cpython-313.pyc | mv.cpython-313.pyc | Other | 2,574 | 0.7 | 0 | 0.029412 | react-lib | 561 | 2024-12-29T00:07:59.252335 | GPL-3.0 | true | 5fc3d49298d2fa05e8d4db0b9d5ba428 |
\n\n | .venv\Lib\site-packages\fsspec\tests\abstract\__pycache__\open.cpython-313.pyc | open.cpython-313.pyc | Other | 1,248 | 0.8 | 0 | 0 | vue-tools | 984 | 2024-01-11T12:57:33.198110 | GPL-3.0 | true | 08fa20dcb91df277786ab2a65ba6b256 |
\n\n | .venv\Lib\site-packages\fsspec\tests\abstract\__pycache__\pipe.cpython-313.pyc | pipe.cpython-313.pyc | Other | 1,147 | 0.7 | 0 | 0 | react-lib | 20 | 2024-01-03T12:05:14.685296 | MIT | true | ae9c18ff286888070d88be3ecc3405a2 |
\n\n | .venv\Lib\site-packages\fsspec\tests\abstract\__pycache__\put.cpython-313.pyc | put.cpython-313.pyc | Other | 23,529 | 0.8 | 0 | 0 | vue-tools | 32 | 2024-08-21T00:04:17.856906 | BSD-3-Clause | true | ea6faf60cd5389a91925460f6bc66219 |
\n\n | .venv\Lib\site-packages\fsspec\tests\abstract\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 13,218 | 0.95 | 0.074713 | 0 | vue-tools | 616 | 2024-04-27T21:10:44.805319 | Apache-2.0 | true | 36a80290e012f84160d4afdb2ec45c86 |
\n\n | .venv\Lib\site-packages\fsspec\__pycache__\archive.cpython-313.pyc | archive.cpython-313.pyc | Other | 4,138 | 0.95 | 0.119048 | 0 | python-kit | 455 | 2024-07-06T00:51:17.470426 | GPL-3.0 | false | 5f5b4f4a029327e661fb33cb1b9dbd7d |
\n\n | .venv\Lib\site-packages\fsspec\__pycache__\asyn.cpython-313.pyc | asyn.cpython-313.pyc | Other | 46,898 | 0.95 | 0.047423 | 0.006536 | awesome-app | 886 | 2025-06-24T08:32:25.600942 | BSD-3-Clause | false | e07bd41ae8ea8089b338e9953fc665ee |
\n\n | .venv\Lib\site-packages\fsspec\__pycache__\caching.cpython-313.pyc | caching.cpython-313.pyc | Other | 38,469 | 0.95 | 0.030238 | 0.002331 | vue-tools | 24 | 2024-08-29T04:09:37.090959 | MIT | false | 91b5bc912067e5dd0149572a9ac89178 |
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\callbacks.cpython-313.pyc
|
callbacks.cpython-313.pyc
|
Other
| 12,240 | 0.95 | 0.077236 | 0.004717 |
awesome-app
| 993 |
2024-07-10T04:49:47.172899
|
Apache-2.0
| false |
a26531a29d4a57ec556cb29349b58de4
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\compression.cpython-313.pyc
|
compression.cpython-313.pyc
|
Other
| 7,323 | 0.95 | 0.05618 | 0 |
awesome-app
| 91 |
2024-09-24T22:43:13.929142
|
BSD-3-Clause
| false |
1a57e7d597e29674b19a2c30af6d1f32
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\config.cpython-313.pyc
|
config.cpython-313.pyc
|
Other
| 5,969 | 0.8 | 0.061728 | 0 |
react-lib
| 402 |
2024-04-21T11:11:48.075708
|
BSD-3-Clause
| false |
919dc96c3cceec7bdf661cbc63279db6
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\conftest.cpython-313.pyc
|
conftest.cpython-313.pyc
|
Other
| 3,134 | 0.95 | 0 | 0 |
awesome-app
| 54 |
2024-02-23T22:03:05.592877
|
MIT
| true |
ed82913a8510d001a2a9d32a8ebedb12
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\core.cpython-313.pyc
|
core.cpython-313.pyc
|
Other
| 28,049 | 0.95 | 0.059242 | 0.007692 |
vue-tools
| 678 |
2025-05-10T13:36:09.213908
|
MIT
| false |
20624be9a744cbce06a28e440fa2efb1
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\dircache.cpython-313.pyc
|
dircache.cpython-313.pyc
|
Other
| 4,507 | 0.95 | 0.017241 | 0 |
vue-tools
| 604 |
2024-08-29T05:25:39.274445
|
Apache-2.0
| false |
4241dbe3eed6e0da73b3587faefad45e
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\exceptions.cpython-313.pyc
|
exceptions.cpython-313.pyc
|
Other
| 893 | 0.85 | 0.090909 | 0 |
vue-tools
| 241 |
2024-04-12T23:02:32.504388
|
GPL-3.0
| false |
15ca5a6e93d7103d08c312918820051a
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\fuse.cpython-313.pyc
|
fuse.cpython-313.pyc
|
Other
| 15,637 | 0.95 | 0.027473 | 0.018634 |
react-lib
| 487 |
2024-05-19T06:55:04.239402
|
MIT
| false |
089d7931e349ae424ce10c5660623a08
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\generic.cpython-313.pyc
|
generic.cpython-313.pyc
|
Other
| 19,612 | 0.95 | 0.035971 | 0.015209 |
node-utils
| 785 |
2025-02-12T01:09:46.926824
|
Apache-2.0
| false |
91c134b1294be1ec268ce1d5f5fd1d27
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\gui.cpython-313.pyc
|
gui.cpython-313.pyc
|
Other
| 21,431 | 0.95 | 0.04918 | 0.004926 |
awesome-app
| 417 |
2024-04-17T17:34:55.614182
|
BSD-3-Clause
| false |
80d1b5a4abbb2a26b555c37e67239bdb
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\json.cpython-313.pyc
|
json.cpython-313.pyc
|
Other
| 6,426 | 0.95 | 0.012048 | 0.0125 |
react-lib
| 851 |
2024-07-28T00:05:56.604153
|
Apache-2.0
| false |
45061292af5005584a3d03f46a739ab2
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\mapping.cpython-313.pyc
|
mapping.cpython-313.pyc
|
Other
| 12,128 | 0.95 | 0.055944 | 0 |
react-lib
| 336 |
2024-02-11T15:46:48.552164
|
Apache-2.0
| false |
867d32d199a05b85ff48be307cd7c5c6
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\parquet.cpython-313.pyc
|
parquet.cpython-313.pyc
|
Other
| 15,703 | 0.95 | 0.05 | 0.004329 |
node-utils
| 416 |
2024-05-19T04:48:39.250421
|
MIT
| false |
5d4c9b240c43dbdedbc5b29f41f506d2
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\registry.cpython-313.pyc
|
registry.cpython-313.pyc
|
Other
| 11,110 | 0.95 | 0.149254 | 0 |
awesome-app
| 254 |
2023-07-17T16:02:06.439363
|
BSD-3-Clause
| false |
5686ae394aceeeb70ae3ae908085471f
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\spec.cpython-313.pyc
|
spec.cpython-313.pyc
|
Other
| 87,390 | 0.75 | 0.079828 | 0.008721 |
vue-tools
| 141 |
2023-09-26T23:19:48.395358
|
GPL-3.0
| false |
13db30c5530e36b0e82972482e9f5c73
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\transaction.cpython-313.pyc
|
transaction.cpython-313.pyc
|
Other
| 4,716 | 0.8 | 0.029851 | 0 |
react-lib
| 517 |
2025-02-05T03:41:55.022710
|
BSD-3-Clause
| false |
e44d03296a38d777fbd0bf483fe418ef
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\utils.cpython-313.pyc
|
utils.cpython-313.pyc
|
Other
| 28,244 | 0.95 | 0.028133 | 0.008596 |
node-utils
| 400 |
2023-10-31T07:42:05.468293
|
MIT
| false |
535d81f1840e3d3dc5626a39c8faf670
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\_version.cpython-313.pyc
|
_version.cpython-313.pyc
|
Other
| 655 | 0.7 | 0 | 0 |
python-kit
| 906 |
2024-07-17T08:28:07.749636
|
GPL-3.0
| false |
8aefaa73cf0ae5350c216c5936833604
|
\n\n
|
.venv\Lib\site-packages\fsspec\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 1,834 | 0.85 | 0 | 0 |
node-utils
| 370 |
2024-12-09T08:22:21.620602
|
BSD-3-Clause
| false |
8b64b50665a8ceaf7b7f6fb7988dbf83
|
pip\n
|
.venv\Lib\site-packages\fsspec-2025.3.0.dist-info\INSTALLER
|
INSTALLER
|
Other
| 4 | 0.5 | 0 | 0 |
react-lib
| 764 |
2024-06-17T01:31:37.593344
|
Apache-2.0
| false |
365c9bfeb7d89244f2ce01c1de44cb85
|
Metadata-Version: 2.4\nName: fsspec\nVersion: 2025.3.0\nSummary: File-system specification\nProject-URL: Changelog, https://filesystem-spec.readthedocs.io/en/latest/changelog.html\nProject-URL: Documentation, https://filesystem-spec.readthedocs.io/en/latest/\nProject-URL: Homepage, https://github.com/fsspec/filesystem_spec\nMaintainer-email: Martin Durant <mdurant@anaconda.com>\nLicense: BSD 3-Clause License\n \n Copyright (c) 2018, Martin Durant\n All rights reserved.\n \n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n \n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n \n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n \n * Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n \n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nLicense-File: LICENSE\nKeywords: file\nClassifier: Development Status :: 4 - Beta\nClassifier: Intended Audience :: Developers\nClassifier: License :: OSI Approved :: BSD License\nClassifier: Operating System :: OS Independent\nClassifier: Programming Language :: Python :: 3.8\nClassifier: Programming Language :: Python :: 3.9\nClassifier: Programming Language :: Python :: 3.10\nClassifier: Programming Language :: Python :: 3.11\nClassifier: Programming Language :: Python :: 3.12\nClassifier: Programming Language :: Python :: 3.13\nRequires-Python: >=3.8\nProvides-Extra: abfs\nRequires-Dist: adlfs; extra == 'abfs'\nProvides-Extra: adl\nRequires-Dist: adlfs; extra == 'adl'\nProvides-Extra: arrow\nRequires-Dist: pyarrow>=1; extra == 'arrow'\nProvides-Extra: dask\nRequires-Dist: dask; extra == 'dask'\nRequires-Dist: distributed; extra == 'dask'\nProvides-Extra: dev\nRequires-Dist: pre-commit; extra == 'dev'\nRequires-Dist: ruff; extra == 'dev'\nProvides-Extra: doc\nRequires-Dist: numpydoc; extra == 'doc'\nRequires-Dist: sphinx; extra == 'doc'\nRequires-Dist: sphinx-design; extra == 'doc'\nRequires-Dist: sphinx-rtd-theme; extra == 'doc'\nRequires-Dist: yarl; extra == 'doc'\nProvides-Extra: dropbox\nRequires-Dist: dropbox; extra == 'dropbox'\nRequires-Dist: dropboxdrivefs; extra == 'dropbox'\nRequires-Dist: requests; extra == 'dropbox'\nProvides-Extra: entrypoints\nProvides-Extra: full\nRequires-Dist: adlfs; extra == 'full'\nRequires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'full'\nRequires-Dist: dask; extra == 'full'\nRequires-Dist: distributed; extra == 'full'\nRequires-Dist: 
dropbox; extra == 'full'\nRequires-Dist: dropboxdrivefs; extra == 'full'\nRequires-Dist: fusepy; extra == 'full'\nRequires-Dist: gcsfs; extra == 'full'\nRequires-Dist: libarchive-c; extra == 'full'\nRequires-Dist: ocifs; extra == 'full'\nRequires-Dist: panel; extra == 'full'\nRequires-Dist: paramiko; extra == 'full'\nRequires-Dist: pyarrow>=1; extra == 'full'\nRequires-Dist: pygit2; extra == 'full'\nRequires-Dist: requests; extra == 'full'\nRequires-Dist: s3fs; extra == 'full'\nRequires-Dist: smbprotocol; extra == 'full'\nRequires-Dist: tqdm; extra == 'full'\nProvides-Extra: fuse\nRequires-Dist: fusepy; extra == 'fuse'\nProvides-Extra: gcs\nRequires-Dist: gcsfs; extra == 'gcs'\nProvides-Extra: git\nRequires-Dist: pygit2; extra == 'git'\nProvides-Extra: github\nRequires-Dist: requests; extra == 'github'\nProvides-Extra: gs\nRequires-Dist: gcsfs; extra == 'gs'\nProvides-Extra: gui\nRequires-Dist: panel; extra == 'gui'\nProvides-Extra: hdfs\nRequires-Dist: pyarrow>=1; extra == 'hdfs'\nProvides-Extra: http\nRequires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'http'\nProvides-Extra: libarchive\nRequires-Dist: libarchive-c; extra == 'libarchive'\nProvides-Extra: oci\nRequires-Dist: ocifs; extra == 'oci'\nProvides-Extra: s3\nRequires-Dist: s3fs; extra == 's3'\nProvides-Extra: sftp\nRequires-Dist: paramiko; extra == 'sftp'\nProvides-Extra: smb\nRequires-Dist: smbprotocol; extra == 'smb'\nProvides-Extra: ssh\nRequires-Dist: paramiko; extra == 'ssh'\nProvides-Extra: test\nRequires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'test'\nRequires-Dist: numpy; extra == 'test'\nRequires-Dist: pytest; extra == 'test'\nRequires-Dist: pytest-asyncio!=0.22.0; extra == 'test'\nRequires-Dist: pytest-benchmark; extra == 'test'\nRequires-Dist: pytest-cov; extra == 'test'\nRequires-Dist: pytest-mock; extra == 'test'\nRequires-Dist: pytest-recording; extra == 'test'\nRequires-Dist: pytest-rerunfailures; extra == 'test'\nRequires-Dist: requests; extra == 'test'\nProvides-Extra: test-downstream\nRequires-Dist: aiobotocore<3.0.0,>=2.5.4; extra == 'test-downstream'\nRequires-Dist: dask[dataframe,test]; extra == 'test-downstream'\nRequires-Dist: moto[server]<5,>4; extra == 'test-downstream'\nRequires-Dist: pytest-timeout; extra == 'test-downstream'\nRequires-Dist: xarray; extra == 'test-downstream'\nProvides-Extra: test-full\nRequires-Dist: adlfs; extra == 'test-full'\nRequires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'test-full'\nRequires-Dist: cloudpickle; extra == 'test-full'\nRequires-Dist: dask; extra == 'test-full'\nRequires-Dist: distributed; extra == 'test-full'\nRequires-Dist: dropbox; extra == 'test-full'\nRequires-Dist: dropboxdrivefs; extra == 'test-full'\nRequires-Dist: fastparquet; extra == 'test-full'\nRequires-Dist: fusepy; extra == 'test-full'\nRequires-Dist: gcsfs; extra == 'test-full'\nRequires-Dist: jinja2; extra == 'test-full'\nRequires-Dist: kerchunk; extra == 'test-full'\nRequires-Dist: libarchive-c; extra == 'test-full'\nRequires-Dist: lz4; extra == 'test-full'\nRequires-Dist: notebook; extra == 'test-full'\nRequires-Dist: numpy; extra == 'test-full'\nRequires-Dist: ocifs; extra == 'test-full'\nRequires-Dist: pandas; extra == 'test-full'\nRequires-Dist: panel; extra == 'test-full'\nRequires-Dist: paramiko; extra == 'test-full'\nRequires-Dist: pyarrow; extra == 'test-full'\nRequires-Dist: pyarrow>=1; extra == 'test-full'\nRequires-Dist: pyftpdlib; extra == 'test-full'\nRequires-Dist: pygit2; extra == 'test-full'\nRequires-Dist: pytest; extra == 'test-full'\nRequires-Dist: pytest-asyncio!=0.22.0; 
extra == 'test-full'\nRequires-Dist: pytest-benchmark; extra == 'test-full'\nRequires-Dist: pytest-cov; extra == 'test-full'\nRequires-Dist: pytest-mock; extra == 'test-full'\nRequires-Dist: pytest-recording; extra == 'test-full'\nRequires-Dist: pytest-rerunfailures; extra == 'test-full'\nRequires-Dist: python-snappy; extra == 'test-full'\nRequires-Dist: requests; extra == 'test-full'\nRequires-Dist: smbprotocol; extra == 'test-full'\nRequires-Dist: tqdm; extra == 'test-full'\nRequires-Dist: urllib3; extra == 'test-full'\nRequires-Dist: zarr; extra == 'test-full'\nRequires-Dist: zstandard; extra == 'test-full'\nProvides-Extra: tqdm\nRequires-Dist: tqdm; extra == 'tqdm'\nDescription-Content-Type: text/markdown\n\n# filesystem_spec\n\n[](https://pypi.python.org/pypi/fsspec/)\n[](https://anaconda.org/conda-forge/fsspec)\n\n[](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest)\n\nA specification for pythonic filesystems.\n\n## Install\n\n```bash\npip install fsspec\n```\n\nwould install the base fsspec. Various optionally supported features might require specification of custom\nextra require, e.g. `pip install fsspec[ssh]` will install dependencies for `ssh` backends support.\nUse `pip install fsspec[full]` for installation of all known extra dependencies.\n\nUp-to-date package also provided through conda-forge distribution:\n\n```bash\nconda install -c conda-forge fsspec\n```\n\n\n## Purpose\n\nTo produce a template or specification for a file-system interface, that specific implementations should follow,\nso that applications making use of them can rely on a common behaviour and not have to worry about the specific\ninternal implementation decisions with any given backend. Many such implementations are included in this package,\nor in sister projects such as `s3fs` and `gcsfs`.\n\nIn addition, if this is well-designed, then additional functionality, such as a key-value store or FUSE\nmounting of the file-system implementation may be available for all implementations "for free".\n\n## Documentation\n\nPlease refer to [RTD](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest)\n\n## Develop\n\nfsspec uses GitHub Actions for CI. Environment files can be found\nin the "ci/" directory. Note that the main environment is called "py38",\nbut it is expected that the version of python installed be adjustable at\nCI runtime. For local use, pick a version suitable for you.\n\n```bash\n# For a new environment (mamba / conda).\nmamba create -n fsspec -c conda-forge python=3.9 -y\nconda activate fsspec\n\n# Standard dev install with docs and tests.\npip install -e ".[dev,doc,test]"\n\n# Full tests except for downstream\npip install s3fs\npip uninstall s3fs\npip install -e .[dev,doc,test_full]\npip install s3fs --no-deps\npytest -v\n\n# Downstream tests.\nsh install_s3fs.sh\n# Windows powershell.\ninstall_s3fs.sh\n```\n\n### Testing\n\nTests can be run in the dev environment, if activated, via ``pytest fsspec``.\n\nThe full fsspec suite requires a system-level docker, docker-compose, and fuse\ninstallation. If only making changes to one backend implementation, it is\nnot generally necessary to run all tests locally.\n\nIt is expected that contributors ensure that any change to fsspec does not\ncause issues or regressions for either other fsspec-related packages such\nas gcsfs and s3fs, nor for downstream users of fsspec. 
The "downstream" CI\nrun and corresponding environment file run a set of tests from the dask\ntest suite, and very minimal tests against pandas and zarr from the\ntest_downstream.py module in this repo.\n\n### Code Formatting\n\nfsspec uses [Black](https://black.readthedocs.io/en/stable) to ensure\na consistent code format throughout the project.\nRun ``black fsspec`` from the root of the filesystem_spec repository to\nauto-format your code. Additionally, many editors have plugins that will apply\n``black`` as you edit files. ``black`` is included in the ``tox`` environments.\n\nOptionally, you may wish to setup [pre-commit hooks](https://pre-commit.com) to\nautomatically run ``black`` when you make a git commit.\nRun ``pre-commit install --install-hooks`` from the root of the\nfilesystem_spec repository to setup pre-commit hooks. ``black`` will now be run\nbefore you commit, reformatting any changed files. You can format without\ncommitting via ``pre-commit run`` or skip these checks with ``git commit\n--no-verify``.\n
|
.venv\Lib\site-packages\fsspec-2025.3.0.dist-info\METADATA
|
METADATA
|
Other
| 11,747 | 0.95 | 0.046763 | 0.061224 |
react-lib
| 341 |
2025-03-01T23:23:56.625939
|
Apache-2.0
| false |
5b171d889a1ab99135938a47b47db2d1
|
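The fsspec README embedded in the METADATA record above describes a unified, pythonic filesystem interface that backends implement. As a quick illustration of what that buys downstream code, here is a minimal sketch using only the built-in `memory` protocol, so no optional extras are required; the paths and contents are arbitrary:

```python
import fsspec

# Open a path through the unified interface; "memory://" ships with fsspec.
with fsspec.open("memory://demo/hello.txt", "w") as f:
    f.write("hello fsspec")

# The same data is reachable through the filesystem object for that protocol.
fs = fsspec.filesystem("memory")
print(fs.cat_file("/demo/hello.txt"))   # b'hello fsspec'
print(fs.ls("/demo", detail=False))     # ['/demo/hello.txt']
```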
fsspec-2025.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4\nfsspec-2025.3.0.dist-info/METADATA,sha256=L2OjjikjNWpjvp8i5_ec515Cd09eUi8qZ_3peQmWG8M,11747\nfsspec-2025.3.0.dist-info/RECORD,,\nfsspec-2025.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87\nfsspec-2025.3.0.dist-info/licenses/LICENSE,sha256=LcNUls5TpzB5FcAIqESq1T53K0mzTN0ARFBnaRQH7JQ,1513\nfsspec/__init__.py,sha256=l9MJaNNV2d4wKpCtMvXDr55n92DkdrAayGy3F9ICjzk,1998\nfsspec/__pycache__/__init__.cpython-313.pyc,,\nfsspec/__pycache__/_version.cpython-313.pyc,,\nfsspec/__pycache__/archive.cpython-313.pyc,,\nfsspec/__pycache__/asyn.cpython-313.pyc,,\nfsspec/__pycache__/caching.cpython-313.pyc,,\nfsspec/__pycache__/callbacks.cpython-313.pyc,,\nfsspec/__pycache__/compression.cpython-313.pyc,,\nfsspec/__pycache__/config.cpython-313.pyc,,\nfsspec/__pycache__/conftest.cpython-313.pyc,,\nfsspec/__pycache__/core.cpython-313.pyc,,\nfsspec/__pycache__/dircache.cpython-313.pyc,,\nfsspec/__pycache__/exceptions.cpython-313.pyc,,\nfsspec/__pycache__/fuse.cpython-313.pyc,,\nfsspec/__pycache__/generic.cpython-313.pyc,,\nfsspec/__pycache__/gui.cpython-313.pyc,,\nfsspec/__pycache__/json.cpython-313.pyc,,\nfsspec/__pycache__/mapping.cpython-313.pyc,,\nfsspec/__pycache__/parquet.cpython-313.pyc,,\nfsspec/__pycache__/registry.cpython-313.pyc,,\nfsspec/__pycache__/spec.cpython-313.pyc,,\nfsspec/__pycache__/transaction.cpython-313.pyc,,\nfsspec/__pycache__/utils.cpython-313.pyc,,\nfsspec/_version.py,sha256=mWdw-j-kCQGfmn6901GQSyjFXi2pK3h-NrKTuLRVqCw,517\nfsspec/archive.py,sha256=vM6t_lgV6lBWbBYwpm3S4ofBQFQxUPr5KkDQrrQcQro,2411\nfsspec/asyn.py,sha256=rsnCsFUmBZmKJqg9m-IDWInoQtE4wV0rGDZEXZwuU3c,36500\nfsspec/caching.py,sha256=oHVy9zpy4Oqk5f1t3-Q31bbw0tsmfddGGKLJs__OdKA,32790\nfsspec/callbacks.py,sha256=BDIwLzK6rr_0V5ch557fSzsivCElpdqhXr5dZ9Te-EE,9210\nfsspec/compression.py,sha256=jCSUMJu-zSNyrusnHT0wKXgOd1tTJR6vM126i5SR5Zc,4865\nfsspec/config.py,sha256=LF4Zmu1vhJW7Je9Q-cwkRc3xP7Rhyy7Xnwj26Z6sv2g,4279\nfsspec/conftest.py,sha256=fVfx-NLrH_OZS1TIpYNoPzM7efEcMoL62reHOdYeFCA,1245\nfsspec/core.py,sha256=1tLctwr7sF1VO3djc_UkjhJ8IAEy0TUMH_bb07Sw17E,23828\nfsspec/dircache.py,sha256=YzogWJrhEastHU7vWz-cJiJ7sdtLXFXhEpInGKd4EcM,2717\nfsspec/exceptions.py,sha256=pauSLDMxzTJMOjvX1WEUK0cMyFkrFxpWJsyFywav7A8,331\nfsspec/fuse.py,sha256=Q-3NOOyLqBfYa4Db5E19z_ZY36zzYHtIs1mOUasItBQ,10177\nfsspec/generic.py,sha256=AFbo-mHBt5QJV1Aplg5CJuUiiJ4bNQhcKRuwkZJdWac,13761\nfsspec/gui.py,sha256=xBnHL2-r0LVwhDAtnHoPpXts7jd4Z32peawCJiI-7lI,13975\nfsspec/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\nfsspec/implementations/__pycache__/__init__.cpython-313.pyc,,\nfsspec/implementations/__pycache__/arrow.cpython-313.pyc,,\nfsspec/implementations/__pycache__/asyn_wrapper.cpython-313.pyc,,\nfsspec/implementations/__pycache__/cache_mapper.cpython-313.pyc,,\nfsspec/implementations/__pycache__/cache_metadata.cpython-313.pyc,,\nfsspec/implementations/__pycache__/cached.cpython-313.pyc,,\nfsspec/implementations/__pycache__/dask.cpython-313.pyc,,\nfsspec/implementations/__pycache__/data.cpython-313.pyc,,\nfsspec/implementations/__pycache__/dbfs.cpython-313.pyc,,\nfsspec/implementations/__pycache__/dirfs.cpython-313.pyc,,\nfsspec/implementations/__pycache__/ftp.cpython-313.pyc,,\nfsspec/implementations/__pycache__/git.cpython-313.pyc,,\nfsspec/implementations/__pycache__/github.cpython-313.pyc,,\nfsspec/implementations/__pycache__/http.cpython-313.pyc,,\nfsspec/implementations/__pycache__/http_sync.cpython-313.pyc,,\n
fsspec/implementations/__pycache__/jupyter.cpython-313.pyc,,\nfsspec/implementations/__pycache__/libarchive.cpython-313.pyc,,\nfsspec/implementations/__pycache__/local.cpython-313.pyc,,\nfsspec/implementations/__pycache__/memory.cpython-313.pyc,,\nfsspec/implementations/__pycache__/reference.cpython-313.pyc,,\nfsspec/implementations/__pycache__/sftp.cpython-313.pyc,,\nfsspec/implementations/__pycache__/smb.cpython-313.pyc,,\nfsspec/implementations/__pycache__/tar.cpython-313.pyc,,\nfsspec/implementations/__pycache__/webhdfs.cpython-313.pyc,,\nfsspec/implementations/__pycache__/zip.cpython-313.pyc,,\nfsspec/implementations/arrow.py,sha256=721Dikne_lV_0tlgk9jyKmHL6W-5MT0h2LKGvOYQTPI,8623\nfsspec/implementations/asyn_wrapper.py,sha256=PNkYdHiLVWwk-GJok5O6dTnhPwDaSU9QTtBTE9CIRec,3082\nfsspec/implementations/cache_mapper.py,sha256=W4wlxyPxZbSp9ItJ0pYRVBMh6bw9eFypgP6kUYuuiI4,2421\nfsspec/implementations/cache_metadata.py,sha256=pcOJYcBQY5OaC7Yhw0F3wjg08QLYApGmoISCrbs59ks,8511\nfsspec/implementations/cached.py,sha256=KA6c4jqrGeeg8WNPLsh8FkL3KeRAQtGLzKw18vSF1CI,32820\nfsspec/implementations/dask.py,sha256=CXZbJzIVOhKV8ILcxuy3bTvcacCueAbyQxmvAkbPkrk,4466\nfsspec/implementations/data.py,sha256=LDLczxRh8h7x39Zjrd-GgzdQHr78yYxDlrv2C9Uxb5E,1658\nfsspec/implementations/dbfs.py,sha256=XwpotuS_ncz3XK1dkUteww9GnTja7HoY91c0m4GUfwI,15092\nfsspec/implementations/dirfs.py,sha256=f1sGnQ9Vf0xTxrXo4jDeBy4Qfq3RTqAEemqBSeb0hwY,12108\nfsspec/implementations/ftp.py,sha256=sorsczLp_2J3ukONsbZY-11sRZP6H5a3V7XXf6o6ip0,11936\nfsspec/implementations/git.py,sha256=4SElW9U5d3k3_ITlvUAx59Yk7XLNRTqkGa2C3hCUkWM,3754\nfsspec/implementations/github.py,sha256=eAn1kJ7VeWR6gVoVRLBYclF_rQDXSJU-xzMXpvPQWqs,8002\nfsspec/implementations/http.py,sha256=_gLt0yGbVOYWvE9pK81WCC-3TgbOMOKJYllBU72ALo8,30138\nfsspec/implementations/http_sync.py,sha256=vHf2_O9RRlaW6k-R4hsS-91BkqyPIlc0zwo_ENNGU4U,30147\nfsspec/implementations/jupyter.py,sha256=B2uj7OEm7yIk-vRSsO37_ND0t0EBvn4B-Su43ibN4Pg,3811\nfsspec/implementations/libarchive.py,sha256=5_I2DiLXwQ1JC8x-K7jXu-tBwhO9dj7tFLnb0bTnVMQ,7102\nfsspec/implementations/local.py,sha256=g2iK8uWPGkSiI6bwmnIRXhJMQvTegCmXZ8Kb8ojhvAo,15543\nfsspec/implementations/memory.py,sha256=cLNrK9wk97sl4Tre9uVDXWj6mEHvvVVIgaVgNA5KVIg,10527\nfsspec/implementations/reference.py,sha256=t23prs_5ugXJnYhLxLlPLPyagrx4_ofZWR_oyX9wd3Q,48703\nfsspec/implementations/sftp.py,sha256=fMY9XZcmpjszQ2tCqO_TPaJesaeD_Dv7ptYzgUPGoO0,5631\nfsspec/implementations/smb.py,sha256=5fhu8h06nOLBPh2c48aT7WBRqh9cEcbIwtyu06wTjec,15236\nfsspec/implementations/tar.py,sha256=dam78Tp_CozybNqCY2JYgGBS3Uc9FuJUAT9oB0lolOs,4111\nfsspec/implementations/webhdfs.py,sha256=G9wGywj7BkZk4Mu9zXu6HaDlEqX4F8Gw1i4k46CP_-o,16769\nfsspec/implementations/zip.py,sha256=9LBMHPft2OutJl2Ft-r9u_z3GptLkc2n91ur2A3bCbg,6072\nfsspec/json.py,sha256=65sQ0Y7mTj33u_Y4IId5up4abQ3bAel4E4QzbKMiQSg,3826\nfsspec/mapping.py,sha256=m2ndB_gtRBXYmNJg0Ie1-BVR75TFleHmIQBzC-yWhjU,8343\nfsspec/parquet.py,sha256=6ibAmG527L5JNFS0VO8BDNlxHdA3bVYqdByeiFgpUVM,19448\nfsspec/registry.py,sha256=Bbur6KJilN62hx_lSRgCx9HlKrv91pLpdEPG7Vzme1M,11566\nfsspec/spec.py,sha256=l7ZEbgLsnrFuS-yrGl9re6ia1Yts1_10RqGV_mT-5P8,76032\nfsspec/tests/abstract/__init__.py,sha256=4xUJrv7gDgc85xAOz1p-V_K1hrsdMWTSa0rviALlJk8,10181\nfsspec/tests/abstract/__pycache__/__init__.cpython-313.pyc,,\nfsspec/tests/abstract/__pycache__/common.cpython-313.pyc,,\nfsspec/tests/abstract/__pycache__/copy.cpython-313.pyc,,\nfsspec/tests/abstract/__pycache__/get.cpython-313.pyc,,\nfsspec/tests/abstract/__pycache__/mv.cpython-313.pyc,,\nfsspec/tests/ab
stract/__pycache__/open.cpython-313.pyc,,\nfsspec/tests/abstract/__pycache__/pipe.cpython-313.pyc,,\nfsspec/tests/abstract/__pycache__/put.cpython-313.pyc,,\nfsspec/tests/abstract/common.py,sha256=1GQwNo5AONzAnzZj0fWgn8NJPLXALehbsuGxS3FzWVU,4973\nfsspec/tests/abstract/copy.py,sha256=gU5-d97U3RSde35Vp4RxPY4rWwL744HiSrJ8IBOp9-8,19967\nfsspec/tests/abstract/get.py,sha256=vNR4HztvTR7Cj56AMo7_tx7TeYz1Jgr_2Wb8Lv-UiBY,20755\nfsspec/tests/abstract/mv.py,sha256=k8eUEBIrRrGMsBY5OOaDXdGnQUKGwDIfQyduB6YD3Ns,1982\nfsspec/tests/abstract/open.py,sha256=Fi2PBPYLbRqysF8cFm0rwnB41kMdQVYjq8cGyDXp3BU,329\nfsspec/tests/abstract/pipe.py,sha256=LFzIrLCB5GLXf9rzFKJmE8AdG7LQ_h4bJo70r8FLPqM,402\nfsspec/tests/abstract/put.py,sha256=7aih17OKB_IZZh1Mkq1eBDIjobhtMQmI8x-Pw-S_aZk,21201\nfsspec/transaction.py,sha256=xliRG6U2Zf3khG4xcw9WiB-yAoqJSHEGK_VjHOdtgo0,2398\nfsspec/utils.py,sha256=A11t25RnpiQ30RO6xeR0Qqlu3fGj8bnc40jg08tlYSI,22980\n
|
.venv\Lib\site-packages\fsspec-2025.3.0.dist-info\RECORD
|
RECORD
|
Other
| 8,029 | 0.7 | 0 | 0 |
react-lib
| 698 |
2024-12-18T09:53:57.925683
|
Apache-2.0
| false |
7b484dd63968f3ce0f1a05851fd700bc
|
Wheel-Version: 1.0\nGenerator: hatchling 1.27.0\nRoot-Is-Purelib: true\nTag: py3-none-any\n
|
.venv\Lib\site-packages\fsspec-2025.3.0.dist-info\WHEEL
|
WHEEL
|
Other
| 87 | 0.5 | 0 | 0 |
vue-tools
| 914 |
2023-12-16T06:30:12.647630
|
Apache-2.0
| false |
e2fcb0ad9ea59332c808928b4b439e7a
|
BSD 3-Clause License\n\nCopyright (c) 2018, Martin Durant\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n
|
.venv\Lib\site-packages\fsspec-2025.3.0.dist-info\licenses\LICENSE
|
LICENSE
|
Other
| 1,513 | 0.7 | 0 | 0.130435 |
python-kit
| 343 |
2025-01-15T12:53:48.899079
|
Apache-2.0
| false |
b38a11bf4dcdfc66307f8515ce1fbaa6
|
Marker\n
|
.venv\Lib\site-packages\h11\py.typed
|
py.typed
|
Other
| 7 | 0.5 | 0 | 0 |
python-kit
| 683 |
2024-07-20T06:41:25.009151
|
GPL-3.0
| false |
3522f1a61602da93a3a5e4600cc1f05f
|
# This contains the main Connection class. Everything in h11 revolves around\n# this.\nfrom typing import (\n Any,\n Callable,\n cast,\n Dict,\n List,\n Optional,\n overload,\n Tuple,\n Type,\n Union,\n)\n\nfrom ._events import (\n ConnectionClosed,\n Data,\n EndOfMessage,\n Event,\n InformationalResponse,\n Request,\n Response,\n)\nfrom ._headers import get_comma_header, has_expect_100_continue, set_comma_header\nfrom ._readers import READERS, ReadersType\nfrom ._receivebuffer import ReceiveBuffer\nfrom ._state import (\n _SWITCH_CONNECT,\n _SWITCH_UPGRADE,\n CLIENT,\n ConnectionState,\n DONE,\n ERROR,\n MIGHT_SWITCH_PROTOCOL,\n SEND_BODY,\n SERVER,\n SWITCHED_PROTOCOL,\n)\nfrom ._util import ( # Import the internal things we need\n LocalProtocolError,\n RemoteProtocolError,\n Sentinel,\n)\nfrom ._writers import WRITERS, WritersType\n\n# Everything in __all__ gets re-exported as part of the h11 public API.\n__all__ = ["Connection", "NEED_DATA", "PAUSED"]\n\n\nclass NEED_DATA(Sentinel, metaclass=Sentinel):\n pass\n\n\nclass PAUSED(Sentinel, metaclass=Sentinel):\n pass\n\n\n# If we ever have this much buffered without it making a complete parseable\n# event, we error out. The only time we really buffer is when reading the\n# request/response line + headers together, so this is effectively the limit on\n# the size of that.\n#\n# Some precedents for defaults:\n# - node.js: 80 * 1024\n# - tomcat: 8 * 1024\n# - IIS: 16 * 1024\n# - Apache: <8 KiB per line>\nDEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024\n\n\n# RFC 7230's rules for connection lifecycles:\n# - If either side says they want to close the connection, then the connection\n# must close.\n# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close\n# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive\n# (and even this is a mess -- e.g. if you're implementing a proxy then\n# sending Connection: keep-alive is forbidden).\n#\n# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So\n# our rule is:\n# - If someone says Connection: close, we will close\n# - If someone uses HTTP/1.0, we will close.\ndef _keep_alive(event: Union[Request, Response]) -> bool:\n connection = get_comma_header(event.headers, b"connection")\n if b"close" in connection:\n return False\n if getattr(event, "http_version", b"1.1") < b"1.1":\n return False\n return True\n\n\ndef _body_framing(\n request_method: bytes, event: Union[Request, Response]\n) -> Tuple[str, Union[Tuple[()], Tuple[int]]]:\n # Called when we enter SEND_BODY to figure out framing information for\n # this body.\n #\n # These are the only two events that can trigger a SEND_BODY state:\n assert type(event) in (Request, Response)\n # Returns one of:\n #\n # ("content-length", count)\n # ("chunked", ())\n # ("http/1.0", ())\n #\n # which are (lookup key, *args) for constructing body reader/writer\n # objects.\n #\n # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3\n #\n # Step 1: some responses always have an empty body, regardless of what the\n # headers say.\n if type(event) is Response:\n if (\n event.status_code in (204, 304)\n or request_method == b"HEAD"\n or (request_method == b"CONNECT" and 200 <= event.status_code < 300)\n ):\n return ("content-length", (0,))\n # Section 3.3.3 also lists another case -- responses with status_code\n # < 200. 
For us these are InformationalResponses, not Responses, so\n # they can't get into this function in the first place.\n assert event.status_code >= 200\n\n # Step 2: check for Transfer-Encoding (T-E beats C-L):\n transfer_encodings = get_comma_header(event.headers, b"transfer-encoding")\n if transfer_encodings:\n assert transfer_encodings == [b"chunked"]\n return ("chunked", ())\n\n # Step 3: check for Content-Length\n content_lengths = get_comma_header(event.headers, b"content-length")\n if content_lengths:\n return ("content-length", (int(content_lengths[0]),))\n\n # Step 4: no applicable headers; fallback/default depends on type\n if type(event) is Request:\n return ("content-length", (0,))\n else:\n return ("http/1.0", ())\n\n\n################################################################\n#\n# The main Connection class\n#\n################################################################\n\n\nclass Connection:\n """An object encapsulating the state of an HTTP connection.\n\n Args:\n our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If\n you're implementing a server, pass :data:`h11.SERVER`.\n\n max_incomplete_event_size (int):\n The maximum number of bytes we're willing to buffer of an\n incomplete event. In practice this mostly sets a limit on the\n maximum size of the request/response line + headers. If this is\n exceeded, then :meth:`next_event` will raise\n :exc:`RemoteProtocolError`.\n\n """\n\n def __init__(\n self,\n our_role: Type[Sentinel],\n max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,\n ) -> None:\n self._max_incomplete_event_size = max_incomplete_event_size\n # State and role tracking\n if our_role not in (CLIENT, SERVER):\n raise ValueError(f"expected CLIENT or SERVER, not {our_role!r}")\n self.our_role = our_role\n self.their_role: Type[Sentinel]\n if our_role is CLIENT:\n self.their_role = SERVER\n else:\n self.their_role = CLIENT\n self._cstate = ConnectionState()\n\n # Callables for converting data->events or vice-versa given the\n # current state\n self._writer = self._get_io_object(self.our_role, None, WRITERS)\n self._reader = self._get_io_object(self.their_role, None, READERS)\n\n # Holds any unprocessed received data\n self._receive_buffer = ReceiveBuffer()\n # If this is true, then it indicates that the incoming connection was\n # closed *after* the end of whatever's in self._receive_buffer:\n self._receive_buffer_closed = False\n\n # Extra bits of state that don't fit into the state machine.\n #\n # These two are only used to interpret framing headers for figuring\n # out how to read/write response bodies. their_http_version is also\n # made available as a convenient public API.\n self.their_http_version: Optional[bytes] = None\n self._request_method: Optional[bytes] = None\n # This is pure flow-control and doesn't at all affect the set of legal\n # transitions, so no need to bother ConnectionState with it:\n self.client_is_waiting_for_100_continue = False\n\n @property\n def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]:\n """A dictionary like::\n\n {CLIENT: <client state>, SERVER: <server state>}\n\n See :ref:`state-machine` for details.\n\n """\n return dict(self._cstate.states)\n\n @property\n def our_state(self) -> Type[Sentinel]:\n """The current state of whichever role we are playing. See\n :ref:`state-machine` for details.\n """\n return self._cstate.states[self.our_role]\n\n @property\n def their_state(self) -> Type[Sentinel]:\n """The current state of whichever role we are NOT playing. 
See\n :ref:`state-machine` for details.\n """\n return self._cstate.states[self.their_role]\n\n @property\n def they_are_waiting_for_100_continue(self) -> bool:\n return self.their_role is CLIENT and self.client_is_waiting_for_100_continue\n\n def start_next_cycle(self) -> None:\n """Attempt to reset our connection state for a new request/response\n cycle.\n\n If both client and server are in :data:`DONE` state, then resets them\n both to :data:`IDLE` state in preparation for a new request/response\n cycle on this same connection. Otherwise, raises a\n :exc:`LocalProtocolError`.\n\n See :ref:`keepalive-and-pipelining`.\n\n """\n old_states = dict(self._cstate.states)\n self._cstate.start_next_cycle()\n self._request_method = None\n # self.their_http_version gets left alone, since it presumably lasts\n # beyond a single request/response cycle\n assert not self.client_is_waiting_for_100_continue\n self._respond_to_state_changes(old_states)\n\n def _process_error(self, role: Type[Sentinel]) -> None:\n old_states = dict(self._cstate.states)\n self._cstate.process_error(role)\n self._respond_to_state_changes(old_states)\n\n def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]:\n if type(event) is InformationalResponse and event.status_code == 101:\n return _SWITCH_UPGRADE\n if type(event) is Response:\n if (\n _SWITCH_CONNECT in self._cstate.pending_switch_proposals\n and 200 <= event.status_code < 300\n ):\n return _SWITCH_CONNECT\n return None\n\n # All events go through here\n def _process_event(self, role: Type[Sentinel], event: Event) -> None:\n # First, pass the event through the state machine to make sure it\n # succeeds.\n old_states = dict(self._cstate.states)\n if role is CLIENT and type(event) is Request:\n if event.method == b"CONNECT":\n self._cstate.process_client_switch_proposal(_SWITCH_CONNECT)\n if get_comma_header(event.headers, b"upgrade"):\n self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE)\n server_switch_event = None\n if role is SERVER:\n server_switch_event = self._server_switch_event(event)\n self._cstate.process_event(role, type(event), server_switch_event)\n\n # Then perform the updates triggered by it.\n\n if type(event) is Request:\n self._request_method = event.method\n\n if role is self.their_role and type(event) in (\n Request,\n Response,\n InformationalResponse,\n ):\n event = cast(Union[Request, Response, InformationalResponse], event)\n self.their_http_version = event.http_version\n\n # Keep alive handling\n #\n # RFC 7230 doesn't really say what one should do if Connection: close\n # shows up on a 1xx InformationalResponse. I think the idea is that\n # this is not supposed to happen. 
In any case, if it does happen, we\n # ignore it.\n if type(event) in (Request, Response) and not _keep_alive(\n cast(Union[Request, Response], event)\n ):\n self._cstate.process_keep_alive_disabled()\n\n # 100-continue\n if type(event) is Request and has_expect_100_continue(event):\n self.client_is_waiting_for_100_continue = True\n if type(event) in (InformationalResponse, Response):\n self.client_is_waiting_for_100_continue = False\n if role is CLIENT and type(event) in (Data, EndOfMessage):\n self.client_is_waiting_for_100_continue = False\n\n self._respond_to_state_changes(old_states, event)\n\n def _get_io_object(\n self,\n role: Type[Sentinel],\n event: Optional[Event],\n io_dict: Union[ReadersType, WritersType],\n ) -> Optional[Callable[..., Any]]:\n # event may be None; it's only used when entering SEND_BODY\n state = self._cstate.states[role]\n if state is SEND_BODY:\n # Special case: the io_dict has a dict of reader/writer factories\n # that depend on the request/response framing.\n framing_type, args = _body_framing(\n cast(bytes, self._request_method), cast(Union[Request, Response], event)\n )\n return io_dict[SEND_BODY][framing_type](*args) # type: ignore[index]\n else:\n # General case: the io_dict just has the appropriate reader/writer\n # for this state\n return io_dict.get((role, state)) # type: ignore[return-value]\n\n # This must be called after any action that might have caused\n # self._cstate.states to change.\n def _respond_to_state_changes(\n self,\n old_states: Dict[Type[Sentinel], Type[Sentinel]],\n event: Optional[Event] = None,\n ) -> None:\n # Update reader/writer\n if self.our_state != old_states[self.our_role]:\n self._writer = self._get_io_object(self.our_role, event, WRITERS)\n if self.their_state != old_states[self.their_role]:\n self._reader = self._get_io_object(self.their_role, event, READERS)\n\n @property\n def trailing_data(self) -> Tuple[bytes, bool]:\n """Data that has been received, but not yet processed, represented as\n a tuple with two elements, where the first is a byte-string containing\n the unprocessed data itself, and the second is a bool that is True if\n the receive connection was closed.\n\n See :ref:`switching-protocols` for discussion of why you'd want this.\n """\n return (bytes(self._receive_buffer), self._receive_buffer_closed)\n\n def receive_data(self, data: bytes) -> None:\n """Add data to our internal receive buffer.\n\n This does not actually do any processing on the data, just stores\n it. To trigger processing, you have to call :meth:`next_event`.\n\n Args:\n data (:term:`bytes-like object`):\n The new data that was just received.\n\n Special case: If *data* is an empty byte-string like ``b""``,\n then this indicates that the remote side has closed the\n connection (end of file). Normally this is convenient, because\n standard Python APIs like :meth:`file.read` or\n :meth:`socket.recv` use ``b""`` to indicate end-of-file, while\n other failures to read are indicated using other mechanisms\n like raising :exc:`TimeoutError`. 
When using such an API you\n can just blindly pass through whatever you get from ``read``\n to :meth:`receive_data`, and everything will work.\n\n But, if you have an API where reading an empty string is a\n valid non-EOF condition, then you need to be aware of this and\n make sure to check for such strings and avoid passing them to\n :meth:`receive_data`.\n\n Returns:\n Nothing, but after calling this you should call :meth:`next_event`\n to parse the newly received data.\n\n Raises:\n RuntimeError:\n Raised if you pass an empty *data*, indicating EOF, and then\n pass a non-empty *data*, indicating more data that somehow\n arrived after the EOF.\n\n (Calling ``receive_data(b"")`` multiple times is fine,\n and equivalent to calling it once.)\n\n """\n if data:\n if self._receive_buffer_closed:\n raise RuntimeError("received close, then received more data?")\n self._receive_buffer += data\n else:\n self._receive_buffer_closed = True\n\n def _extract_next_receive_event(\n self,\n ) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:\n state = self.their_state\n # We don't pause immediately when they enter DONE, because even in\n # DONE state we can still process a ConnectionClosed() event. But\n # if we have data in our buffer, then we definitely aren't getting\n # a ConnectionClosed() immediately and we need to pause.\n if state is DONE and self._receive_buffer:\n return PAUSED\n if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL:\n return PAUSED\n assert self._reader is not None\n event = self._reader(self._receive_buffer)\n if event is None:\n if not self._receive_buffer and self._receive_buffer_closed:\n # In some unusual cases (basically just HTTP/1.0 bodies), EOF\n # triggers an actual protocol event; in that case, we want to\n # return that event, and then the state will change and we'll\n # get called again to generate the actual ConnectionClosed().\n if hasattr(self._reader, "read_eof"):\n event = self._reader.read_eof()\n else:\n event = ConnectionClosed()\n if event is None:\n event = NEED_DATA\n return event # type: ignore[no-any-return]\n\n def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:\n """Parse the next event out of our receive buffer, update our internal\n state, and return it.\n\n This is a mutating operation -- think of it like calling :func:`next`\n on an iterator.\n\n Returns:\n : One of three things:\n\n 1) An event object -- see :ref:`events`.\n\n 2) The special constant :data:`NEED_DATA`, which indicates that\n you need to read more data from your socket and pass it to\n :meth:`receive_data` before this method will be able to return\n any more events.\n\n 3) The special constant :data:`PAUSED`, which indicates that we\n are not in a state where we can process incoming data (usually\n because the peer has finished their part of the current\n request/response cycle, and you have not yet called\n :meth:`start_next_cycle`). See :ref:`flow-control` for details.\n\n Raises:\n RemoteProtocolError:\n The peer has misbehaved. 
You should close the connection\n (possibly after sending some kind of 4xx response).\n\n Once this method returns :class:`ConnectionClosed` once, then all\n subsequent calls will also return :class:`ConnectionClosed`.\n\n If this method raises any exception besides :exc:`RemoteProtocolError`\n then that's a bug -- if it happens please file a bug report!\n\n If this method raises any exception then it also sets\n :attr:`Connection.their_state` to :data:`ERROR` -- see\n :ref:`error-handling` for discussion.\n\n """\n\n if self.their_state is ERROR:\n raise RemoteProtocolError("Can't receive data when peer state is ERROR")\n try:\n event = self._extract_next_receive_event()\n if event not in [NEED_DATA, PAUSED]:\n self._process_event(self.their_role, cast(Event, event))\n if event is NEED_DATA:\n if len(self._receive_buffer) > self._max_incomplete_event_size:\n # 431 is "Request header fields too large" which is pretty\n # much the only situation where we can get here\n raise RemoteProtocolError(\n "Receive buffer too long", error_status_hint=431\n )\n if self._receive_buffer_closed:\n # We're still trying to complete some event, but that's\n # never going to happen because no more data is coming\n raise RemoteProtocolError("peer unexpectedly closed connection")\n return event\n except BaseException as exc:\n self._process_error(self.their_role)\n if isinstance(exc, LocalProtocolError):\n exc._reraise_as_remote_protocol_error()\n else:\n raise\n\n @overload\n def send(self, event: ConnectionClosed) -> None:\n ...\n\n @overload\n def send(\n self, event: Union[Request, InformationalResponse, Response, Data, EndOfMessage]\n ) -> bytes:\n ...\n\n @overload\n def send(self, event: Event) -> Optional[bytes]:\n ...\n\n def send(self, event: Event) -> Optional[bytes]:\n """Convert a high-level event into bytes that can be sent to the peer,\n while updating our internal state machine.\n\n Args:\n event: The :ref:`event <events>` to send.\n\n Returns:\n If ``type(event) is ConnectionClosed``, then returns\n ``None``. Otherwise, returns a :term:`bytes-like object`.\n\n Raises:\n LocalProtocolError:\n Sending this event at this time would violate our\n understanding of the HTTP/1.1 protocol.\n\n If this method raises any exception then it also sets\n :attr:`Connection.our_state` to :data:`ERROR` -- see\n :ref:`error-handling` for discussion.\n\n """\n data_list = self.send_with_data_passthrough(event)\n if data_list is None:\n return None\n else:\n return b"".join(data_list)\n\n def send_with_data_passthrough(self, event: Event) -> Optional[List[bytes]]:\n """Identical to :meth:`send`, except that in situations where\n :meth:`send` returns a single :term:`bytes-like object`, this instead\n returns a list of them -- and when sending a :class:`Data` event, this\n list is guaranteed to contain the exact object you passed in as\n :attr:`Data.data`. See :ref:`sendfile` for discussion.\n\n """\n if self.our_state is ERROR:\n raise LocalProtocolError("Can't send data when our state is ERROR")\n try:\n if type(event) is Response:\n event = self._clean_up_response_headers_for_sending(event)\n # We want to call _process_event before calling the writer,\n # because if someone tries to do something invalid then this will\n # give a sensible error message, while our writers all just assume\n # they will only receive valid events. But, _process_event might\n # change self._writer. 
So we have to do a little dance:\n writer = self._writer\n self._process_event(self.our_role, event)\n if type(event) is ConnectionClosed:\n return None\n else:\n # In any situation where writer is None, process_event should\n # have raised ProtocolError\n assert writer is not None\n data_list: List[bytes] = []\n writer(event, data_list.append)\n return data_list\n except:\n self._process_error(self.our_role)\n raise\n\n def send_failed(self) -> None:\n """Notify the state machine that we failed to send the data it gave\n us.\n\n This causes :attr:`Connection.our_state` to immediately become\n :data:`ERROR` -- see :ref:`error-handling` for discussion.\n\n """\n self._process_error(self.our_role)\n\n # When sending a Response, we take responsibility for a few things:\n #\n # - Sometimes you MUST set Connection: close. We take care of those\n # times. (You can also set it yourself if you want, and if you do then\n # we'll respect that and close the connection at the right time. But you\n # don't have to worry about that unless you want to.)\n #\n # - The user has to set Content-Length if they want it. Otherwise, for\n # responses that have bodies (e.g. not HEAD), then we will automatically\n # select the right mechanism for streaming a body of unknown length,\n # which depends on depending on the peer's HTTP version.\n #\n # This function's *only* responsibility is making sure headers are set up\n # right -- everything downstream just looks at the headers. There are no\n # side channels.\n def _clean_up_response_headers_for_sending(self, response: Response) -> Response:\n assert type(response) is Response\n\n headers = response.headers\n need_close = False\n\n # HEAD requests need some special handling: they always act like they\n # have Content-Length: 0, and that's how _body_framing treats\n # them. But their headers are supposed to match what we would send if\n # the request was a GET. (Technically there is one deviation allowed:\n # we're allowed to leave out the framing headers -- see\n # https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as\n # easy to get them right.)\n method_for_choosing_headers = cast(bytes, self._request_method)\n if method_for_choosing_headers == b"HEAD":\n method_for_choosing_headers = b"GET"\n framing_type, _ = _body_framing(method_for_choosing_headers, response)\n if framing_type in ("chunked", "http/1.0"):\n # This response has a body of unknown length.\n # If our peer is HTTP/1.1, we use Transfer-Encoding: chunked\n # If our peer is HTTP/1.0, we use no framing headers, and close the\n # connection afterwards.\n #\n # Make sure to clear Content-Length (in principle user could have\n # set both and then we ignored Content-Length b/c\n # Transfer-Encoding overwrote it -- this would be naughty of them,\n # but the HTTP spec says that if our peer does this then we have\n # to fix it instead of erroring out, so we'll accord the user the\n # same respect).\n headers = set_comma_header(headers, b"content-length", [])\n if self.their_http_version is None or self.their_http_version < b"1.1":\n # Either we never got a valid request and are sending back an\n # error (their_http_version is None), so we assume the worst;\n # or else we did get a valid HTTP/1.0 request, so we know that\n # they don't understand chunked encoding.\n headers = set_comma_header(headers, b"transfer-encoding", [])\n # This is actually redundant ATM, since currently we\n # unconditionally disable keep-alive when talking to HTTP/1.0\n # peers. 
But let's be defensive just in case we add\n # Connection: keep-alive support later:\n if self._request_method != b"HEAD":\n need_close = True\n else:\n headers = set_comma_header(headers, b"transfer-encoding", [b"chunked"])\n\n if not self._cstate.keep_alive or need_close:\n # Make sure Connection: close is set\n connection = set(get_comma_header(headers, b"connection"))\n connection.discard(b"keep-alive")\n connection.add(b"close")\n headers = set_comma_header(headers, b"connection", sorted(connection))\n\n return Response(\n headers=headers,\n status_code=response.status_code,\n http_version=response.http_version,\n reason=response.reason,\n )\n
|
.venv\Lib\site-packages\h11\_connection.py
|
_connection.py
|
Python
| 26,863 | 0.95 | 0.189681 | 0.260563 |
awesome-app
| 551 |
2024-01-26T15:01:32.074343
|
GPL-3.0
| false |
811188503f2e5bf065caa2b32871901f
|
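The `_connection.py` source captured above documents h11's event-driven `Connection` API: bytes go in via `receive_data`, parsed events come out of `next_event`, and outgoing events are serialized with `send`. A minimal client-side sketch of that cycle follows; the socket I/O is left as comments because it depends on the caller's transport:

```python
import h11

conn = h11.Connection(our_role=h11.CLIENT)

# Serialize a request; the returned bytes would be written to a socket.
wire_bytes = conn.send(
    h11.Request(method="GET", target="/", headers=[("Host", "example.org")])
)
wire_bytes += conn.send(h11.EndOfMessage())
# sock.sendall(wire_bytes)

# Feed whatever the peer sends back into the state machine and drain events.
def process(received: bytes) -> None:
    conn.receive_data(received)
    while True:
        event = conn.next_event()
        if event is h11.NEED_DATA:
            return                      # go read more bytes from the socket
        if isinstance(event, h11.EndOfMessage):
            return                      # response fully received
        print(type(event).__name__)     # Response, then zero or more Data events
```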
import re\nfrom typing import AnyStr, cast, List, overload, Sequence, Tuple, TYPE_CHECKING, Union\n\nfrom ._abnf import field_name, field_value\nfrom ._util import bytesify, LocalProtocolError, validate\n\nif TYPE_CHECKING:\n from ._events import Request\n\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal # type: ignore\n\nCONTENT_LENGTH_MAX_DIGITS = 20 # allow up to 1 billion TB - 1\n\n\n# Facts\n# -----\n#\n# Headers are:\n# keys: case-insensitive ascii\n# values: mixture of ascii and raw bytes\n#\n# "Historically, HTTP has allowed field content with text in the ISO-8859-1\n# charset [ISO-8859-1], supporting other charsets only through use of\n# [RFC2047] encoding. In practice, most HTTP header field values use only a\n# subset of the US-ASCII charset [USASCII]. Newly defined header fields SHOULD\n# limit their field values to US-ASCII octets. A recipient SHOULD treat other\n# octets in field content (obs-text) as opaque data."\n# And it deprecates all non-ascii values\n#\n# Leading/trailing whitespace in header names is forbidden\n#\n# Values get leading/trailing whitespace stripped\n#\n# Content-Disposition actually needs to contain unicode semantically; to\n# accomplish this it has a terrifically weird way of encoding the filename\n# itself as ascii (and even this still has lots of cross-browser\n# incompatibilities)\n#\n# Order is important:\n# "a proxy MUST NOT change the order of these field values when forwarding a\n# message"\n# (and there are several headers where the order indicates a preference)\n#\n# Multiple occurences of the same header:\n# "A sender MUST NOT generate multiple header fields with the same field name\n# in a message unless either the entire field value for that header field is\n# defined as a comma-separated list [or the header is Set-Cookie which gets a\n# special exception]" - RFC 7230. (cookies are in RFC 6265)\n#\n# So every header aside from Set-Cookie can be merged by b", ".join if it\n# occurs repeatedly. But, of course, they can't necessarily be split by\n# .split(b","), because quoting.\n#\n# Given all this mess (case insensitive, duplicates allowed, order is\n# important, ...), there doesn't appear to be any standard way to handle\n# headers in Python -- they're almost like dicts, but... actually just\n# aren't. For now we punt and just use a super simple representation: headers\n# are a list of pairs\n#\n# [(name1, value1), (name2, value2), ...]\n#\n# where all entries are bytestrings, names are lowercase and have no\n# leading/trailing whitespace, and values are bytestrings with no\n# leading/trailing whitespace. 
Searching and updating are done via naive O(n)\n# methods.\n#\n# Maybe a dict-of-lists would be better?\n\n_content_length_re = re.compile(rb"[0-9]+")\n_field_name_re = re.compile(field_name.encode("ascii"))\n_field_value_re = re.compile(field_value.encode("ascii"))\n\n\nclass Headers(Sequence[Tuple[bytes, bytes]]):\n """\n A list-like interface that allows iterating over headers as byte-pairs\n of (lowercased-name, value).\n\n Internally we actually store the representation as three-tuples,\n including both the raw original casing, in order to preserve casing\n over-the-wire, and the lowercased name, for case-insensitive comparisions.\n\n r = Request(\n method="GET",\n target="/",\n headers=[("Host", "example.org"), ("Connection", "keep-alive")],\n http_version="1.1",\n )\n assert r.headers == [\n (b"host", b"example.org"),\n (b"connection", b"keep-alive")\n ]\n assert r.headers.raw_items() == [\n (b"Host", b"example.org"),\n (b"Connection", b"keep-alive")\n ]\n """\n\n __slots__ = "_full_items"\n\n def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:\n self._full_items = full_items\n\n def __bool__(self) -> bool:\n return bool(self._full_items)\n\n def __eq__(self, other: object) -> bool:\n return list(self) == list(other) # type: ignore\n\n def __len__(self) -> int:\n return len(self._full_items)\n\n def __repr__(self) -> str:\n return "<Headers(%s)>" % repr(list(self))\n\n def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override]\n _, name, value = self._full_items[idx]\n return (name, value)\n\n def raw_items(self) -> List[Tuple[bytes, bytes]]:\n return [(raw_name, value) for raw_name, _, value in self._full_items]\n\n\nHeaderTypes = Union[\n List[Tuple[bytes, bytes]],\n List[Tuple[bytes, str]],\n List[Tuple[str, bytes]],\n List[Tuple[str, str]],\n]\n\n\n@overload\ndef normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:\n ...\n\n\n@overload\ndef normalize_and_validate(headers: HeaderTypes, _parsed: Literal[False]) -> Headers:\n ...\n\n\n@overload\ndef normalize_and_validate(\n headers: Union[Headers, HeaderTypes], _parsed: bool = False\n) -> Headers:\n ...\n\n\ndef normalize_and_validate(\n headers: Union[Headers, HeaderTypes], _parsed: bool = False\n) -> Headers:\n new_headers = []\n seen_content_length = None\n saw_transfer_encoding = False\n for name, value in headers:\n # For headers coming out of the parser, we can safely skip some steps,\n # because it always returns bytes and has already run these regexes\n # over the data:\n if not _parsed:\n name = bytesify(name)\n value = bytesify(value)\n validate(_field_name_re, name, "Illegal header name {!r}", name)\n validate(_field_value_re, value, "Illegal header value {!r}", value)\n assert isinstance(name, bytes)\n assert isinstance(value, bytes)\n\n raw_name = name\n name = name.lower()\n if name == b"content-length":\n lengths = {length.strip() for length in value.split(b",")}\n if len(lengths) != 1:\n raise LocalProtocolError("conflicting Content-Length headers")\n value = lengths.pop()\n validate(_content_length_re, value, "bad Content-Length")\n if len(value) > CONTENT_LENGTH_MAX_DIGITS:\n raise LocalProtocolError("bad Content-Length")\n if seen_content_length is None:\n seen_content_length = value\n new_headers.append((raw_name, name, value))\n elif seen_content_length != value:\n raise LocalProtocolError("conflicting Content-Length headers")\n elif name == b"transfer-encoding":\n # "A server that receives a request message with a transfer coding\n # it does 
not understand SHOULD respond with 501 (Not\n # Implemented)."\n # https://tools.ietf.org/html/rfc7230#section-3.3.1\n if saw_transfer_encoding:\n raise LocalProtocolError(\n "multiple Transfer-Encoding headers", error_status_hint=501\n )\n # "All transfer-coding names are case-insensitive"\n # -- https://tools.ietf.org/html/rfc7230#section-4\n value = value.lower()\n if value != b"chunked":\n raise LocalProtocolError(\n "Only Transfer-Encoding: chunked is supported",\n error_status_hint=501,\n )\n saw_transfer_encoding = True\n new_headers.append((raw_name, name, value))\n else:\n new_headers.append((raw_name, name, value))\n return Headers(new_headers)\n\n\ndef get_comma_header(headers: Headers, name: bytes) -> List[bytes]:\n # Should only be used for headers whose value is a list of\n # comma-separated, case-insensitive values.\n #\n # The header name `name` is expected to be lower-case bytes.\n #\n # Connection: meets these criteria (including cast insensitivity).\n #\n # Content-Length: technically is just a single value (1*DIGIT), but the\n # standard makes reference to implementations that do multiple values, and\n # using this doesn't hurt. Ditto, case insensitivity doesn't things either\n # way.\n #\n # Transfer-Encoding: is more complex (allows for quoted strings), so\n # splitting on , is actually wrong. For example, this is legal:\n #\n # Transfer-Encoding: foo; options="1,2", chunked\n #\n # and should be parsed as\n #\n # foo; options="1,2"\n # chunked\n #\n # but this naive function will parse it as\n #\n # foo; options="1\n # 2"\n # chunked\n #\n # However, this is okay because the only thing we are going to do with\n # any Transfer-Encoding is reject ones that aren't just "chunked", so\n # both of these will be treated the same anyway.\n #\n # Expect: the only legal value is the literal string\n # "100-continue". Splitting on commas is harmless. Case insensitive.\n #\n out: List[bytes] = []\n for _, found_name, found_raw_value in headers._full_items:\n if found_name == name:\n found_raw_value = found_raw_value.lower()\n for found_split_value in found_raw_value.split(b","):\n found_split_value = found_split_value.strip()\n if found_split_value:\n out.append(found_split_value)\n return out\n\n\ndef set_comma_header(headers: Headers, name: bytes, new_values: List[bytes]) -> Headers:\n # The header name `name` is expected to be lower-case bytes.\n #\n # Note that when we store the header we use title casing for the header\n # names, in order to match the conventional HTTP header style.\n #\n # Simply calling `.title()` is a blunt approach, but it's correct\n # here given the cases where we're using `set_comma_header`...\n #\n # Connection, Content-Length, Transfer-Encoding.\n new_headers: List[Tuple[bytes, bytes]] = []\n for found_raw_name, found_name, found_raw_value in headers._full_items:\n if found_name != name:\n new_headers.append((found_raw_name, found_raw_value))\n for new_value in new_values:\n new_headers.append((name.title(), new_value))\n return normalize_and_validate(new_headers)\n\n\ndef has_expect_100_continue(request: "Request") -> bool:\n # https://tools.ietf.org/html/rfc7231#section-5.1.1\n # "A server that receives a 100-continue expectation in an HTTP/1.0 request\n # MUST ignore that expectation."\n if request.http_version < b"1.1":\n return False\n expect = get_comma_header(request.headers, b"expect")\n return b"100-continue" in expect\n
|
.venv\Lib\site-packages\h11\_headers.py
|
_headers.py
|
Python
| 10,412 | 0.95 | 0.148936 | 0.443089 |
react-lib
| 772 |
2025-06-01T19:49:16.967017
|
BSD-3-Clause
| false |
e1b46091b343ba5a89a410664a0359b3
|
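As a brief illustration of the behaviour implemented in `_headers.py` above, here is a minimal usage sketch. Note that `h11._headers` is a private module, so this is a description of the code's behaviour rather than a supported API.

```python
# Minimal sketch of the private helpers in h11._headers shown above.
from h11._headers import get_comma_header, normalize_and_validate, set_comma_header

headers = normalize_and_validate(
    [("Host", "example.org"), ("Connection", "keep-alive, Upgrade")]
)

# Iterating yields lowercased names; raw_items() preserves the wire casing.
assert list(headers) == [
    (b"host", b"example.org"),
    (b"connection", b"keep-alive, Upgrade"),
]
assert headers.raw_items()[0] == (b"Host", b"example.org")

# get_comma_header splits and lowercases comma-separated values.
assert get_comma_header(headers, b"connection") == [b"keep-alive", b"upgrade"]

# set_comma_header replaces a header wholesale and re-validates the result,
# writing the name back in title case (b"Connection").
headers = set_comma_header(headers, b"connection", [b"close"])
assert get_comma_header(headers, b"connection") == [b"close"]
```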
import re\nimport sys\nfrom typing import List, Optional, Union\n\n__all__ = ["ReceiveBuffer"]\n\n\n# Operations we want to support:\n# - find next \r\n or \r\n\r\n (\n or \n\n are also acceptable),\n# or wait until there is one\n# - read at-most-N bytes\n# Goals:\n# - on average, do this fast\n# - worst case, do this in O(n) where n is the number of bytes processed\n# Plan:\n# - store bytearray, offset, how far we've searched for a separator token\n# - use the how-far-we've-searched data to avoid rescanning\n# - while doing a stream of uninterrupted processing, advance offset instead\n# of constantly copying\n# WARNING:\n# - I haven't benchmarked or profiled any of this yet.\n#\n# Note that starting in Python 3.4, deleting the initial n bytes from a\n# bytearray is amortized O(n), thanks to some excellent work by Antoine\n# Martin:\n#\n# https://bugs.python.org/issue19087\n#\n# This means that if we only supported 3.4+, we could get rid of the code here\n# involving self._start and self.compress, because it's doing exactly the same\n# thing that bytearray now does internally.\n#\n# BUT unfortunately, we still support 2.7, and reading short segments out of a\n# long buffer MUST be O(bytes read) to avoid DoS issues, so we can't actually\n# delete this code. Yet:\n#\n# https://pythonclock.org/\n#\n# (Two things to double-check first though: make sure PyPy also has the\n# optimization, and benchmark to make sure it's a win, since we do have a\n# slightly clever thing where we delay calling compress() until we've\n# processed a whole event, which could in theory be slightly more efficient\n# than the internal bytearray support.)\nblank_line_regex = re.compile(b"\n\r?\n", re.MULTILINE)\n\n\nclass ReceiveBuffer:\n def __init__(self) -> None:\n self._data = bytearray()\n self._next_line_search = 0\n self._multiple_lines_search = 0\n\n def __iadd__(self, byteslike: Union[bytes, bytearray]) -> "ReceiveBuffer":\n self._data += byteslike\n return self\n\n def __bool__(self) -> bool:\n return bool(len(self))\n\n def __len__(self) -> int:\n return len(self._data)\n\n # for @property unprocessed_data\n def __bytes__(self) -> bytes:\n return bytes(self._data)\n\n def _extract(self, count: int) -> bytearray:\n # extracting an initial slice of the data buffer and return it\n out = self._data[:count]\n del self._data[:count]\n\n self._next_line_search = 0\n self._multiple_lines_search = 0\n\n return out\n\n def maybe_extract_at_most(self, count: int) -> Optional[bytearray]:\n """\n Extract a fixed number of bytes from the buffer.\n """\n out = self._data[:count]\n if not out:\n return None\n\n return self._extract(count)\n\n def maybe_extract_next_line(self) -> Optional[bytearray]:\n """\n Extract the first line, if it is completed in the buffer.\n """\n # Only search in buffer space that we've not already looked at.\n search_start_index = max(0, self._next_line_search - 1)\n partial_idx = self._data.find(b"\r\n", search_start_index)\n\n if partial_idx == -1:\n self._next_line_search = len(self._data)\n return None\n\n # + 2 is to compensate len(b"\r\n")\n idx = partial_idx + 2\n\n return self._extract(idx)\n\n def maybe_extract_lines(self) -> Optional[List[bytearray]]:\n """\n Extract everything up to the first blank line, and return a list of lines.\n """\n # Handle the case where we have an immediate empty line.\n if self._data[:1] == b"\n":\n self._extract(1)\n return []\n\n if self._data[:2] == b"\r\n":\n self._extract(2)\n return []\n\n # Only search in buffer space that we've not already looked at.\n 
match = blank_line_regex.search(self._data, self._multiple_lines_search)\n if match is None:\n self._multiple_lines_search = max(0, len(self._data) - 2)\n return None\n\n # Truncate the buffer and return it.\n idx = match.span(0)[-1]\n out = self._extract(idx)\n lines = out.split(b"\n")\n\n for line in lines:\n if line.endswith(b"\r"):\n del line[-1]\n\n assert lines[-2] == lines[-1] == b""\n\n del lines[-2:]\n\n return lines\n\n # In theory we should wait until `\r\n` before starting to validate\n # incoming data. However it's interesting to detect (very) invalid data\n # early given they might not even contain `\r\n` at all (hence only\n # timeout will get rid of them).\n # This is not a 100% effective detection but more of a cheap sanity check\n # allowing for early abort in some useful cases.\n # This is especially interesting when peer is messing up with HTTPS and\n # sent us a TLS stream where we were expecting plain HTTP given all\n # versions of TLS so far start handshake with a 0x16 message type code.\n def is_next_line_obviously_invalid_request_line(self) -> bool:\n try:\n # HTTP header line must not contain non-printable characters\n # and should not start with a space\n return self._data[0] < 0x21\n except IndexError:\n return False\n
|
.venv\Lib\site-packages\h11\_receivebuffer.py
|
_receivebuffer.py
|
Python
| 5,252 | 0.95 | 0.163399 | 0.428571 |
node-utils
| 88 |
2024-08-13T08:03:57.697341
|
Apache-2.0
| false |
60a0149281ead0518630f4cf130b1aab
|
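The buffering strategy described in `_receivebuffer.py` above can be exercised with a short sketch. Again this is a private module (`h11._receivebuffer`), used here only to illustrate the extraction methods.

```python
from h11._receivebuffer import ReceiveBuffer

buf = ReceiveBuffer()
buf += b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"

# The status line is complete, so it can be extracted (with its trailing CRLF).
assert buf.maybe_extract_next_line() == b"HTTP/1.1 200 OK\r\n"

# The header block is not yet terminated by a blank line, so we get None...
assert buf.maybe_extract_lines() is None

# ...but once the blank line arrives, the remaining header lines come out
# split on newlines, with trailing \r stripped and the blank lines dropped.
buf += b"\r\n"
assert buf.maybe_extract_lines() == [b"Content-Length: 0"]
```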
################################################################\n# The core state machine\n################################################################\n#\n# Rule 1: everything that affects the state machine and state transitions must\n# live here in this file. As much as possible goes into the table-based\n# representation, but for the bits that don't quite fit, the actual code and\n# state must nonetheless live here.\n#\n# Rule 2: this file does not know about what role we're playing; it only knows\n# about HTTP request/response cycles in the abstract. This ensures that we\n# don't cheat and apply different rules to local and remote parties.\n#\n#\n# Theory of operation\n# ===================\n#\n# Possibly the simplest way to think about this is that we actually have 5\n# different state machines here. Yes, 5. These are:\n#\n# 1) The client state, with its complicated automaton (see the docs)\n# 2) The server state, with its complicated automaton (see the docs)\n# 3) The keep-alive state, with possible states {True, False}\n# 4) The SWITCH_CONNECT state, with possible states {False, True}\n# 5) The SWITCH_UPGRADE state, with possible states {False, True}\n#\n# For (3)-(5), the first state listed is the initial state.\n#\n# (1)-(3) are stored explicitly in member variables. The last\n# two are stored implicitly in the pending_switch_proposals set as:\n# (state of 4) == (_SWITCH_CONNECT in pending_switch_proposals)\n# (state of 5) == (_SWITCH_UPGRADE in pending_switch_proposals)\n#\n# And each of these machines has two different kinds of transitions:\n#\n# a) Event-triggered\n# b) State-triggered\n#\n# Event triggered is the obvious thing that you'd think it is: some event\n# happens, and if it's the right event at the right time then a transition\n# happens. But there are somewhat complicated rules for which machines can\n# "see" which events. (As a rule of thumb, if a machine "sees" an event, this\n# means two things: the event can affect the machine, and if the machine is\n# not in a state where it expects that event then it's an error.) These rules\n# are:\n#\n# 1) The client machine sees all h11.events objects emitted by the client.\n#\n# 2) The server machine sees all h11.events objects emitted by the server.\n#\n# It also sees the client's Request event.\n#\n# And sometimes, server events are annotated with a _SWITCH_* event. For\n# example, we can have a (Response, _SWITCH_CONNECT) event, which is\n# different from a regular Response event.\n#\n# 3) The keep-alive machine sees the process_keep_alive_disabled() event\n# (which is derived from Request/Response events), and this event\n# transitions it from True -> False, or from False -> False. There's no way\n# to transition back.\n#\n# 4&5) The _SWITCH_* machines transition from False->True when we get a\n# Request that proposes the relevant type of switch (via\n# process_client_switch_proposals), and they go from True->False when we\n# get a Response that has no _SWITCH_* annotation.\n#\n# So that's event-triggered transitions.\n#\n# State-triggered transitions are less standard. What they do here is couple\n# the machines together. The way this works is, when certain *joint*\n# configurations of states are achieved, then we automatically transition to a\n# new *joint* state. 
So, for example, if we're ever in a joint state with\n#\n# client: DONE\n# keep-alive: False\n#\n# then the client state immediately transitions to:\n#\n# client: MUST_CLOSE\n#\n# This is fundamentally different from an event-based transition, because it\n# doesn't matter how we arrived at the {client: DONE, keep-alive: False} state\n# -- maybe the client transitioned SEND_BODY -> DONE, or keep-alive\n# transitioned True -> False. Either way, once this precondition is satisfied,\n# this transition is immediately triggered.\n#\n# What if two conflicting state-based transitions get enabled at the same\n# time? In practice there's only one case where this arises (client DONE ->\n# MIGHT_SWITCH_PROTOCOL versus DONE -> MUST_CLOSE), and we resolve it by\n# explicitly prioritizing the DONE -> MIGHT_SWITCH_PROTOCOL transition.\n#\n# Implementation\n# --------------\n#\n# The event-triggered transitions for the server and client machines are all\n# stored explicitly in a table. Ditto for the state-triggered transitions that\n# involve just the server and client state.\n#\n# The transitions for the other machines, and the state-triggered transitions\n# that involve the other machines, are written out as explicit Python code.\n#\n# It'd be nice if there were some cleaner way to do all this. This isn't\n# *too* terrible, but I feel like it could probably be better.\n#\n# WARNING\n# -------\n#\n# The script that generates the state machine diagrams for the docs knows how\n# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS\n# tables. But it can't automatically read the transitions that are written\n# directly in Python code. So if you touch those, you need to also update the\n# script to keep it in sync!\nfrom typing import cast, Dict, Optional, Set, Tuple, Type, Union\n\nfrom ._events import *\nfrom ._util import LocalProtocolError, Sentinel\n\n# Everything in __all__ gets re-exported as part of the h11 public API.\n__all__ = [\n "CLIENT",\n "SERVER",\n "IDLE",\n "SEND_RESPONSE",\n "SEND_BODY",\n "DONE",\n "MUST_CLOSE",\n "CLOSED",\n "MIGHT_SWITCH_PROTOCOL",\n "SWITCHED_PROTOCOL",\n "ERROR",\n]\n\n\nclass CLIENT(Sentinel, metaclass=Sentinel):\n pass\n\n\nclass SERVER(Sentinel, metaclass=Sentinel):\n pass\n\n\n# States\nclass IDLE(Sentinel, metaclass=Sentinel):\n pass\n\n\nclass SEND_RESPONSE(Sentinel, metaclass=Sentinel):\n pass\n\n\nclass SEND_BODY(Sentinel, metaclass=Sentinel):\n pass\n\n\nclass DONE(Sentinel, metaclass=Sentinel):\n pass\n\n\nclass MUST_CLOSE(Sentinel, metaclass=Sentinel):\n pass\n\n\nclass CLOSED(Sentinel, metaclass=Sentinel):\n pass\n\n\nclass ERROR(Sentinel, metaclass=Sentinel):\n pass\n\n\n# Switch types\nclass MIGHT_SWITCH_PROTOCOL(Sentinel, metaclass=Sentinel):\n pass\n\n\nclass SWITCHED_PROTOCOL(Sentinel, metaclass=Sentinel):\n pass\n\n\nclass _SWITCH_UPGRADE(Sentinel, metaclass=Sentinel):\n pass\n\n\nclass _SWITCH_CONNECT(Sentinel, metaclass=Sentinel):\n pass\n\n\nEventTransitionType = Dict[\n Type[Sentinel],\n Dict[\n Type[Sentinel],\n Dict[Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], Type[Sentinel]],\n ],\n]\n\nEVENT_TRIGGERED_TRANSITIONS: EventTransitionType = {\n CLIENT: {\n IDLE: {Request: SEND_BODY, ConnectionClosed: CLOSED},\n SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},\n DONE: {ConnectionClosed: CLOSED},\n MUST_CLOSE: {ConnectionClosed: CLOSED},\n CLOSED: {ConnectionClosed: CLOSED},\n MIGHT_SWITCH_PROTOCOL: {},\n SWITCHED_PROTOCOL: {},\n ERROR: {},\n },\n SERVER: {\n IDLE: {\n ConnectionClosed: CLOSED,\n Response: 
SEND_BODY,\n # Special case: server sees client Request events, in this form\n (Request, CLIENT): SEND_RESPONSE,\n },\n SEND_RESPONSE: {\n InformationalResponse: SEND_RESPONSE,\n Response: SEND_BODY,\n (InformationalResponse, _SWITCH_UPGRADE): SWITCHED_PROTOCOL,\n (Response, _SWITCH_CONNECT): SWITCHED_PROTOCOL,\n },\n SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},\n DONE: {ConnectionClosed: CLOSED},\n MUST_CLOSE: {ConnectionClosed: CLOSED},\n CLOSED: {ConnectionClosed: CLOSED},\n SWITCHED_PROTOCOL: {},\n ERROR: {},\n },\n}\n\nStateTransitionType = Dict[\n Tuple[Type[Sentinel], Type[Sentinel]], Dict[Type[Sentinel], Type[Sentinel]]\n]\n\n# NB: there are also some special-case state-triggered transitions hard-coded\n# into _fire_state_triggered_transitions below.\nSTATE_TRIGGERED_TRANSITIONS: StateTransitionType = {\n # (Client state, Server state) -> new states\n # Protocol negotiation\n (MIGHT_SWITCH_PROTOCOL, SWITCHED_PROTOCOL): {CLIENT: SWITCHED_PROTOCOL},\n # Socket shutdown\n (CLOSED, DONE): {SERVER: MUST_CLOSE},\n (CLOSED, IDLE): {SERVER: MUST_CLOSE},\n (ERROR, DONE): {SERVER: MUST_CLOSE},\n (DONE, CLOSED): {CLIENT: MUST_CLOSE},\n (IDLE, CLOSED): {CLIENT: MUST_CLOSE},\n (DONE, ERROR): {CLIENT: MUST_CLOSE},\n}\n\n\nclass ConnectionState:\n def __init__(self) -> None:\n # Extra bits of state that don't quite fit into the state model.\n\n # If this is False then it enables the automatic DONE -> MUST_CLOSE\n # transition. Don't set this directly; call .keep_alive_disabled()\n self.keep_alive = True\n\n # This is a subset of {UPGRADE, CONNECT}, containing the proposals\n # made by the client for switching protocols.\n self.pending_switch_proposals: Set[Type[Sentinel]] = set()\n\n self.states: Dict[Type[Sentinel], Type[Sentinel]] = {CLIENT: IDLE, SERVER: IDLE}\n\n def process_error(self, role: Type[Sentinel]) -> None:\n self.states[role] = ERROR\n self._fire_state_triggered_transitions()\n\n def process_keep_alive_disabled(self) -> None:\n self.keep_alive = False\n self._fire_state_triggered_transitions()\n\n def process_client_switch_proposal(self, switch_event: Type[Sentinel]) -> None:\n self.pending_switch_proposals.add(switch_event)\n self._fire_state_triggered_transitions()\n\n def process_event(\n self,\n role: Type[Sentinel],\n event_type: Type[Event],\n server_switch_event: Optional[Type[Sentinel]] = None,\n ) -> None:\n _event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]] = event_type\n if server_switch_event is not None:\n assert role is SERVER\n if server_switch_event not in self.pending_switch_proposals:\n raise LocalProtocolError(\n "Received server _SWITCH_UPGRADE event without a pending proposal"\n )\n _event_type = (event_type, server_switch_event)\n if server_switch_event is None and _event_type is Response:\n self.pending_switch_proposals = set()\n self._fire_event_triggered_transitions(role, _event_type)\n # Special case: the server state does get to see Request\n # events.\n if _event_type is Request:\n assert role is CLIENT\n self._fire_event_triggered_transitions(SERVER, (Request, CLIENT))\n self._fire_state_triggered_transitions()\n\n def _fire_event_triggered_transitions(\n self,\n role: Type[Sentinel],\n event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]],\n ) -> None:\n state = self.states[role]\n try:\n new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type]\n except KeyError:\n event_type = cast(Type[Event], event_type)\n raise LocalProtocolError(\n "can't handle event type {} when role={} and state={}".format(\n 
event_type.__name__, role, self.states[role]\n )\n ) from None\n self.states[role] = new_state\n\n def _fire_state_triggered_transitions(self) -> None:\n # We apply these rules repeatedly until converging on a fixed point\n while True:\n start_states = dict(self.states)\n\n # It could happen that both these special-case transitions are\n # enabled at the same time:\n #\n # DONE -> MIGHT_SWITCH_PROTOCOL\n # DONE -> MUST_CLOSE\n #\n # For example, this will always be true of a HTTP/1.0 client\n # requesting CONNECT. If this happens, the protocol switch takes\n # priority. From there the client will either go to\n # SWITCHED_PROTOCOL, in which case it's none of our business when\n # they close the connection, or else the server will deny the\n # request, in which case the client will go back to DONE and then\n # from there to MUST_CLOSE.\n if self.pending_switch_proposals:\n if self.states[CLIENT] is DONE:\n self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL\n\n if not self.pending_switch_proposals:\n if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL:\n self.states[CLIENT] = DONE\n\n if not self.keep_alive:\n for role in (CLIENT, SERVER):\n if self.states[role] is DONE:\n self.states[role] = MUST_CLOSE\n\n # Tabular state-triggered transitions\n joint_state = (self.states[CLIENT], self.states[SERVER])\n changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {})\n self.states.update(changes)\n\n if self.states == start_states:\n # Fixed point reached\n return\n\n def start_next_cycle(self) -> None:\n if self.states != {CLIENT: DONE, SERVER: DONE}:\n raise LocalProtocolError(\n f"not in a reusable state. self.states={self.states}"\n )\n # Can't reach DONE/DONE with any of these active, but still, let's be\n # sure.\n assert self.keep_alive\n assert not self.pending_switch_proposals\n self.states = {CLIENT: IDLE, SERVER: IDLE}\n
|
.venv\Lib\site-packages\h11\_state.py
|
_state.py
|
Python
| 13,231 | 0.95 | 0.147945 | 0.463492 |
python-kit
| 375 |
2024-07-04T07:20:44.363998
|
GPL-3.0
| false |
44e194babb85892216fa0ff48aa057d0
|
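To make the table-driven transitions above concrete, here is a hedged walkthrough of one plain request/response cycle using the private `h11._state` and `h11._events` modules directly (normally `h11.Connection` drives this machinery for you).

```python
from h11._events import EndOfMessage, Request, Response
from h11._state import (
    CLIENT, SERVER, IDLE, SEND_BODY, SEND_RESPONSE, DONE, ConnectionState,
)

cs = ConnectionState()
assert cs.states == {CLIENT: IDLE, SERVER: IDLE}

# The client emits a Request: the client moves to SEND_BODY, and the server
# machine (which also "sees" client Request events) moves to SEND_RESPONSE.
cs.process_event(CLIENT, Request)
assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}

cs.process_event(CLIENT, EndOfMessage)
cs.process_event(SERVER, Response)
cs.process_event(SERVER, EndOfMessage)
assert cs.states == {CLIENT: DONE, SERVER: DONE}

# Both sides DONE with keep-alive intact, so the connection can be reused.
cs.start_next_cycle()
assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
```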
from typing import Any, Dict, NoReturn, Pattern, Tuple, Type, TypeVar, Union\n\n__all__ = [\n "ProtocolError",\n "LocalProtocolError",\n "RemoteProtocolError",\n "validate",\n "bytesify",\n]\n\n\nclass ProtocolError(Exception):\n """Exception indicating a violation of the HTTP/1.1 protocol.\n\n This as an abstract base class, with two concrete base classes:\n :exc:`LocalProtocolError`, which indicates that you tried to do something\n that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which\n indicates that the remote peer tried to do something that HTTP/1.1 says is\n illegal. See :ref:`error-handling` for details.\n\n In addition to the normal :exc:`Exception` features, it has one attribute:\n\n .. attribute:: error_status_hint\n\n This gives a suggestion as to what status code a server might use if\n this error occurred as part of a request.\n\n For a :exc:`RemoteProtocolError`, this is useful as a suggestion for\n how you might want to respond to a misbehaving peer, if you're\n implementing a server.\n\n For a :exc:`LocalProtocolError`, this can be taken as a suggestion for\n how your peer might have responded to *you* if h11 had allowed you to\n continue.\n\n The default is 400 Bad Request, a generic catch-all for protocol\n violations.\n\n """\n\n def __init__(self, msg: str, error_status_hint: int = 400) -> None:\n if type(self) is ProtocolError:\n raise TypeError("tried to directly instantiate ProtocolError")\n Exception.__init__(self, msg)\n self.error_status_hint = error_status_hint\n\n\n# Strategy: there are a number of public APIs where a LocalProtocolError can\n# be raised (send(), all the different event constructors, ...), and only one\n# public API where RemoteProtocolError can be raised\n# (receive_data()). Therefore we always raise LocalProtocolError internally,\n# and then receive_data will translate this into a RemoteProtocolError.\n#\n# Internally:\n# LocalProtocolError is the generic "ProtocolError".\n# Externally:\n# LocalProtocolError is for local errors and RemoteProtocolError is for\n# remote errors.\nclass LocalProtocolError(ProtocolError):\n def _reraise_as_remote_protocol_error(self) -> NoReturn:\n # After catching a LocalProtocolError, use this method to re-raise it\n # as a RemoteProtocolError. This method must be called from inside an\n # except: block.\n #\n # An easy way to get an equivalent RemoteProtocolError is just to\n # modify 'self' in place.\n self.__class__ = RemoteProtocolError # type: ignore\n # But the re-raising is somewhat non-trivial -- you might think that\n # now that we've modified the in-flight exception object, that just\n # doing 'raise' to re-raise it would be enough. But it turns out that\n # this doesn't work, because Python tracks the exception type\n # (exc_info[0]) separately from the exception object (exc_info[1]),\n # and we only modified the latter. 
So we really do need to re-raise\n # the new type explicitly.\n # On py3, the traceback is part of the exception object, so our\n # in-place modification preserved it and we can just re-raise:\n raise self\n\n\nclass RemoteProtocolError(ProtocolError):\n pass\n\n\ndef validate(\n regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any\n) -> Dict[str, bytes]:\n match = regex.fullmatch(data)\n if not match:\n if format_args:\n msg = msg.format(*format_args)\n raise LocalProtocolError(msg)\n return match.groupdict()\n\n\n# Sentinel values\n#\n# - Inherit identity-based comparison and hashing from object\n# - Have a nice repr\n# - Have a *bonus property*: type(sentinel) is sentinel\n#\n# The bonus property is useful if you want to take the return value from\n# next_event() and do some sort of dispatch based on type(event).\n\n_T_Sentinel = TypeVar("_T_Sentinel", bound="Sentinel")\n\n\nclass Sentinel(type):\n def __new__(\n cls: Type[_T_Sentinel],\n name: str,\n bases: Tuple[type, ...],\n namespace: Dict[str, Any],\n **kwds: Any\n ) -> _T_Sentinel:\n assert bases == (Sentinel,)\n v = super().__new__(cls, name, bases, namespace, **kwds)\n v.__class__ = v # type: ignore\n return v\n\n def __repr__(self) -> str:\n return self.__name__\n\n\n# Used for methods, request targets, HTTP versions, header names, and header\n# values. Accepts ascii-strings, or bytes/bytearray/memoryview/..., and always\n# returns bytes.\ndef bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:\n # Fast-path:\n if type(s) is bytes:\n return s\n if isinstance(s, str):\n s = s.encode("ascii")\n if isinstance(s, int):\n raise TypeError("expected bytes-like object, not int")\n return bytes(s)\n
|
.venv\Lib\site-packages\h11\_util.py
|
_util.py
|
Python
| 4,888 | 0.95 | 0.214815 | 0.357798 |
awesome-app
| 276 |
2023-08-22T13:11:12.218355
|
MIT
| false |
c2c5fade65d8b57e238eecb1e660af7e
|
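A short sketch of the utilities defined in `_util.py` above, including the "bonus property" of sentinels; the `EXAMPLE` sentinel and the version regex are made up for illustration.

```python
import re

from h11._util import LocalProtocolError, Sentinel, bytesify, validate

# bytesify() normalises ASCII str / bytearray / memoryview inputs to bytes.
assert bytesify("GET") == b"GET"
assert bytesify(bytearray(b"/")) == b"/"

# validate() fullmatches a regex and returns the named groups, raising
# LocalProtocolError (default 400 hint) on malformed data.
version_re = re.compile(rb"HTTP/(?P<major>[0-9])\.(?P<minor>[0-9])")
assert validate(version_re, b"HTTP/1.1") == {"major": b"1", "minor": b"1"}
try:
    validate(version_re, b"HTTP/oops", "bad version {!r}", b"HTTP/oops")
except LocalProtocolError as exc:
    assert exc.error_status_hint == 400

# Sentinel classes are their own type, which makes type(event) dispatch easy.
class EXAMPLE(Sentinel, metaclass=Sentinel):
    pass

assert type(EXAMPLE) is EXAMPLE and repr(EXAMPLE) == "EXAMPLE"
```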
# This file must be kept very simple, because it is consumed from several\n# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc.\n\n# We use a simple scheme:\n# 1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev\n# where the +dev versions are never released into the wild, they're just what\n# we stick into the VCS in between releases.\n#\n# This is compatible with PEP 440:\n# http://legacy.python.org/dev/peps/pep-0440/\n# via the use of the "local suffix" "+dev", which is disallowed on index\n# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we\n# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before*\n# 1.0.0.)\n\n__version__ = "0.16.0"\n
|
.venv\Lib\site-packages\h11\_version.py
|
_version.py
|
Python
| 686 | 0.95 | 0 | 0.928571 |
python-kit
| 355 |
2025-02-15T02:09:31.705976
|
MIT
| false |
56ebcfa15b3e8ef5e92d2c3019ba95d2
|
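The "+dev" local-suffix scheme described in `_version.py` can be checked with the third-party `packaging` library (an assumption here; h11 itself does not depend on it): a local version label sorts after the matching release, whereas a `.dev` pre-release segment sorts before it.

```python
from packaging.version import Version

# PEP 440 ordering: "+dev" (local label) comes after the plain release,
# ".dev0" (pre-release segment) comes before it.
assert Version("1.0.0+dev") > Version("1.0.0")
assert Version("1.0.0.dev0") < Version("1.0.0")
```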
# A highish-level implementation of the HTTP/1.1 wire protocol (RFC 7230),\n# containing no networking code at all, loosely modelled on hyper-h2's generic\n# implementation of HTTP/2 (and in particular the h2.connection.H2Connection\n# class). There's still a bunch of subtle details you need to get right if you\n# want to make this actually useful, because it doesn't implement all the\n# semantics to check that what you're asking to write to the wire is sensible,\n# but at least it gets you out of dealing with the wire itself.\n\nfrom h11._connection import Connection, NEED_DATA, PAUSED\nfrom h11._events import (\n ConnectionClosed,\n Data,\n EndOfMessage,\n Event,\n InformationalResponse,\n Request,\n Response,\n)\nfrom h11._state import (\n CLIENT,\n CLOSED,\n DONE,\n ERROR,\n IDLE,\n MIGHT_SWITCH_PROTOCOL,\n MUST_CLOSE,\n SEND_BODY,\n SEND_RESPONSE,\n SERVER,\n SWITCHED_PROTOCOL,\n)\nfrom h11._util import LocalProtocolError, ProtocolError, RemoteProtocolError\nfrom h11._version import __version__\n\nPRODUCT_ID = "python-h11/" + __version__\n\n\n__all__ = (\n "Connection",\n "NEED_DATA",\n "PAUSED",\n "ConnectionClosed",\n "Data",\n "EndOfMessage",\n "Event",\n "InformationalResponse",\n "Request",\n "Response",\n "CLIENT",\n "CLOSED",\n "DONE",\n "ERROR",\n "IDLE",\n "MUST_CLOSE",\n "SEND_BODY",\n "SEND_RESPONSE",\n "SERVER",\n "SWITCHED_PROTOCOL",\n "ProtocolError",\n "LocalProtocolError",\n "RemoteProtocolError",\n)\n
|
.venv\Lib\site-packages\h11\__init__.py
|
__init__.py
|
Python
| 1,507 | 0.95 | 0.032258 | 0.12069 |
node-utils
| 907 |
2023-11-08T16:09:50.855978
|
GPL-3.0
| false |
f9b5f613283814d5a7216bc0c1e90769
|
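Beyond re-exports, the only thing `__init__.py` defines itself is the `PRODUCT_ID` token. A tiny, hedged example of how it might be used (the host is a placeholder):

```python
import h11

# PRODUCT_ID is "python-h11/<version>", a convenient default token for
# User-Agent (client) or Server (server) headers.
request = h11.Request(
    method="GET",
    target="/",
    headers=[("Host", "example.com"), ("User-Agent", h11.PRODUCT_ID)],
)
```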
pip\n
|
.venv\Lib\site-packages\h11-0.16.0.dist-info\INSTALLER
|
INSTALLER
|
Other
| 4 | 0.5 | 0 | 0 |
python-kit
| 861 |
2024-12-29T00:53:08.446767
|
GPL-3.0
| false |
365c9bfeb7d89244f2ce01c1de44cb85
|
Metadata-Version: 2.4\nName: h11\nVersion: 0.16.0\nSummary: A pure-Python, bring-your-own-I/O implementation of HTTP/1.1\nHome-page: https://github.com/python-hyper/h11\nAuthor: Nathaniel J. Smith\nAuthor-email: njs@pobox.com\nLicense: MIT\nClassifier: Development Status :: 3 - Alpha\nClassifier: Intended Audience :: Developers\nClassifier: License :: OSI Approved :: MIT License\nClassifier: Programming Language :: Python :: Implementation :: CPython\nClassifier: Programming Language :: Python :: Implementation :: PyPy\nClassifier: Programming Language :: Python :: 3\nClassifier: Programming Language :: Python :: 3 :: Only\nClassifier: Programming Language :: Python :: 3.8\nClassifier: Programming Language :: Python :: 3.9\nClassifier: Programming Language :: Python :: 3.10\nClassifier: Programming Language :: Python :: 3.11\nClassifier: Programming Language :: Python :: 3.12\nClassifier: Topic :: Internet :: WWW/HTTP\nClassifier: Topic :: System :: Networking\nRequires-Python: >=3.8\nLicense-File: LICENSE.txt\nDynamic: author\nDynamic: author-email\nDynamic: classifier\nDynamic: description\nDynamic: home-page\nDynamic: license\nDynamic: license-file\nDynamic: requires-python\nDynamic: summary\n\nh11\n===\n\n.. image:: https://travis-ci.org/python-hyper/h11.svg?branch=master\n :target: https://travis-ci.org/python-hyper/h11\n :alt: Automated test status\n\n.. image:: https://codecov.io/gh/python-hyper/h11/branch/master/graph/badge.svg\n :target: https://codecov.io/gh/python-hyper/h11\n :alt: Test coverage\n\n.. image:: https://readthedocs.org/projects/h11/badge/?version=latest\n :target: http://h11.readthedocs.io/en/latest/?badge=latest\n :alt: Documentation Status\n\nThis is a little HTTP/1.1 library written from scratch in Python,\nheavily inspired by `hyper-h2 <https://hyper-h2.readthedocs.io/>`_.\n\nIt's a "bring-your-own-I/O" library; h11 contains no IO code\nwhatsoever. This means you can hook h11 up to your favorite network\nAPI, and that could be anything you want: synchronous, threaded,\nasynchronous, or your own implementation of `RFC 6214\n<https://tools.ietf.org/html/rfc6214>`_ -- h11 won't judge you.\n(Compare this to the current state of the art, where every time a `new\nnetwork API <https://trio.readthedocs.io/>`_ comes along then someone\ngets to start over reimplementing the entire HTTP protocol from\nscratch.) Cory Benfield made an `excellent blog post describing the\nbenefits of this approach\n<https://lukasa.co.uk/2015/10/The_New_Hyper/>`_, or if you like video\nthen here's his `PyCon 2016 talk on the same theme\n<https://www.youtube.com/watch?v=7cC3_jGwl_U>`_.\n\nThis also means that h11 is not immediately useful out of the box:\nit's a toolkit for building programs that speak HTTP, not something\nthat could directly replace ``requests`` or ``twisted.web`` or\nwhatever. 
But h11 makes it much easier to implement something like\n``requests`` or ``twisted.web``.\n\nAt a high level, working with h11 goes like this:\n\n1) First, create an ``h11.Connection`` object to track the state of a\n single HTTP/1.1 connection.\n\n2) When you read data off the network, pass it to\n ``conn.receive_data(...)``; you'll get back a list of objects\n representing high-level HTTP "events".\n\n3) When you want to send a high-level HTTP event, create the\n corresponding "event" object and pass it to ``conn.send(...)``;\n this will give you back some bytes that you can then push out\n through the network.\n\nFor example, a client might instantiate and then send a\n``h11.Request`` object, then zero or more ``h11.Data`` objects for the\nrequest body (e.g., if this is a POST), and then a\n``h11.EndOfMessage`` to indicate the end of the message. Then the\nserver would then send back a ``h11.Response``, some ``h11.Data``, and\nits own ``h11.EndOfMessage``. If either side violates the protocol,\nyou'll get a ``h11.ProtocolError`` exception.\n\nh11 is suitable for implementing both servers and clients, and has a\npleasantly symmetric API: the events you send as a client are exactly\nthe ones that you receive as a server and vice-versa.\n\n`Here's an example of a tiny HTTP client\n<https://github.com/python-hyper/h11/blob/master/examples/basic-client.py>`_\n\nIt also has `a fine manual <https://h11.readthedocs.io/>`_.\n\nFAQ\n---\n\n*Whyyyyy?*\n\nI wanted to play with HTTP in `Curio\n<https://curio.readthedocs.io/en/latest/tutorial.html>`__ and `Trio\n<https://trio.readthedocs.io>`__, which at the time didn't have any\nHTTP libraries. So I thought, no big deal, Python has, like, a dozen\ndifferent implementations of HTTP, surely I can find one that's\nreusable. I didn't find one, but I did find Cory's call-to-arms\nblog-post. So I figured, well, fine, if I have to implement HTTP from\nscratch, at least I can make sure no-one *else* has to ever again.\n\n*Should I use it?*\n\nMaybe. You should be aware that it's a very young project. But, it's\nfeature complete and has an exhaustive test-suite and complete docs,\nso the next step is for people to try using it and see how it goes\n:-). If you do then please let us know -- if nothing else we'll want\nto talk to you before making any incompatible changes!\n\n*What are the features/limitations?*\n\nRoughly speaking, it's trying to be a robust, complete, and non-hacky\nimplementation of the first "chapter" of the HTTP/1.1 spec: `RFC 7230:\nHTTP/1.1 Message Syntax and Routing\n<https://tools.ietf.org/html/rfc7230>`_. That is, it mostly focuses on\nimplementing HTTP at the level of taking bytes on and off the wire,\nand the headers related to that, and tries to be anal about spec\nconformance. It doesn't know about higher-level concerns like URL\nrouting, conditional GETs, cross-origin cookie policies, or content\nnegotiation. 
But it does know how to take care of framing,\ncross-version differences in keep-alive handling, and the "obsolete\nline folding" rule, so you can focus your energies on the hard /\ninteresting parts for your application, and it tries to support the\nfull specification in the sense that any useful HTTP/1.1 conformant\napplication should be able to use h11.\n\nIt's pure Python, and has no dependencies outside of the standard\nlibrary.\n\nIt has a test suite with 100.0% coverage for both statements and\nbranches.\n\nCurrently it supports Python 3 (testing on 3.8-3.12) and PyPy 3.\nThe last Python 2-compatible version was h11 0.11.x.\n(Originally it had a Cython wrapper for `http-parser\n<https://github.com/nodejs/http-parser>`_ and a beautiful nested state\nmachine implemented with ``yield from`` to postprocess the output. But\nI had to take these out -- the new *parser* needs fewer lines-of-code\nthan the old *parser wrapper*, is written in pure Python, uses no\nexotic language syntax, and has more features. It's sad, really; that\nold state machine was really slick. I just need a few sentences here\nto mourn that.)\n\nI don't know how fast it is. I haven't benchmarked or profiled it yet,\nso it's probably got a few pointless hot spots, and I've been trying\nto err on the side of simplicity and robustness instead of\nmicro-optimization. But at the architectural level I tried hard to\navoid fundamentally bad decisions, e.g., I believe that all the\nparsing algorithms remain linear-time even in the face of pathological\ninput like slowloris, and there are no byte-by-byte loops. (I also\nbelieve that it maintains bounded memory usage in the face of\narbitrary/pathological input.)\n\nThe whole library is ~800 lines-of-code. You can read and understand\nthe whole thing in less than an hour. Most of the energy invested in\nthis so far has been spent on trying to keep things simple by\nminimizing special-cases and ad hoc state manipulation; even though it\nis now quite small and simple, I'm still annoyed that I haven't\nfigured out how to make it even smaller and simpler. (Unfortunately,\nHTTP does not lend itself to simplicity.)\n\nThe API is ~feature complete and I don't expect the general outlines\nto change much, but you can't judge an API's ergonomics until you\nactually document and use it, so I'd expect some changes in the\ndetails.\n\n*How do I try it?*\n\n.. code-block:: sh\n\n $ pip install h11\n $ git clone git@github.com:python-hyper/h11\n $ cd h11/examples\n $ python basic-client.py\n\nand go from there.\n\n*License?*\n\nMIT\n\n*Code of conduct?*\n\nContributors are requested to follow our `code of conduct\n<https://github.com/python-hyper/h11/blob/master/CODE_OF_CONDUCT.md>`_ in\nall project spaces.\n
|
.venv\Lib\site-packages\h11-0.16.0.dist-info\METADATA
|
METADATA
|
Other
| 8,348 | 0.95 | 0.064356 | 0.036145 |
python-kit
| 291 |
2024-02-12T04:30:46.404198
|
BSD-3-Clause
| false |
73a390f47a824011df080be2f79250f0
|
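The three-step workflow the README describes can be sketched with the public API alone; this is a minimal illustration, with a placeholder host and canned response bytes rather than real network I/O.

```python
import h11

conn = h11.Connection(our_role=h11.CLIENT)

# Step 3: high-level events in, wire bytes out.
to_send = conn.send(
    h11.Request(method="GET", target="/", headers=[("Host", "example.com")])
)
to_send += conn.send(h11.EndOfMessage())
# ...write `to_send` to a socket using whatever I/O layer you prefer...

# Step 2: bytes read off the network go into receive_data(), and next_event()
# hands back Response / Data / EndOfMessage events (or h11.NEED_DATA when it
# needs more bytes).
conn.receive_data(b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok")
while True:
    event = conn.next_event()
    if event is h11.NEED_DATA or isinstance(event, h11.EndOfMessage):
        break
    print(event)  # Response(...), then Data(data=b"ok")
```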
h11-0.16.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4\nh11-0.16.0.dist-info/METADATA,sha256=KPMmCYrAn8unm48YD5YIfIQf4kViFct7hyqcfVzRnWQ,8348\nh11-0.16.0.dist-info/RECORD,,\nh11-0.16.0.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91\nh11-0.16.0.dist-info/licenses/LICENSE.txt,sha256=N9tbuFkm2yikJ6JYZ_ELEjIAOuob5pzLhRE4rbjm82E,1124\nh11-0.16.0.dist-info/top_level.txt,sha256=F7dC4jl3zeh8TGHEPaWJrMbeuoWbS379Gwdi-Yvdcis,4\nh11/__init__.py,sha256=iO1KzkSO42yZ6ffg-VMgbx_ZVTWGUY00nRYEWn-s3kY,1507\nh11/__pycache__/__init__.cpython-313.pyc,,\nh11/__pycache__/_abnf.cpython-313.pyc,,\nh11/__pycache__/_connection.cpython-313.pyc,,\nh11/__pycache__/_events.cpython-313.pyc,,\nh11/__pycache__/_headers.cpython-313.pyc,,\nh11/__pycache__/_readers.cpython-313.pyc,,\nh11/__pycache__/_receivebuffer.cpython-313.pyc,,\nh11/__pycache__/_state.cpython-313.pyc,,\nh11/__pycache__/_util.cpython-313.pyc,,\nh11/__pycache__/_version.cpython-313.pyc,,\nh11/__pycache__/_writers.cpython-313.pyc,,\nh11/_abnf.py,sha256=ybixr0xsupnkA6GFAyMubuXF6Tc1lb_hF890NgCsfNc,4815\nh11/_connection.py,sha256=k9YRVf6koZqbttBW36xSWaJpWdZwa-xQVU9AHEo9DuI,26863\nh11/_events.py,sha256=I97aXoal1Wu7dkL548BANBUCkOIbe-x5CioYA9IBY14,11792\nh11/_headers.py,sha256=P7D-lBNxHwdLZPLimmYwrPG-9ZkjElvvJZJdZAgSP-4,10412\nh11/_readers.py,sha256=a4RypORUCC3d0q_kxPuBIM7jTD8iLt5X91TH0FsduN4,8590\nh11/_receivebuffer.py,sha256=xrspsdsNgWFxRfQcTXxR8RrdjRXXTK0Io5cQYWpJ1Ws,5252\nh11/_state.py,sha256=_5LG_BGR8FCcFQeBPH-TMHgm_-B-EUcWCnQof_9XjFE,13231\nh11/_util.py,sha256=LWkkjXyJaFlAy6Lt39w73UStklFT5ovcvo0TkY7RYuk,4888\nh11/_version.py,sha256=GVSsbPSPDcOuF6ptfIiXnVJoaEm3ygXbMnqlr_Giahw,686\nh11/_writers.py,sha256=oFKm6PtjeHfbj4RLX7VB7KDc1gIY53gXG3_HR9ltmTA,5081\nh11/py.typed,sha256=sow9soTwP9T_gEAQSVh7Gb8855h04Nwmhs2We-JRgZM,7\n
|
.venv\Lib\site-packages\h11-0.16.0.dist-info\RECORD
|
RECORD
|
Other
| 1,830 | 0.7 | 0 | 0 |
node-utils
| 779 |
2024-11-30T02:21:01.915703
|
GPL-3.0
| false |
4a71578f0082aac2c4d9e76ce05de2a6
|
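Each RECORD row has the form `path,sha256=<digest>,size`, where the digest is the URL-safe base64 of the file's SHA-256 with `=` padding stripped, per the wheel specification. A small sketch of re-checking one entry; the install prefix is hypothetical.

```python
import base64
import hashlib
from pathlib import Path

def record_digest(data: bytes) -> str:
    # URL-safe base64 of the SHA-256 digest, '=' padding stripped --
    # the encoding used in the "sha256=..." field of RECORD.
    return base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")

site_packages = Path("/path/to/site-packages")  # hypothetical install location
entry = "h11/_version.py,sha256=GVSsbPSPDcOuF6ptfIiXnVJoaEm3ygXbMnqlr_Giahw,686"
path, hash_field, size = entry.split(",")
data = (site_packages / path).read_bytes()
assert hash_field == "sha256=" + record_digest(data) and int(size) == len(data)
```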
h11\n
|
.venv\Lib\site-packages\h11-0.16.0.dist-info\top_level.txt
|
top_level.txt
|
Other
| 4 | 0.5 | 0 | 0 |
vue-tools
| 130 |
2025-02-06T18:00:33.893027
|
Apache-2.0
| false |
58b314cbeb0f4b3c2be5961ed1452c97
|
Wheel-Version: 1.0\nGenerator: setuptools (78.1.0)\nRoot-Is-Purelib: true\nTag: py3-none-any\n\n
|
.venv\Lib\site-packages\h11-0.16.0.dist-info\WHEEL
|
WHEEL
|
Other
| 91 | 0.5 | 0 | 0 |
node-utils
| 860 |
2024-03-22T13:58:52.918417
|
GPL-3.0
| false |
9c3ef2336f4e16b5f7573c25c683ce63
|
The MIT License (MIT)\n\nCopyright (c) 2016 Nathaniel J. Smith <njs@pobox.com> and other contributors\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n"Software"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n
|
.venv\Lib\site-packages\h11-0.16.0.dist-info\licenses\LICENSE.txt
|
LICENSE.txt
|
Other
| 1,124 | 0.7 | 0 | 0 |
awesome-app
| 670 |
2025-02-02T08:04:23.698881
|
Apache-2.0
| false |
f5501d19c3116f4aaeef89369f458693
|
from __future__ import annotations\n\nimport contextlib\nimport typing\n\nfrom ._models import URL, Extensions, HeaderTypes, Response\nfrom ._sync.connection_pool import ConnectionPool\n\n\ndef request(\n method: bytes | str,\n url: URL | bytes | str,\n *,\n headers: HeaderTypes = None,\n content: bytes | typing.Iterator[bytes] | None = None,\n extensions: Extensions | None = None,\n) -> Response:\n """\n Sends an HTTP request, returning the response.\n\n ```\n response = httpcore.request("GET", "https://www.example.com/")\n ```\n\n Arguments:\n method: The HTTP method for the request. Typically one of `"GET"`,\n `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.\n url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,\n or as str/bytes.\n headers: The HTTP request headers. Either as a dictionary of str/bytes,\n or as a list of two-tuples of str/bytes.\n content: The content of the request body. Either as bytes,\n or as a bytes iterator.\n extensions: A dictionary of optional extra information included on the request.\n Possible keys include `"timeout"`.\n\n Returns:\n An instance of `httpcore.Response`.\n """\n with ConnectionPool() as pool:\n return pool.request(\n method=method,\n url=url,\n headers=headers,\n content=content,\n extensions=extensions,\n )\n\n\n@contextlib.contextmanager\ndef stream(\n method: bytes | str,\n url: URL | bytes | str,\n *,\n headers: HeaderTypes = None,\n content: bytes | typing.Iterator[bytes] | None = None,\n extensions: Extensions | None = None,\n) -> typing.Iterator[Response]:\n """\n Sends an HTTP request, returning the response within a content manager.\n\n ```\n with httpcore.stream("GET", "https://www.example.com/") as response:\n ...\n ```\n\n When using the `stream()` function, the body of the response will not be\n automatically read. If you want to access the response body you should\n either use `content = response.read()`, or `for chunk in response.iter_content()`.\n\n Arguments:\n method: The HTTP method for the request. Typically one of `"GET"`,\n `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.\n url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,\n or as str/bytes.\n headers: The HTTP request headers. Either as a dictionary of str/bytes,\n or as a list of two-tuples of str/bytes.\n content: The content of the request body. Either as bytes,\n or as a bytes iterator.\n extensions: A dictionary of optional extra information included on the request.\n Possible keys include `"timeout"`.\n\n Returns:\n An instance of `httpcore.Response`.\n """\n with ConnectionPool() as pool:\n with pool.stream(\n method=method,\n url=url,\n headers=headers,\n content=content,\n extensions=extensions,\n ) as response:\n yield response\n
|
.venv\Lib\site-packages\httpcore\_api.py
|
_api.py
|
Python
| 3,146 | 0.95 | 0.06383 | 0.024691 |
node-utils
| 778 |
2023-09-08T14:41:15.105101
|
GPL-3.0
| false |
13841642626649d19159c2da103fb35f
|
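A short usage sketch of the two helpers defined in `_api.py` above; the URL is the same placeholder the docstrings use, and a working network connection is assumed.

```python
import httpcore

# One-shot request: the helper spins up a throwaway ConnectionPool and reads
# the body before returning.
response = httpcore.request("GET", "https://www.example.com/")
print(response.status, response.headers)
body = response.read()

# Streaming variant: the body is not read eagerly; iterate it chunk by chunk
# inside the context manager.
with httpcore.stream("GET", "https://www.example.com/") as response:
    for chunk in response.iter_stream():
        ...
```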
import contextlib\nimport typing\n\nExceptionMapping = typing.Mapping[typing.Type[Exception], typing.Type[Exception]]\n\n\n@contextlib.contextmanager\ndef map_exceptions(map: ExceptionMapping) -> typing.Iterator[None]:\n try:\n yield\n except Exception as exc: # noqa: PIE786\n for from_exc, to_exc in map.items():\n if isinstance(exc, from_exc):\n raise to_exc(exc) from exc\n raise # pragma: nocover\n\n\nclass ConnectionNotAvailable(Exception):\n pass\n\n\nclass ProxyError(Exception):\n pass\n\n\nclass UnsupportedProtocol(Exception):\n pass\n\n\nclass ProtocolError(Exception):\n pass\n\n\nclass RemoteProtocolError(ProtocolError):\n pass\n\n\nclass LocalProtocolError(ProtocolError):\n pass\n\n\n# Timeout errors\n\n\nclass TimeoutException(Exception):\n pass\n\n\nclass PoolTimeout(TimeoutException):\n pass\n\n\nclass ConnectTimeout(TimeoutException):\n pass\n\n\nclass ReadTimeout(TimeoutException):\n pass\n\n\nclass WriteTimeout(TimeoutException):\n pass\n\n\n# Network errors\n\n\nclass NetworkError(Exception):\n pass\n\n\nclass ConnectError(NetworkError):\n pass\n\n\nclass ReadError(NetworkError):\n pass\n\n\nclass WriteError(NetworkError):\n pass\n
|
.venv\Lib\site-packages\httpcore\_exceptions.py
|
_exceptions.py
|
Python
| 1,184 | 0.95 | 0.234568 | 0.045455 |
react-lib
| 616 |
2023-07-14T04:16:01.276210
|
GPL-3.0
| false |
5e5c9bf32b3d2bd8375ad90ce273067d
|
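The `map_exceptions` helper above is internal to httpcore, but its behaviour is easy to show: exceptions raised inside the block are translated into httpcore's own hierarchy, with the original chained as the cause. A minimal sketch:

```python
import httpcore
from httpcore._exceptions import map_exceptions  # private helper, shown above

try:
    with map_exceptions({OSError: httpcore.ConnectError}):
        raise OSError("connection refused")
except httpcore.ConnectError as exc:
    # The original low-level exception is preserved via `raise ... from exc`.
    assert isinstance(exc.__cause__, OSError)
```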
from __future__ import annotations\n\nimport base64\nimport ssl\nimport typing\nimport urllib.parse\n\n# Functions for typechecking...\n\n\nByteOrStr = typing.Union[bytes, str]\nHeadersAsSequence = typing.Sequence[typing.Tuple[ByteOrStr, ByteOrStr]]\nHeadersAsMapping = typing.Mapping[ByteOrStr, ByteOrStr]\nHeaderTypes = typing.Union[HeadersAsSequence, HeadersAsMapping, None]\n\nExtensions = typing.MutableMapping[str, typing.Any]\n\n\ndef enforce_bytes(value: bytes | str, *, name: str) -> bytes:\n """\n Any arguments that are ultimately represented as bytes can be specified\n either as bytes or as strings.\n\n However we enforce that any string arguments must only contain characters in\n the plain ASCII range. chr(0)...chr(127). If you need to use characters\n outside that range then be precise, and use a byte-wise argument.\n """\n if isinstance(value, str):\n try:\n return value.encode("ascii")\n except UnicodeEncodeError:\n raise TypeError(f"{name} strings may not include unicode characters.")\n elif isinstance(value, bytes):\n return value\n\n seen_type = type(value).__name__\n raise TypeError(f"{name} must be bytes or str, but got {seen_type}.")\n\n\ndef enforce_url(value: URL | bytes | str, *, name: str) -> URL:\n """\n Type check for URL parameters.\n """\n if isinstance(value, (bytes, str)):\n return URL(value)\n elif isinstance(value, URL):\n return value\n\n seen_type = type(value).__name__\n raise TypeError(f"{name} must be a URL, bytes, or str, but got {seen_type}.")\n\n\ndef enforce_headers(\n value: HeadersAsMapping | HeadersAsSequence | None = None, *, name: str\n) -> list[tuple[bytes, bytes]]:\n """\n Convienence function that ensure all items in request or response headers\n are either bytes or strings in the plain ASCII range.\n """\n if value is None:\n return []\n elif isinstance(value, typing.Mapping):\n return [\n (\n enforce_bytes(k, name="header name"),\n enforce_bytes(v, name="header value"),\n )\n for k, v in value.items()\n ]\n elif isinstance(value, typing.Sequence):\n return [\n (\n enforce_bytes(k, name="header name"),\n enforce_bytes(v, name="header value"),\n )\n for k, v in value\n ]\n\n seen_type = type(value).__name__\n raise TypeError(\n f"{name} must be a mapping or sequence of two-tuples, but got {seen_type}."\n )\n\n\ndef enforce_stream(\n value: bytes | typing.Iterable[bytes] | typing.AsyncIterable[bytes] | None,\n *,\n name: str,\n) -> typing.Iterable[bytes] | typing.AsyncIterable[bytes]:\n if value is None:\n return ByteStream(b"")\n elif isinstance(value, bytes):\n return ByteStream(value)\n return value\n\n\n# * https://tools.ietf.org/html/rfc3986#section-3.2.3\n# * https://url.spec.whatwg.org/#url-miscellaneous\n# * https://url.spec.whatwg.org/#scheme-state\nDEFAULT_PORTS = {\n b"ftp": 21,\n b"http": 80,\n b"https": 443,\n b"ws": 80,\n b"wss": 443,\n}\n\n\ndef include_request_headers(\n headers: list[tuple[bytes, bytes]],\n *,\n url: "URL",\n content: None | bytes | typing.Iterable[bytes] | typing.AsyncIterable[bytes],\n) -> list[tuple[bytes, bytes]]:\n headers_set = set(k.lower() for k, v in headers)\n\n if b"host" not in headers_set:\n default_port = DEFAULT_PORTS.get(url.scheme)\n if url.port is None or url.port == default_port:\n header_value = url.host\n else:\n header_value = b"%b:%d" % (url.host, url.port)\n headers = [(b"Host", header_value)] + headers\n\n if (\n content is not None\n and b"content-length" not in headers_set\n and b"transfer-encoding" not in headers_set\n ):\n if isinstance(content, bytes):\n content_length = 
str(len(content)).encode("ascii")\n headers += [(b"Content-Length", content_length)]\n else:\n headers += [(b"Transfer-Encoding", b"chunked")] # pragma: nocover\n\n return headers\n\n\n# Interfaces for byte streams...\n\n\nclass ByteStream:\n """\n A container for non-streaming content, and that supports both sync and async\n stream iteration.\n """\n\n def __init__(self, content: bytes) -> None:\n self._content = content\n\n def __iter__(self) -> typing.Iterator[bytes]:\n yield self._content\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield self._content\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__} [{len(self._content)} bytes]>"\n\n\nclass Origin:\n def __init__(self, scheme: bytes, host: bytes, port: int) -> None:\n self.scheme = scheme\n self.host = host\n self.port = port\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, Origin)\n and self.scheme == other.scheme\n and self.host == other.host\n and self.port == other.port\n )\n\n def __str__(self) -> str:\n scheme = self.scheme.decode("ascii")\n host = self.host.decode("ascii")\n port = str(self.port)\n return f"{scheme}://{host}:{port}"\n\n\nclass URL:\n """\n Represents the URL against which an HTTP request may be made.\n\n The URL may either be specified as a plain string, for convienence:\n\n ```python\n url = httpcore.URL("https://www.example.com/")\n ```\n\n Or be constructed with explicitily pre-parsed components:\n\n ```python\n url = httpcore.URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/')\n ```\n\n Using this second more explicit style allows integrations that are using\n `httpcore` to pass through URLs that have already been parsed in order to use\n libraries such as `rfc-3986` rather than relying on the stdlib. It also ensures\n that URL parsing is treated identically at both the networking level and at any\n higher layers of abstraction.\n\n The four components are important here, as they allow the URL to be precisely\n specified in a pre-parsed format. They also allow certain types of request to\n be created that could not otherwise be expressed.\n\n For example, an HTTP request to `http://www.example.com/` forwarded via a proxy\n at `http://localhost:8080`...\n\n ```python\n # Constructs an HTTP request with a complete URL as the target:\n # GET https://www.example.com/ HTTP/1.1\n url = httpcore.URL(\n scheme=b'http',\n host=b'localhost',\n port=8080,\n target=b'https://www.example.com/'\n )\n request = httpcore.Request(\n method="GET",\n url=url\n )\n ```\n\n Another example is constructing an `OPTIONS *` request...\n\n ```python\n # Constructs an 'OPTIONS *' HTTP request:\n # OPTIONS * HTTP/1.1\n url = httpcore.URL(scheme=b'https', host=b'www.example.com', target=b'*')\n request = httpcore.Request(method="OPTIONS", url=url)\n ```\n\n This kind of request is not possible to formulate with a URL string,\n because the `/` delimiter is always used to demark the target from the\n host/port portion of the URL.\n\n For convenience, string-like arguments may be specified either as strings or\n as bytes. 
However, once a request is being issue over-the-wire, the URL\n components are always ultimately required to be a bytewise representation.\n\n In order to avoid any ambiguity over character encodings, when strings are used\n as arguments, they must be strictly limited to the ASCII range `chr(0)`-`chr(127)`.\n If you require a bytewise representation that is outside this range you must\n handle the character encoding directly, and pass a bytes instance.\n """\n\n def __init__(\n self,\n url: bytes | str = "",\n *,\n scheme: bytes | str = b"",\n host: bytes | str = b"",\n port: int | None = None,\n target: bytes | str = b"",\n ) -> None:\n """\n Parameters:\n url: The complete URL as a string or bytes.\n scheme: The URL scheme as a string or bytes.\n Typically either `"http"` or `"https"`.\n host: The URL host as a string or bytes. Such as `"www.example.com"`.\n port: The port to connect to. Either an integer or `None`.\n target: The target of the HTTP request. Such as `"/items?search=red"`.\n """\n if url:\n parsed = urllib.parse.urlparse(enforce_bytes(url, name="url"))\n self.scheme = parsed.scheme\n self.host = parsed.hostname or b""\n self.port = parsed.port\n self.target = (parsed.path or b"/") + (\n b"?" + parsed.query if parsed.query else b""\n )\n else:\n self.scheme = enforce_bytes(scheme, name="scheme")\n self.host = enforce_bytes(host, name="host")\n self.port = port\n self.target = enforce_bytes(target, name="target")\n\n @property\n def origin(self) -> Origin:\n default_port = {\n b"http": 80,\n b"https": 443,\n b"ws": 80,\n b"wss": 443,\n b"socks5": 1080,\n b"socks5h": 1080,\n }[self.scheme]\n return Origin(\n scheme=self.scheme, host=self.host, port=self.port or default_port\n )\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, URL)\n and other.scheme == self.scheme\n and other.host == self.host\n and other.port == self.port\n and other.target == self.target\n )\n\n def __bytes__(self) -> bytes:\n if self.port is None:\n return b"%b://%b%b" % (self.scheme, self.host, self.target)\n return b"%b://%b:%d%b" % (self.scheme, self.host, self.port, self.target)\n\n def __repr__(self) -> str:\n return (\n f"{self.__class__.__name__}(scheme={self.scheme!r}, "\n f"host={self.host!r}, port={self.port!r}, target={self.target!r})"\n )\n\n\nclass Request:\n """\n An HTTP request.\n """\n\n def __init__(\n self,\n method: bytes | str,\n url: URL | bytes | str,\n *,\n headers: HeaderTypes = None,\n content: bytes\n | typing.Iterable[bytes]\n | typing.AsyncIterable[bytes]\n | None = None,\n extensions: Extensions | None = None,\n ) -> None:\n """\n Parameters:\n method: The HTTP request method, either as a string or bytes.\n For example: `GET`.\n url: The request URL, either as a `URL` instance, or as a string or bytes.\n For example: `"https://www.example.com".`\n headers: The HTTP request headers.\n content: The content of the request body.\n extensions: A dictionary of optional extra information included on\n the request. 
Possible keys include `"timeout"`, and `"trace"`.\n """\n self.method: bytes = enforce_bytes(method, name="method")\n self.url: URL = enforce_url(url, name="url")\n self.headers: list[tuple[bytes, bytes]] = enforce_headers(\n headers, name="headers"\n )\n self.stream: typing.Iterable[bytes] | typing.AsyncIterable[bytes] = (\n enforce_stream(content, name="content")\n )\n self.extensions = {} if extensions is None else extensions\n\n if "target" in self.extensions:\n self.url = URL(\n scheme=self.url.scheme,\n host=self.url.host,\n port=self.url.port,\n target=self.extensions["target"],\n )\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__} [{self.method!r}]>"\n\n\nclass Response:\n """\n An HTTP response.\n """\n\n def __init__(\n self,\n status: int,\n *,\n headers: HeaderTypes = None,\n content: bytes\n | typing.Iterable[bytes]\n | typing.AsyncIterable[bytes]\n | None = None,\n extensions: Extensions | None = None,\n ) -> None:\n """\n Parameters:\n status: The HTTP status code of the response. For example `200`.\n headers: The HTTP response headers.\n content: The content of the response body.\n extensions: A dictionary of optional extra information included on\n the responseself.Possible keys include `"http_version"`,\n `"reason_phrase"`, and `"network_stream"`.\n """\n self.status: int = status\n self.headers: list[tuple[bytes, bytes]] = enforce_headers(\n headers, name="headers"\n )\n self.stream: typing.Iterable[bytes] | typing.AsyncIterable[bytes] = (\n enforce_stream(content, name="content")\n )\n self.extensions = {} if extensions is None else extensions\n\n self._stream_consumed = False\n\n @property\n def content(self) -> bytes:\n if not hasattr(self, "_content"):\n if isinstance(self.stream, typing.Iterable):\n raise RuntimeError(\n "Attempted to access 'response.content' on a streaming response. "\n "Call 'response.read()' first."\n )\n else:\n raise RuntimeError(\n "Attempted to access 'response.content' on a streaming response. "\n "Call 'await response.aread()' first."\n )\n return self._content\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__} [{self.status}]>"\n\n # Sync interface...\n\n def read(self) -> bytes:\n if not isinstance(self.stream, typing.Iterable): # pragma: nocover\n raise RuntimeError(\n "Attempted to read an asynchronous response using 'response.read()'. "\n "You should use 'await response.aread()' instead."\n )\n if not hasattr(self, "_content"):\n self._content = b"".join([part for part in self.iter_stream()])\n return self._content\n\n def iter_stream(self) -> typing.Iterator[bytes]:\n if not isinstance(self.stream, typing.Iterable): # pragma: nocover\n raise RuntimeError(\n "Attempted to stream an asynchronous response using 'for ... in "\n "response.iter_stream()'. "\n "You should use 'async for ... in response.aiter_stream()' instead."\n )\n if self._stream_consumed:\n raise RuntimeError(\n "Attempted to call 'for ... in response.iter_stream()' more than once."\n )\n self._stream_consumed = True\n for chunk in self.stream:\n yield chunk\n\n def close(self) -> None:\n if not isinstance(self.stream, typing.Iterable): # pragma: nocover\n raise RuntimeError(\n "Attempted to close an asynchronous response using 'response.close()'. 
"\n "You should use 'await response.aclose()' instead."\n )\n if hasattr(self.stream, "close"):\n self.stream.close()\n\n # Async interface...\n\n async def aread(self) -> bytes:\n if not isinstance(self.stream, typing.AsyncIterable): # pragma: nocover\n raise RuntimeError(\n "Attempted to read an synchronous response using "\n "'await response.aread()'. "\n "You should use 'response.read()' instead."\n )\n if not hasattr(self, "_content"):\n self._content = b"".join([part async for part in self.aiter_stream()])\n return self._content\n\n async def aiter_stream(self) -> typing.AsyncIterator[bytes]:\n if not isinstance(self.stream, typing.AsyncIterable): # pragma: nocover\n raise RuntimeError(\n "Attempted to stream an synchronous response using 'async for ... in "\n "response.aiter_stream()'. "\n "You should use 'for ... in response.iter_stream()' instead."\n )\n if self._stream_consumed:\n raise RuntimeError(\n "Attempted to call 'async for ... in response.aiter_stream()' "\n "more than once."\n )\n self._stream_consumed = True\n async for chunk in self.stream:\n yield chunk\n\n async def aclose(self) -> None:\n if not isinstance(self.stream, typing.AsyncIterable): # pragma: nocover\n raise RuntimeError(\n "Attempted to close a synchronous response using "\n "'await response.aclose()'. "\n "You should use 'response.close()' instead."\n )\n if hasattr(self.stream, "aclose"):\n await self.stream.aclose()\n\n\nclass Proxy:\n def __init__(\n self,\n url: URL | bytes | str,\n auth: tuple[bytes | str, bytes | str] | None = None,\n headers: HeadersAsMapping | HeadersAsSequence | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ):\n self.url = enforce_url(url, name="url")\n self.headers = enforce_headers(headers, name="headers")\n self.ssl_context = ssl_context\n\n if auth is not None:\n username = enforce_bytes(auth[0], name="auth")\n password = enforce_bytes(auth[1], name="auth")\n userpass = username + b":" + password\n authorization = b"Basic " + base64.b64encode(userpass)\n self.auth: tuple[bytes, bytes] | None = (username, password)\n self.headers = [(b"Proxy-Authorization", authorization)] + self.headers\n else:\n self.auth = None\n
| .venv\Lib\site-packages\httpcore\_models.py | _models.py | Python | 17,623 | 0.95 | 0.162791 | 0.03653 | react-lib | 583 | 2025-03-25T13:16:17.973167 | BSD-3-Clause | false | 85cf0132820d0281f68fc3f6a8198c64 |
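As a quick usage sketch of the `URL`, `Request`, and `Response` models defined in `_models.py` above (illustrative values only, no network I/O):

```python
import httpcore

# Construct a URL from a plain string; scheme/host/port/target are parsed out.
url = httpcore.URL(b"https://www.example.com/items?search=red")

# Build a request against that URL. Headers may be given as a mapping or a
# sequence of two-tuples; string arguments are ASCII-only and coerced to bytes.
request = httpcore.Request(method="GET", url=url, headers={"Accept": "application/json"})

# A non-streaming response wraps its body in a ByteStream.
response = httpcore.Response(
    status=200,
    headers=[(b"Content-Type", b"application/json")],
    content=b'{"items": []}',
)

print(request.method, request.url.target)  # b'GET' b'/items?search=red'
print(response.status, response.read())    # 200 b'{"items": []}'
```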
import ssl\n\nimport certifi\n\n\ndef default_ssl_context() -> ssl.SSLContext:\n context = ssl.create_default_context()\n context.load_verify_locations(certifi.where())\n return context\n
| .venv\Lib\site-packages\httpcore\_ssl.py | _ssl.py | Python | 187 | 0.85 | 0.111111 | 0 | node-utils | 918 | 2024-01-16T23:29:53.101837 | Apache-2.0 | false | 48dc08986460a775e586cce59adc56b3 |
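A minimal sketch of using `default_ssl_context()` from `_ssl.py`; passing it explicitly is optional, since `httpcore` falls back to the same certifi-backed default when no context is given (the URL is illustrative):

```python
import httpcore

# Build the certifi-backed SSL context and hand it to a connection pool.
ssl_context = httpcore.default_ssl_context()

with httpcore.ConnectionPool(ssl_context=ssl_context) as pool:
    response = pool.request("GET", "https://www.example.com/")
    print(response.status)
```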
from __future__ import annotations\n\nimport inspect\nimport logging\nimport types\nimport typing\n\nfrom ._models import Request\n\n\nclass Trace:\n def __init__(\n self,\n name: str,\n logger: logging.Logger,\n request: Request | None = None,\n kwargs: dict[str, typing.Any] | None = None,\n ) -> None:\n self.name = name\n self.logger = logger\n self.trace_extension = (\n None if request is None else request.extensions.get("trace")\n )\n self.debug = self.logger.isEnabledFor(logging.DEBUG)\n self.kwargs = kwargs or {}\n self.return_value: typing.Any = None\n self.should_trace = self.debug or self.trace_extension is not None\n self.prefix = self.logger.name.split(".")[-1]\n\n def trace(self, name: str, info: dict[str, typing.Any]) -> None:\n if self.trace_extension is not None:\n prefix_and_name = f"{self.prefix}.{name}"\n ret = self.trace_extension(prefix_and_name, info)\n if inspect.iscoroutine(ret): # pragma: no cover\n raise TypeError(\n "If you are using a synchronous interface, "\n "the callback of the `trace` extension should "\n "be a normal function instead of an asynchronous function."\n )\n\n if self.debug:\n if not info or "return_value" in info and info["return_value"] is None:\n message = name\n else:\n args = " ".join([f"{key}={value!r}" for key, value in info.items()])\n message = f"{name} {args}"\n self.logger.debug(message)\n\n def __enter__(self) -> Trace:\n if self.should_trace:\n info = self.kwargs\n self.trace(f"{self.name}.started", info)\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: types.TracebackType | None = None,\n ) -> None:\n if self.should_trace:\n if exc_value is None:\n info = {"return_value": self.return_value}\n self.trace(f"{self.name}.complete", info)\n else:\n info = {"exception": exc_value}\n self.trace(f"{self.name}.failed", info)\n\n async def atrace(self, name: str, info: dict[str, typing.Any]) -> None:\n if self.trace_extension is not None:\n prefix_and_name = f"{self.prefix}.{name}"\n coro = self.trace_extension(prefix_and_name, info)\n if not inspect.iscoroutine(coro): # pragma: no cover\n raise TypeError(\n "If you're using an asynchronous interface, "\n "the callback of the `trace` extension should "\n "be an asynchronous function rather than a normal function."\n )\n await coro\n\n if self.debug:\n if not info or "return_value" in info and info["return_value"] is None:\n message = name\n else:\n args = " ".join([f"{key}={value!r}" for key, value in info.items()])\n message = f"{name} {args}"\n self.logger.debug(message)\n\n async def __aenter__(self) -> Trace:\n if self.should_trace:\n info = self.kwargs\n await self.atrace(f"{self.name}.started", info)\n return self\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: types.TracebackType | None = None,\n ) -> None:\n if self.should_trace:\n if exc_value is None:\n info = {"return_value": self.return_value}\n await self.atrace(f"{self.name}.complete", info)\n else:\n info = {"exception": exc_value}\n await self.atrace(f"{self.name}.failed", info)\n
| .venv\Lib\site-packages\httpcore\_trace.py | _trace.py | Python | 3,952 | 0.95 | 0.271028 | 0 | node-utils | 649 | 2024-12-16T20:11:03.795997 | GPL-3.0 | false | efa2daa51e9ca9fcd2e3b09f4cb8c21f |
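The `Trace` helper in `_trace.py` drives the `"trace"` request extension; a minimal sketch of hooking it from user code, where event names follow the `<logger>.<step>.<phase>` pattern (the URL is illustrative):

```python
import typing

import httpcore


def log_event(event_name: str, info: dict[str, typing.Any]) -> None:
    # Called synchronously for each traced step, for example
    # "connection.connect_tcp.started" or "http11.send_request_headers.complete".
    print(event_name, info)


response = httpcore.request(
    "GET",
    "https://www.example.com/",
    extensions={"trace": log_event},
)
print(response.status)
```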
from ._api import request, stream\nfrom ._async import (\n AsyncConnectionInterface,\n AsyncConnectionPool,\n AsyncHTTP2Connection,\n AsyncHTTP11Connection,\n AsyncHTTPConnection,\n AsyncHTTPProxy,\n AsyncSOCKSProxy,\n)\nfrom ._backends.base import (\n SOCKET_OPTION,\n AsyncNetworkBackend,\n AsyncNetworkStream,\n NetworkBackend,\n NetworkStream,\n)\nfrom ._backends.mock import AsyncMockBackend, AsyncMockStream, MockBackend, MockStream\nfrom ._backends.sync import SyncBackend\nfrom ._exceptions import (\n ConnectError,\n ConnectionNotAvailable,\n ConnectTimeout,\n LocalProtocolError,\n NetworkError,\n PoolTimeout,\n ProtocolError,\n ProxyError,\n ReadError,\n ReadTimeout,\n RemoteProtocolError,\n TimeoutException,\n UnsupportedProtocol,\n WriteError,\n WriteTimeout,\n)\nfrom ._models import URL, Origin, Proxy, Request, Response\nfrom ._ssl import default_ssl_context\nfrom ._sync import (\n ConnectionInterface,\n ConnectionPool,\n HTTP2Connection,\n HTTP11Connection,\n HTTPConnection,\n HTTPProxy,\n SOCKSProxy,\n)\n\n# The 'httpcore.AnyIOBackend' class is conditional on 'anyio' being installed.\ntry:\n from ._backends.anyio import AnyIOBackend\nexcept ImportError: # pragma: nocover\n\n class AnyIOBackend: # type: ignore\n def __init__(self, *args, **kwargs): # type: ignore\n msg = (\n "Attempted to use 'httpcore.AnyIOBackend' but 'anyio' is not installed."\n )\n raise RuntimeError(msg)\n\n\n# The 'httpcore.TrioBackend' class is conditional on 'trio' being installed.\ntry:\n from ._backends.trio import TrioBackend\nexcept ImportError: # pragma: nocover\n\n class TrioBackend: # type: ignore\n def __init__(self, *args, **kwargs): # type: ignore\n msg = "Attempted to use 'httpcore.TrioBackend' but 'trio' is not installed."\n raise RuntimeError(msg)\n\n\n__all__ = [\n # top-level requests\n "request",\n "stream",\n # models\n "Origin",\n "URL",\n "Request",\n "Response",\n "Proxy",\n # async\n "AsyncHTTPConnection",\n "AsyncConnectionPool",\n "AsyncHTTPProxy",\n "AsyncHTTP11Connection",\n "AsyncHTTP2Connection",\n "AsyncConnectionInterface",\n "AsyncSOCKSProxy",\n # sync\n "HTTPConnection",\n "ConnectionPool",\n "HTTPProxy",\n "HTTP11Connection",\n "HTTP2Connection",\n "ConnectionInterface",\n "SOCKSProxy",\n # network backends, implementations\n "SyncBackend",\n "AnyIOBackend",\n "TrioBackend",\n # network backends, mock implementations\n "AsyncMockBackend",\n "AsyncMockStream",\n "MockBackend",\n "MockStream",\n # network backends, interface\n "AsyncNetworkStream",\n "AsyncNetworkBackend",\n "NetworkStream",\n "NetworkBackend",\n # util\n "default_ssl_context",\n "SOCKET_OPTION",\n # exceptions\n "ConnectionNotAvailable",\n "ProxyError",\n "ProtocolError",\n "LocalProtocolError",\n "RemoteProtocolError",\n "UnsupportedProtocol",\n "TimeoutException",\n "PoolTimeout",\n "ConnectTimeout",\n "ReadTimeout",\n "WriteTimeout",\n "NetworkError",\n "ConnectError",\n "ReadError",\n "WriteError",\n]\n\n__version__ = "1.0.9"\n\n\n__locals = locals()\nfor __name in __all__:\n # Exclude SOCKET_OPTION, it causes AttributeError on Python 3.14\n if not __name.startswith(("__", "SOCKET_OPTION")):\n setattr(__locals[__name], "__module__", "httpcore") # noqa\n
| .venv\Lib\site-packages\httpcore\__init__.py | __init__.py | Python | 3,445 | 0.95 | 0.070922 | 0.091603 | python-kit | 965 | 2024-10-24T10:47:44.291204 | MIT | false | 617176dff9692c89a14e5e5aaeb52f3f |
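A short sketch of the two top-level helpers (`request` and `stream`) re-exported by `__init__.py` above; the URL is illustrative:

```python
import httpcore

# One-shot request: the body is fully read before returning.
response = httpcore.request("GET", "https://www.example.com/")
print(response.status, len(response.content))

# Streaming variant: the body is consumed chunk by chunk inside the context.
with httpcore.stream("GET", "https://www.example.com/") as response:
    for chunk in response.iter_stream():
        print(f"received {len(chunk)} bytes")
```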
from __future__ import annotations\n\nimport itertools\nimport logging\nimport ssl\nimport types\nimport typing\n\nfrom .._backends.auto import AutoBackend\nfrom .._backends.base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream\nfrom .._exceptions import ConnectError, ConnectTimeout\nfrom .._models import Origin, Request, Response\nfrom .._ssl import default_ssl_context\nfrom .._synchronization import AsyncLock\nfrom .._trace import Trace\nfrom .http11 import AsyncHTTP11Connection\nfrom .interfaces import AsyncConnectionInterface\n\nRETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc.\n\n\nlogger = logging.getLogger("httpcore.connection")\n\n\ndef exponential_backoff(factor: float) -> typing.Iterator[float]:\n """\n Generate a geometric sequence that has a ratio of 2 and starts with 0.\n\n For example:\n - `factor = 2`: `0, 2, 4, 8, 16, 32, 64, ...`\n - `factor = 3`: `0, 3, 6, 12, 24, 48, 96, ...`\n """\n yield 0\n for n in itertools.count():\n yield factor * 2**n\n\n\nclass AsyncHTTPConnection(AsyncConnectionInterface):\n def __init__(\n self,\n origin: Origin,\n ssl_context: ssl.SSLContext | None = None,\n keepalive_expiry: float | None = None,\n http1: bool = True,\n http2: bool = False,\n retries: int = 0,\n local_address: str | None = None,\n uds: str | None = None,\n network_backend: AsyncNetworkBackend | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> None:\n self._origin = origin\n self._ssl_context = ssl_context\n self._keepalive_expiry = keepalive_expiry\n self._http1 = http1\n self._http2 = http2\n self._retries = retries\n self._local_address = local_address\n self._uds = uds\n\n self._network_backend: AsyncNetworkBackend = (\n AutoBackend() if network_backend is None else network_backend\n )\n self._connection: AsyncConnectionInterface | None = None\n self._connect_failed: bool = False\n self._request_lock = AsyncLock()\n self._socket_options = socket_options\n\n async def handle_async_request(self, request: Request) -> Response:\n if not self.can_handle_request(request.url.origin):\n raise RuntimeError(\n f"Attempted to send request to {request.url.origin} on connection to {self._origin}"\n )\n\n try:\n async with self._request_lock:\n if self._connection is None:\n stream = await self._connect(request)\n\n ssl_object = stream.get_extra_info("ssl_object")\n http2_negotiated = (\n ssl_object is not None\n and ssl_object.selected_alpn_protocol() == "h2"\n )\n if http2_negotiated or (self._http2 and not self._http1):\n from .http2 import AsyncHTTP2Connection\n\n self._connection = AsyncHTTP2Connection(\n origin=self._origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n else:\n self._connection = AsyncHTTP11Connection(\n origin=self._origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n except BaseException as exc:\n self._connect_failed = True\n raise exc\n\n return await self._connection.handle_async_request(request)\n\n async def _connect(self, request: Request) -> AsyncNetworkStream:\n timeouts = request.extensions.get("timeout", {})\n sni_hostname = request.extensions.get("sni_hostname", None)\n timeout = timeouts.get("connect", None)\n\n retries_left = self._retries\n delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR)\n\n while True:\n try:\n if self._uds is None:\n kwargs = {\n "host": self._origin.host.decode("ascii"),\n "port": self._origin.port,\n "local_address": self._local_address,\n "timeout": timeout,\n "socket_options": self._socket_options,\n }\n async with 
Trace("connect_tcp", logger, request, kwargs) as trace:\n stream = await self._network_backend.connect_tcp(**kwargs)\n trace.return_value = stream\n else:\n kwargs = {\n "path": self._uds,\n "timeout": timeout,\n "socket_options": self._socket_options,\n }\n async with Trace(\n "connect_unix_socket", logger, request, kwargs\n ) as trace:\n stream = await self._network_backend.connect_unix_socket(\n **kwargs\n )\n trace.return_value = stream\n\n if self._origin.scheme in (b"https", b"wss"):\n ssl_context = (\n default_ssl_context()\n if self._ssl_context is None\n else self._ssl_context\n )\n alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]\n ssl_context.set_alpn_protocols(alpn_protocols)\n\n kwargs = {\n "ssl_context": ssl_context,\n "server_hostname": sni_hostname\n or self._origin.host.decode("ascii"),\n "timeout": timeout,\n }\n async with Trace("start_tls", logger, request, kwargs) as trace:\n stream = await stream.start_tls(**kwargs)\n trace.return_value = stream\n return stream\n except (ConnectError, ConnectTimeout):\n if retries_left <= 0:\n raise\n retries_left -= 1\n delay = next(delays)\n async with Trace("retry", logger, request, kwargs) as trace:\n await self._network_backend.sleep(delay)\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._origin\n\n async def aclose(self) -> None:\n if self._connection is not None:\n async with Trace("close", logger, None, {}):\n await self._connection.aclose()\n\n def is_available(self) -> bool:\n if self._connection is None:\n # If HTTP/2 support is enabled, and the resulting connection could\n # end up as HTTP/2 then we should indicate the connection as being\n # available to service multiple requests.\n return (\n self._http2\n and (self._origin.scheme == b"https" or not self._http1)\n and not self._connect_failed\n )\n return self._connection.is_available()\n\n def has_expired(self) -> bool:\n if self._connection is None:\n return self._connect_failed\n return self._connection.has_expired()\n\n def is_idle(self) -> bool:\n if self._connection is None:\n return self._connect_failed\n return self._connection.is_idle()\n\n def is_closed(self) -> bool:\n if self._connection is None:\n return self._connect_failed\n return self._connection.is_closed()\n\n def info(self) -> str:\n if self._connection is None:\n return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"\n return self._connection.info()\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__} [{self.info()}]>"\n\n # These context managers are not used in the standard flow, but are\n # useful for testing or working with connection instances directly.\n\n async def __aenter__(self) -> AsyncHTTPConnection:\n return self\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: types.TracebackType | None = None,\n ) -> None:\n await self.aclose()\n
| .venv\Lib\site-packages\httpcore\_async\connection.py | connection.py | Python | 8,449 | 0.95 | 0.162162 | 0.031579 | vue-tools | 533 | 2023-09-21T22:54:42.376058 | BSD-3-Clause | false | 9854e89cedd74dbf30724bd130f30702 |
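A minimal sketch of using `AsyncHTTPConnection` directly, which the comments in `connection.py` note is mainly useful for testing or for working with connection instances outside the pool; the origin and URL are illustrative:

```python
import asyncio

import httpcore


async def main() -> None:
    # A single host-specific connection; the TCP/TLS handshake happens lazily
    # on the first request, with up to two retries on connect errors.
    origin = httpcore.Origin(scheme=b"https", host=b"www.example.com", port=443)
    async with httpcore.AsyncHTTPConnection(origin=origin, retries=2) as connection:
        response = await connection.request("GET", "https://www.example.com/")
        print(response.status, connection.info())


asyncio.run(main())
```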
from __future__ import annotations\n\nimport enum\nimport logging\nimport ssl\nimport time\nimport types\nimport typing\n\nimport h11\n\nfrom .._backends.base import AsyncNetworkStream\nfrom .._exceptions import (\n ConnectionNotAvailable,\n LocalProtocolError,\n RemoteProtocolError,\n WriteError,\n map_exceptions,\n)\nfrom .._models import Origin, Request, Response\nfrom .._synchronization import AsyncLock, AsyncShieldCancellation\nfrom .._trace import Trace\nfrom .interfaces import AsyncConnectionInterface\n\nlogger = logging.getLogger("httpcore.http11")\n\n\n# A subset of `h11.Event` types supported by `_send_event`\nH11SendEvent = typing.Union[\n h11.Request,\n h11.Data,\n h11.EndOfMessage,\n]\n\n\nclass HTTPConnectionState(enum.IntEnum):\n NEW = 0\n ACTIVE = 1\n IDLE = 2\n CLOSED = 3\n\n\nclass AsyncHTTP11Connection(AsyncConnectionInterface):\n READ_NUM_BYTES = 64 * 1024\n MAX_INCOMPLETE_EVENT_SIZE = 100 * 1024\n\n def __init__(\n self,\n origin: Origin,\n stream: AsyncNetworkStream,\n keepalive_expiry: float | None = None,\n ) -> None:\n self._origin = origin\n self._network_stream = stream\n self._keepalive_expiry: float | None = keepalive_expiry\n self._expire_at: float | None = None\n self._state = HTTPConnectionState.NEW\n self._state_lock = AsyncLock()\n self._request_count = 0\n self._h11_state = h11.Connection(\n our_role=h11.CLIENT,\n max_incomplete_event_size=self.MAX_INCOMPLETE_EVENT_SIZE,\n )\n\n async def handle_async_request(self, request: Request) -> Response:\n if not self.can_handle_request(request.url.origin):\n raise RuntimeError(\n f"Attempted to send request to {request.url.origin} on connection "\n f"to {self._origin}"\n )\n\n async with self._state_lock:\n if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE):\n self._request_count += 1\n self._state = HTTPConnectionState.ACTIVE\n self._expire_at = None\n else:\n raise ConnectionNotAvailable()\n\n try:\n kwargs = {"request": request}\n try:\n async with Trace(\n "send_request_headers", logger, request, kwargs\n ) as trace:\n await self._send_request_headers(**kwargs)\n async with Trace("send_request_body", logger, request, kwargs) as trace:\n await self._send_request_body(**kwargs)\n except WriteError:\n # If we get a write error while we're writing the request,\n # then we supress this error and move on to attempting to\n # read the response. 
Servers can sometimes close the request\n # pre-emptively and then respond with a well formed HTTP\n # error response.\n pass\n\n async with Trace(\n "receive_response_headers", logger, request, kwargs\n ) as trace:\n (\n http_version,\n status,\n reason_phrase,\n headers,\n trailing_data,\n ) = await self._receive_response_headers(**kwargs)\n trace.return_value = (\n http_version,\n status,\n reason_phrase,\n headers,\n )\n\n network_stream = self._network_stream\n\n # CONNECT or Upgrade request\n if (status == 101) or (\n (request.method == b"CONNECT") and (200 <= status < 300)\n ):\n network_stream = AsyncHTTP11UpgradeStream(network_stream, trailing_data)\n\n return Response(\n status=status,\n headers=headers,\n content=HTTP11ConnectionByteStream(self, request),\n extensions={\n "http_version": http_version,\n "reason_phrase": reason_phrase,\n "network_stream": network_stream,\n },\n )\n except BaseException as exc:\n with AsyncShieldCancellation():\n async with Trace("response_closed", logger, request) as trace:\n await self._response_closed()\n raise exc\n\n # Sending the request...\n\n async def _send_request_headers(self, request: Request) -> None:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("write", None)\n\n with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):\n event = h11.Request(\n method=request.method,\n target=request.url.target,\n headers=request.headers,\n )\n await self._send_event(event, timeout=timeout)\n\n async def _send_request_body(self, request: Request) -> None:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("write", None)\n\n assert isinstance(request.stream, typing.AsyncIterable)\n async for chunk in request.stream:\n event = h11.Data(data=chunk)\n await self._send_event(event, timeout=timeout)\n\n await self._send_event(h11.EndOfMessage(), timeout=timeout)\n\n async def _send_event(self, event: h11.Event, timeout: float | None = None) -> None:\n bytes_to_send = self._h11_state.send(event)\n if bytes_to_send is not None:\n await self._network_stream.write(bytes_to_send, timeout=timeout)\n\n # Receiving the response...\n\n async def _receive_response_headers(\n self, request: Request\n ) -> tuple[bytes, int, bytes, list[tuple[bytes, bytes]], bytes]:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("read", None)\n\n while True:\n event = await self._receive_event(timeout=timeout)\n if isinstance(event, h11.Response):\n break\n if (\n isinstance(event, h11.InformationalResponse)\n and event.status_code == 101\n ):\n break\n\n http_version = b"HTTP/" + event.http_version\n\n # h11 version 0.11+ supports a `raw_items` interface to get the\n # raw header casing, rather than the enforced lowercase headers.\n headers = event.headers.raw_items()\n\n trailing_data, _ = self._h11_state.trailing_data\n\n return http_version, event.status_code, event.reason, headers, trailing_data\n\n async def _receive_response_body(\n self, request: Request\n ) -> typing.AsyncIterator[bytes]:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("read", None)\n\n while True:\n event = await self._receive_event(timeout=timeout)\n if isinstance(event, h11.Data):\n yield bytes(event.data)\n elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)):\n break\n\n async def _receive_event(\n self, timeout: float | None = None\n ) -> h11.Event | type[h11.PAUSED]:\n while True:\n with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}):\n event = 
self._h11_state.next_event()\n\n if event is h11.NEED_DATA:\n data = await self._network_stream.read(\n self.READ_NUM_BYTES, timeout=timeout\n )\n\n # If we feed this case through h11 we'll raise an exception like:\n #\n # httpcore.RemoteProtocolError: can't handle event type\n # ConnectionClosed when role=SERVER and state=SEND_RESPONSE\n #\n # Which is accurate, but not very informative from an end-user\n # perspective. Instead we handle this case distinctly and treat\n # it as a ConnectError.\n if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE:\n msg = "Server disconnected without sending a response."\n raise RemoteProtocolError(msg)\n\n self._h11_state.receive_data(data)\n else:\n # mypy fails to narrow the type in the above if statement above\n return event # type: ignore[return-value]\n\n async def _response_closed(self) -> None:\n async with self._state_lock:\n if (\n self._h11_state.our_state is h11.DONE\n and self._h11_state.their_state is h11.DONE\n ):\n self._state = HTTPConnectionState.IDLE\n self._h11_state.start_next_cycle()\n if self._keepalive_expiry is not None:\n now = time.monotonic()\n self._expire_at = now + self._keepalive_expiry\n else:\n await self.aclose()\n\n # Once the connection is no longer required...\n\n async def aclose(self) -> None:\n # Note that this method unilaterally closes the connection, and does\n # not have any kind of locking in place around it.\n self._state = HTTPConnectionState.CLOSED\n await self._network_stream.aclose()\n\n # The AsyncConnectionInterface methods provide information about the state of\n # the connection, allowing for a connection pooling implementation to\n # determine when to reuse and when to close the connection...\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._origin\n\n def is_available(self) -> bool:\n # Note that HTTP/1.1 connections in the "NEW" state are not treated as\n # being "available". 
The control flow which created the connection will\n # be able to send an outgoing request, but the connection will not be\n # acquired from the connection pool for any other request.\n return self._state == HTTPConnectionState.IDLE\n\n def has_expired(self) -> bool:\n now = time.monotonic()\n keepalive_expired = self._expire_at is not None and now > self._expire_at\n\n # If the HTTP connection is idle but the socket is readable, then the\n # only valid state is that the socket is about to return b"", indicating\n # a server-initiated disconnect.\n server_disconnected = (\n self._state == HTTPConnectionState.IDLE\n and self._network_stream.get_extra_info("is_readable")\n )\n\n return keepalive_expired or server_disconnected\n\n def is_idle(self) -> bool:\n return self._state == HTTPConnectionState.IDLE\n\n def is_closed(self) -> bool:\n return self._state == HTTPConnectionState.CLOSED\n\n def info(self) -> str:\n origin = str(self._origin)\n return (\n f"{origin!r}, HTTP/1.1, {self._state.name}, "\n f"Request Count: {self._request_count}"\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n origin = str(self._origin)\n return (\n f"<{class_name} [{origin!r}, {self._state.name}, "\n f"Request Count: {self._request_count}]>"\n )\n\n # These context managers are not used in the standard flow, but are\n # useful for testing or working with connection instances directly.\n\n async def __aenter__(self) -> AsyncHTTP11Connection:\n return self\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: types.TracebackType | None = None,\n ) -> None:\n await self.aclose()\n\n\nclass HTTP11ConnectionByteStream:\n def __init__(self, connection: AsyncHTTP11Connection, request: Request) -> None:\n self._connection = connection\n self._request = request\n self._closed = False\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n kwargs = {"request": self._request}\n try:\n async with Trace("receive_response_body", logger, self._request, kwargs):\n async for chunk in self._connection._receive_response_body(**kwargs):\n yield chunk\n except BaseException as exc:\n # If we get an exception while streaming the response,\n # we want to close the response (and possibly the connection)\n # before raising that exception.\n with AsyncShieldCancellation():\n await self.aclose()\n raise exc\n\n async def aclose(self) -> None:\n if not self._closed:\n self._closed = True\n async with Trace("response_closed", logger, self._request):\n await self._connection._response_closed()\n\n\nclass AsyncHTTP11UpgradeStream(AsyncNetworkStream):\n def __init__(self, stream: AsyncNetworkStream, leading_data: bytes) -> None:\n self._stream = stream\n self._leading_data = leading_data\n\n async def read(self, max_bytes: int, timeout: float | None = None) -> bytes:\n if self._leading_data:\n buffer = self._leading_data[:max_bytes]\n self._leading_data = self._leading_data[max_bytes:]\n return buffer\n else:\n return await self._stream.read(max_bytes, timeout)\n\n async def write(self, buffer: bytes, timeout: float | None = None) -> None:\n await self._stream.write(buffer, timeout)\n\n async def aclose(self) -> None:\n await self._stream.aclose()\n\n async def start_tls(\n self,\n ssl_context: ssl.SSLContext,\n server_hostname: str | None = None,\n timeout: float | None = None,\n ) -> AsyncNetworkStream:\n return await self._stream.start_tls(ssl_context, server_hostname, timeout)\n\n def get_extra_info(self, info: str) -> 
typing.Any:\n return self._stream.get_extra_info(info)\n
| .venv\Lib\site-packages\httpcore\_async\http11.py | http11.py | Python | 13,880 | 0.95 | 0.155673 | 0.121019 | node-utils | 326 | 2024-02-05T20:47:29.680251 | MIT | false | 2b517859f19362c2508759d12cf284ff |
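A sketch of exercising the HTTP/1.1 connection against an in-memory mock stream (the sync `HTTP11Connection` mirrors the async class above); the canned response bytes are illustrative and no real network I/O occurs:

```python
import httpcore

# Canned wire bytes returned by the mock network stream, one read per item.
stream = httpcore.MockStream([
    b"HTTP/1.1 200 OK\r\n",
    b"Content-Type: text/plain\r\n",
    b"Content-Length: 13\r\n",
    b"\r\n",
    b"Hello, world!",
])
origin = httpcore.Origin(scheme=b"https", host=b"example.com", port=443)

with httpcore.HTTP11Connection(origin=origin, stream=stream) as connection:
    response = connection.request("GET", "https://example.com/")
    print(response.status, response.content)  # 200 b'Hello, world!'
```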
from __future__ import annotations\n\nimport enum\nimport logging\nimport time\nimport types\nimport typing\n\nimport h2.config\nimport h2.connection\nimport h2.events\nimport h2.exceptions\nimport h2.settings\n\nfrom .._backends.base import AsyncNetworkStream\nfrom .._exceptions import (\n ConnectionNotAvailable,\n LocalProtocolError,\n RemoteProtocolError,\n)\nfrom .._models import Origin, Request, Response\nfrom .._synchronization import AsyncLock, AsyncSemaphore, AsyncShieldCancellation\nfrom .._trace import Trace\nfrom .interfaces import AsyncConnectionInterface\n\nlogger = logging.getLogger("httpcore.http2")\n\n\ndef has_body_headers(request: Request) -> bool:\n return any(\n k.lower() == b"content-length" or k.lower() == b"transfer-encoding"\n for k, v in request.headers\n )\n\n\nclass HTTPConnectionState(enum.IntEnum):\n ACTIVE = 1\n IDLE = 2\n CLOSED = 3\n\n\nclass AsyncHTTP2Connection(AsyncConnectionInterface):\n READ_NUM_BYTES = 64 * 1024\n CONFIG = h2.config.H2Configuration(validate_inbound_headers=False)\n\n def __init__(\n self,\n origin: Origin,\n stream: AsyncNetworkStream,\n keepalive_expiry: float | None = None,\n ):\n self._origin = origin\n self._network_stream = stream\n self._keepalive_expiry: float | None = keepalive_expiry\n self._h2_state = h2.connection.H2Connection(config=self.CONFIG)\n self._state = HTTPConnectionState.IDLE\n self._expire_at: float | None = None\n self._request_count = 0\n self._init_lock = AsyncLock()\n self._state_lock = AsyncLock()\n self._read_lock = AsyncLock()\n self._write_lock = AsyncLock()\n self._sent_connection_init = False\n self._used_all_stream_ids = False\n self._connection_error = False\n\n # Mapping from stream ID to response stream events.\n self._events: dict[\n int,\n list[\n h2.events.ResponseReceived\n | h2.events.DataReceived\n | h2.events.StreamEnded\n | h2.events.StreamReset,\n ],\n ] = {}\n\n # Connection terminated events are stored as state since\n # we need to handle them for all streams.\n self._connection_terminated: h2.events.ConnectionTerminated | None = None\n\n self._read_exception: Exception | None = None\n self._write_exception: Exception | None = None\n\n async def handle_async_request(self, request: Request) -> Response:\n if not self.can_handle_request(request.url.origin):\n # This cannot occur in normal operation, since the connection pool\n # will only send requests on connections that handle them.\n # It's in place simply for resilience as a guard against incorrect\n # usage, for anyone working directly with httpcore connections.\n raise RuntimeError(\n f"Attempted to send request to {request.url.origin} on connection "\n f"to {self._origin}"\n )\n\n async with self._state_lock:\n if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE):\n self._request_count += 1\n self._expire_at = None\n self._state = HTTPConnectionState.ACTIVE\n else:\n raise ConnectionNotAvailable()\n\n async with self._init_lock:\n if not self._sent_connection_init:\n try:\n sci_kwargs = {"request": request}\n async with Trace(\n "send_connection_init", logger, request, sci_kwargs\n ):\n await self._send_connection_init(**sci_kwargs)\n except BaseException as exc:\n with AsyncShieldCancellation():\n await self.aclose()\n raise exc\n\n self._sent_connection_init = True\n\n # Initially start with just 1 until the remote server provides\n # its max_concurrent_streams value\n self._max_streams = 1\n\n local_settings_max_streams = (\n self._h2_state.local_settings.max_concurrent_streams\n )\n self._max_streams_semaphore 
= AsyncSemaphore(local_settings_max_streams)\n\n for _ in range(local_settings_max_streams - self._max_streams):\n await self._max_streams_semaphore.acquire()\n\n await self._max_streams_semaphore.acquire()\n\n try:\n stream_id = self._h2_state.get_next_available_stream_id()\n self._events[stream_id] = []\n except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover\n self._used_all_stream_ids = True\n self._request_count -= 1\n raise ConnectionNotAvailable()\n\n try:\n kwargs = {"request": request, "stream_id": stream_id}\n async with Trace("send_request_headers", logger, request, kwargs):\n await self._send_request_headers(request=request, stream_id=stream_id)\n async with Trace("send_request_body", logger, request, kwargs):\n await self._send_request_body(request=request, stream_id=stream_id)\n async with Trace(\n "receive_response_headers", logger, request, kwargs\n ) as trace:\n status, headers = await self._receive_response(\n request=request, stream_id=stream_id\n )\n trace.return_value = (status, headers)\n\n return Response(\n status=status,\n headers=headers,\n content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id),\n extensions={\n "http_version": b"HTTP/2",\n "network_stream": self._network_stream,\n "stream_id": stream_id,\n },\n )\n except BaseException as exc: # noqa: PIE786\n with AsyncShieldCancellation():\n kwargs = {"stream_id": stream_id}\n async with Trace("response_closed", logger, request, kwargs):\n await self._response_closed(stream_id=stream_id)\n\n if isinstance(exc, h2.exceptions.ProtocolError):\n # One case where h2 can raise a protocol error is when a\n # closed frame has been seen by the state machine.\n #\n # This happens when one stream is reading, and encounters\n # a GOAWAY event. Other flows of control may then raise\n # a protocol error at any point they interact with the 'h2_state'.\n #\n # In this case we'll have stored the event, and should raise\n # it as a RemoteProtocolError.\n if self._connection_terminated: # pragma: nocover\n raise RemoteProtocolError(self._connection_terminated)\n # If h2 raises a protocol error in some other state then we\n # must somehow have made a protocol violation.\n raise LocalProtocolError(exc) # pragma: nocover\n\n raise exc\n\n async def _send_connection_init(self, request: Request) -> None:\n """\n The HTTP/2 connection requires some initial setup before we can start\n using individual request/response streams on it.\n """\n # Need to set these manually here instead of manipulating via\n # __setitem__() otherwise the H2Connection will emit SettingsUpdate\n # frames in addition to sending the undesired defaults.\n self._h2_state.local_settings = h2.settings.Settings(\n client=True,\n initial_values={\n # Disable PUSH_PROMISE frames from the server since we don't do anything\n # with them for now. 
Maybe when we support caching?\n h2.settings.SettingCodes.ENABLE_PUSH: 0,\n # These two are taken from h2 for safe defaults\n h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,\n h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536,\n },\n )\n\n # Some websites (*cough* Yahoo *cough*) balk at this setting being\n # present in the initial handshake since it's not defined in the original\n # RFC despite the RFC mandating ignoring settings you don't know about.\n del self._h2_state.local_settings[\n h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL\n ]\n\n self._h2_state.initiate_connection()\n self._h2_state.increment_flow_control_window(2**24)\n await self._write_outgoing_data(request)\n\n # Sending the request...\n\n async def _send_request_headers(self, request: Request, stream_id: int) -> None:\n """\n Send the request headers to a given stream ID.\n """\n end_stream = not has_body_headers(request)\n\n # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'.\n # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require\n # HTTP/1.1 style headers, and map them appropriately if we end up on\n # an HTTP/2 connection.\n authority = [v for k, v in request.headers if k.lower() == b"host"][0]\n\n headers = [\n (b":method", request.method),\n (b":authority", authority),\n (b":scheme", request.url.scheme),\n (b":path", request.url.target),\n ] + [\n (k.lower(), v)\n for k, v in request.headers\n if k.lower()\n not in (\n b"host",\n b"transfer-encoding",\n )\n ]\n\n self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)\n self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id)\n await self._write_outgoing_data(request)\n\n async def _send_request_body(self, request: Request, stream_id: int) -> None:\n """\n Iterate over the request body sending it to a given stream ID.\n """\n if not has_body_headers(request):\n return\n\n assert isinstance(request.stream, typing.AsyncIterable)\n async for data in request.stream:\n await self._send_stream_data(request, stream_id, data)\n await self._send_end_stream(request, stream_id)\n\n async def _send_stream_data(\n self, request: Request, stream_id: int, data: bytes\n ) -> None:\n """\n Send a single chunk of data in one or more data frames.\n """\n while data:\n max_flow = await self._wait_for_outgoing_flow(request, stream_id)\n chunk_size = min(len(data), max_flow)\n chunk, data = data[:chunk_size], data[chunk_size:]\n self._h2_state.send_data(stream_id, chunk)\n await self._write_outgoing_data(request)\n\n async def _send_end_stream(self, request: Request, stream_id: int) -> None:\n """\n Send an empty data frame on on a given stream ID with the END_STREAM flag set.\n """\n self._h2_state.end_stream(stream_id)\n await self._write_outgoing_data(request)\n\n # Receiving the response...\n\n async def _receive_response(\n self, request: Request, stream_id: int\n ) -> tuple[int, list[tuple[bytes, bytes]]]:\n """\n Return the response status code and headers for a given stream ID.\n """\n while True:\n event = await self._receive_stream_event(request, stream_id)\n if isinstance(event, h2.events.ResponseReceived):\n break\n\n status_code = 200\n headers = []\n assert event.headers is not None\n for k, v in event.headers:\n if k == b":status":\n status_code = int(v.decode("ascii", errors="ignore"))\n elif not k.startswith(b":"):\n headers.append((k, v))\n\n return (status_code, headers)\n\n async def _receive_response_body(\n self, request: Request, stream_id: int\n ) -> typing.AsyncIterator[bytes]:\n """\n 
Iterator that returns the bytes of the response body for a given stream ID.\n """\n while True:\n event = await self._receive_stream_event(request, stream_id)\n if isinstance(event, h2.events.DataReceived):\n assert event.flow_controlled_length is not None\n assert event.data is not None\n amount = event.flow_controlled_length\n self._h2_state.acknowledge_received_data(amount, stream_id)\n await self._write_outgoing_data(request)\n yield event.data\n elif isinstance(event, h2.events.StreamEnded):\n break\n\n async def _receive_stream_event(\n self, request: Request, stream_id: int\n ) -> h2.events.ResponseReceived | h2.events.DataReceived | h2.events.StreamEnded:\n """\n Return the next available event for a given stream ID.\n\n Will read more data from the network if required.\n """\n while not self._events.get(stream_id):\n await self._receive_events(request, stream_id)\n event = self._events[stream_id].pop(0)\n if isinstance(event, h2.events.StreamReset):\n raise RemoteProtocolError(event)\n return event\n\n async def _receive_events(\n self, request: Request, stream_id: int | None = None\n ) -> None:\n """\n Read some data from the network until we see one or more events\n for a given stream ID.\n """\n async with self._read_lock:\n if self._connection_terminated is not None:\n last_stream_id = self._connection_terminated.last_stream_id\n if stream_id and last_stream_id and stream_id > last_stream_id:\n self._request_count -= 1\n raise ConnectionNotAvailable()\n raise RemoteProtocolError(self._connection_terminated)\n\n # This conditional is a bit icky. We don't want to block reading if we've\n # actually got an event to return for a given stream. We need to do that\n # check *within* the atomic read lock. Though it also need to be optional,\n # because when we call it from `_wait_for_outgoing_flow` we *do* want to\n # block until we've available flow control, event when we have events\n # pending for the stream ID we're attempting to send on.\n if stream_id is None or not self._events.get(stream_id):\n events = await self._read_incoming_data(request)\n for event in events:\n if isinstance(event, h2.events.RemoteSettingsChanged):\n async with Trace(\n "receive_remote_settings", logger, request\n ) as trace:\n await self._receive_remote_settings_change(event)\n trace.return_value = event\n\n elif isinstance(\n event,\n (\n h2.events.ResponseReceived,\n h2.events.DataReceived,\n h2.events.StreamEnded,\n h2.events.StreamReset,\n ),\n ):\n if event.stream_id in self._events:\n self._events[event.stream_id].append(event)\n\n elif isinstance(event, h2.events.ConnectionTerminated):\n self._connection_terminated = event\n\n await self._write_outgoing_data(request)\n\n async def _receive_remote_settings_change(\n self, event: h2.events.RemoteSettingsChanged\n ) -> None:\n max_concurrent_streams = event.changed_settings.get(\n h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS\n )\n if max_concurrent_streams:\n new_max_streams = min(\n max_concurrent_streams.new_value,\n self._h2_state.local_settings.max_concurrent_streams,\n )\n if new_max_streams and new_max_streams != self._max_streams:\n while new_max_streams > self._max_streams:\n await self._max_streams_semaphore.release()\n self._max_streams += 1\n while new_max_streams < self._max_streams:\n await self._max_streams_semaphore.acquire()\n self._max_streams -= 1\n\n async def _response_closed(self, stream_id: int) -> None:\n await self._max_streams_semaphore.release()\n del self._events[stream_id]\n async with self._state_lock:\n if 
self._connection_terminated and not self._events:\n await self.aclose()\n\n elif self._state == HTTPConnectionState.ACTIVE and not self._events:\n self._state = HTTPConnectionState.IDLE\n if self._keepalive_expiry is not None:\n now = time.monotonic()\n self._expire_at = now + self._keepalive_expiry\n if self._used_all_stream_ids: # pragma: nocover\n await self.aclose()\n\n async def aclose(self) -> None:\n # Note that this method unilaterally closes the connection, and does\n # not have any kind of locking in place around it.\n self._h2_state.close_connection()\n self._state = HTTPConnectionState.CLOSED\n await self._network_stream.aclose()\n\n # Wrappers around network read/write operations...\n\n async def _read_incoming_data(self, request: Request) -> list[h2.events.Event]:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("read", None)\n\n if self._read_exception is not None:\n raise self._read_exception # pragma: nocover\n\n try:\n data = await self._network_stream.read(self.READ_NUM_BYTES, timeout)\n if data == b"":\n raise RemoteProtocolError("Server disconnected")\n except Exception as exc:\n # If we get a network error we should:\n #\n # 1. Save the exception and just raise it immediately on any future reads.\n # (For example, this means that a single read timeout or disconnect will\n # immediately close all pending streams. Without requiring multiple\n # sequential timeouts.)\n # 2. Mark the connection as errored, so that we don't accept any other\n # incoming requests.\n self._read_exception = exc\n self._connection_error = True\n raise exc\n\n events: list[h2.events.Event] = self._h2_state.receive_data(data)\n\n return events\n\n async def _write_outgoing_data(self, request: Request) -> None:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("write", None)\n\n async with self._write_lock:\n data_to_send = self._h2_state.data_to_send()\n\n if self._write_exception is not None:\n raise self._write_exception # pragma: nocover\n\n try:\n await self._network_stream.write(data_to_send, timeout)\n except Exception as exc: # pragma: nocover\n # If we get a network error we should:\n #\n # 1. Save the exception and just raise it immediately on any future write.\n # (For example, this means that a single write timeout or disconnect will\n # immediately close all pending streams. Without requiring multiple\n # sequential timeouts.)\n # 2. 
Mark the connection as errored, so that we don't accept any other\n # incoming requests.\n self._write_exception = exc\n self._connection_error = True\n raise exc\n\n # Flow control...\n\n async def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int:\n """\n Returns the maximum allowable outgoing flow for a given stream.\n\n If the allowable flow is zero, then waits on the network until\n WindowUpdated frames have increased the flow rate.\n https://tools.ietf.org/html/rfc7540#section-6.9\n """\n local_flow: int = self._h2_state.local_flow_control_window(stream_id)\n max_frame_size: int = self._h2_state.max_outbound_frame_size\n flow = min(local_flow, max_frame_size)\n while flow == 0:\n await self._receive_events(request)\n local_flow = self._h2_state.local_flow_control_window(stream_id)\n max_frame_size = self._h2_state.max_outbound_frame_size\n flow = min(local_flow, max_frame_size)\n return flow\n\n # Interface for connection pooling...\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._origin\n\n def is_available(self) -> bool:\n return (\n self._state != HTTPConnectionState.CLOSED\n and not self._connection_error\n and not self._used_all_stream_ids\n and not (\n self._h2_state.state_machine.state\n == h2.connection.ConnectionState.CLOSED\n )\n )\n\n def has_expired(self) -> bool:\n now = time.monotonic()\n return self._expire_at is not None and now > self._expire_at\n\n def is_idle(self) -> bool:\n return self._state == HTTPConnectionState.IDLE\n\n def is_closed(self) -> bool:\n return self._state == HTTPConnectionState.CLOSED\n\n def info(self) -> str:\n origin = str(self._origin)\n return (\n f"{origin!r}, HTTP/2, {self._state.name}, "\n f"Request Count: {self._request_count}"\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n origin = str(self._origin)\n return (\n f"<{class_name} [{origin!r}, {self._state.name}, "\n f"Request Count: {self._request_count}]>"\n )\n\n # These context managers are not used in the standard flow, but are\n # useful for testing or working with connection instances directly.\n\n async def __aenter__(self) -> AsyncHTTP2Connection:\n return self\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: types.TracebackType | None = None,\n ) -> None:\n await self.aclose()\n\n\nclass HTTP2ConnectionByteStream:\n def __init__(\n self, connection: AsyncHTTP2Connection, request: Request, stream_id: int\n ) -> None:\n self._connection = connection\n self._request = request\n self._stream_id = stream_id\n self._closed = False\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n kwargs = {"request": self._request, "stream_id": self._stream_id}\n try:\n async with Trace("receive_response_body", logger, self._request, kwargs):\n async for chunk in self._connection._receive_response_body(\n request=self._request, stream_id=self._stream_id\n ):\n yield chunk\n except BaseException as exc:\n # If we get an exception while streaming the response,\n # we want to close the response (and possibly the connection)\n # before raising that exception.\n with AsyncShieldCancellation():\n await self.aclose()\n raise exc\n\n async def aclose(self) -> None:\n if not self._closed:\n self._closed = True\n kwargs = {"stream_id": self._stream_id}\n async with Trace("response_closed", logger, self._request, kwargs):\n await self._connection._response_closed(stream_id=self._stream_id)\n
| .venv\Lib\site-packages\httpcore\_async\http2.py | http2.py | Python | 23,936 | 0.95 | 0.165541 | 0.131631 | awesome-app | 50 | 2023-08-12T07:01:19.359151 | GPL-3.0 | false | 34f43745ba1b3ce72e7588aca826a4c5 |
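The HTTP/2 support implemented in `http2.py` is opt-in at the pool level; a minimal sketch, assuming the optional `h2` dependency is installed (`pip install httpcore[http2]`) and using an illustrative URL:

```python
import httpcore

# Allow HTTP/2 to be negotiated via ALPN, falling back to HTTP/1.1 otherwise.
with httpcore.ConnectionPool(http1=True, http2=True) as pool:
    response = pool.request("GET", "https://www.example.com/")
    # b"HTTP/2" if the server negotiated it, otherwise b"HTTP/1.1".
    print(response.extensions["http_version"])
```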
from __future__ import annotations\n\nimport base64\nimport logging\nimport ssl\nimport typing\n\nfrom .._backends.base import SOCKET_OPTION, AsyncNetworkBackend\nfrom .._exceptions import ProxyError\nfrom .._models import (\n URL,\n Origin,\n Request,\n Response,\n enforce_bytes,\n enforce_headers,\n enforce_url,\n)\nfrom .._ssl import default_ssl_context\nfrom .._synchronization import AsyncLock\nfrom .._trace import Trace\nfrom .connection import AsyncHTTPConnection\nfrom .connection_pool import AsyncConnectionPool\nfrom .http11 import AsyncHTTP11Connection\nfrom .interfaces import AsyncConnectionInterface\n\nByteOrStr = typing.Union[bytes, str]\nHeadersAsSequence = typing.Sequence[typing.Tuple[ByteOrStr, ByteOrStr]]\nHeadersAsMapping = typing.Mapping[ByteOrStr, ByteOrStr]\n\n\nlogger = logging.getLogger("httpcore.proxy")\n\n\ndef merge_headers(\n default_headers: typing.Sequence[tuple[bytes, bytes]] | None = None,\n override_headers: typing.Sequence[tuple[bytes, bytes]] | None = None,\n) -> list[tuple[bytes, bytes]]:\n """\n Append default_headers and override_headers, de-duplicating if a key exists\n in both cases.\n """\n default_headers = [] if default_headers is None else list(default_headers)\n override_headers = [] if override_headers is None else list(override_headers)\n has_override = set(key.lower() for key, value in override_headers)\n default_headers = [\n (key, value)\n for key, value in default_headers\n if key.lower() not in has_override\n ]\n return default_headers + override_headers\n\n\nclass AsyncHTTPProxy(AsyncConnectionPool): # pragma: nocover\n """\n A connection pool that sends requests via an HTTP proxy.\n """\n\n def __init__(\n self,\n proxy_url: URL | bytes | str,\n proxy_auth: tuple[bytes | str, bytes | str] | None = None,\n proxy_headers: HeadersAsMapping | HeadersAsSequence | None = None,\n ssl_context: ssl.SSLContext | None = None,\n proxy_ssl_context: ssl.SSLContext | None = None,\n max_connections: int | None = 10,\n max_keepalive_connections: int | None = None,\n keepalive_expiry: float | None = None,\n http1: bool = True,\n http2: bool = False,\n retries: int = 0,\n local_address: str | None = None,\n uds: str | None = None,\n network_backend: AsyncNetworkBackend | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> None:\n """\n A connection pool for making HTTP requests.\n\n Parameters:\n proxy_url: The URL to use when connecting to the proxy server.\n For example `"http://127.0.0.1:8080/"`.\n proxy_auth: Any proxy authentication as a two-tuple of\n (username, password). May be either bytes or ascii-only str.\n proxy_headers: Any HTTP headers to use for the proxy requests.\n For example `{"Proxy-Authorization": "Basic <username>:<password>"}`.\n ssl_context: An SSL context to use for verifying connections.\n If not specified, the default `httpcore.default_ssl_context()`\n will be used.\n proxy_ssl_context: The same as `ssl_context`, but for a proxy server rather than a remote origin.\n max_connections: The maximum number of concurrent HTTP connections that\n the pool should allow. 
Any attempt to send a request on a pool that\n would exceed this amount will block until a connection is available.\n max_keepalive_connections: The maximum number of idle HTTP connections\n that will be maintained in the pool.\n keepalive_expiry: The duration in seconds that an idle HTTP connection\n may be maintained for before being expired from the pool.\n http1: A boolean indicating if HTTP/1.1 requests should be supported\n by the connection pool. Defaults to True.\n http2: A boolean indicating if HTTP/2 requests should be supported by\n the connection pool. Defaults to False.\n retries: The maximum number of retries when trying to establish\n a connection.\n local_address: Local address to connect from. Can also be used to\n connect using a particular address family. Using\n `local_address="0.0.0.0"` will connect using an `AF_INET` address\n (IPv4), while using `local_address="::"` will connect using an\n `AF_INET6` address (IPv6).\n uds: Path to a Unix Domain Socket to use instead of TCP sockets.\n network_backend: A backend instance to use for handling network I/O.\n """\n super().__init__(\n ssl_context=ssl_context,\n max_connections=max_connections,\n max_keepalive_connections=max_keepalive_connections,\n keepalive_expiry=keepalive_expiry,\n http1=http1,\n http2=http2,\n network_backend=network_backend,\n retries=retries,\n local_address=local_address,\n uds=uds,\n socket_options=socket_options,\n )\n\n self._proxy_url = enforce_url(proxy_url, name="proxy_url")\n if (\n self._proxy_url.scheme == b"http" and proxy_ssl_context is not None\n ): # pragma: no cover\n raise RuntimeError(\n "The `proxy_ssl_context` argument is not allowed for the http scheme"\n )\n\n self._ssl_context = ssl_context\n self._proxy_ssl_context = proxy_ssl_context\n self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")\n if proxy_auth is not None:\n username = enforce_bytes(proxy_auth[0], name="proxy_auth")\n password = enforce_bytes(proxy_auth[1], name="proxy_auth")\n userpass = username + b":" + password\n authorization = b"Basic " + base64.b64encode(userpass)\n self._proxy_headers = [\n (b"Proxy-Authorization", authorization)\n ] + self._proxy_headers\n\n def create_connection(self, origin: Origin) -> AsyncConnectionInterface:\n if origin.scheme == b"http":\n return AsyncForwardHTTPConnection(\n proxy_origin=self._proxy_url.origin,\n proxy_headers=self._proxy_headers,\n remote_origin=origin,\n keepalive_expiry=self._keepalive_expiry,\n network_backend=self._network_backend,\n proxy_ssl_context=self._proxy_ssl_context,\n )\n return AsyncTunnelHTTPConnection(\n proxy_origin=self._proxy_url.origin,\n proxy_headers=self._proxy_headers,\n remote_origin=origin,\n ssl_context=self._ssl_context,\n proxy_ssl_context=self._proxy_ssl_context,\n keepalive_expiry=self._keepalive_expiry,\n http1=self._http1,\n http2=self._http2,\n network_backend=self._network_backend,\n )\n\n\nclass AsyncForwardHTTPConnection(AsyncConnectionInterface):\n def __init__(\n self,\n proxy_origin: Origin,\n remote_origin: Origin,\n proxy_headers: HeadersAsMapping | HeadersAsSequence | None = None,\n keepalive_expiry: float | None = None,\n network_backend: AsyncNetworkBackend | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n proxy_ssl_context: ssl.SSLContext | None = None,\n ) -> None:\n self._connection = AsyncHTTPConnection(\n origin=proxy_origin,\n keepalive_expiry=keepalive_expiry,\n network_backend=network_backend,\n socket_options=socket_options,\n ssl_context=proxy_ssl_context,\n 
)\n self._proxy_origin = proxy_origin\n self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")\n self._remote_origin = remote_origin\n\n async def handle_async_request(self, request: Request) -> Response:\n headers = merge_headers(self._proxy_headers, request.headers)\n url = URL(\n scheme=self._proxy_origin.scheme,\n host=self._proxy_origin.host,\n port=self._proxy_origin.port,\n target=bytes(request.url),\n )\n proxy_request = Request(\n method=request.method,\n url=url,\n headers=headers,\n content=request.stream,\n extensions=request.extensions,\n )\n return await self._connection.handle_async_request(proxy_request)\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._remote_origin\n\n async def aclose(self) -> None:\n await self._connection.aclose()\n\n def info(self) -> str:\n return self._connection.info()\n\n def is_available(self) -> bool:\n return self._connection.is_available()\n\n def has_expired(self) -> bool:\n return self._connection.has_expired()\n\n def is_idle(self) -> bool:\n return self._connection.is_idle()\n\n def is_closed(self) -> bool:\n return self._connection.is_closed()\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__} [{self.info()}]>"\n\n\nclass AsyncTunnelHTTPConnection(AsyncConnectionInterface):\n def __init__(\n self,\n proxy_origin: Origin,\n remote_origin: Origin,\n ssl_context: ssl.SSLContext | None = None,\n proxy_ssl_context: ssl.SSLContext | None = None,\n proxy_headers: typing.Sequence[tuple[bytes, bytes]] | None = None,\n keepalive_expiry: float | None = None,\n http1: bool = True,\n http2: bool = False,\n network_backend: AsyncNetworkBackend | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> None:\n self._connection: AsyncConnectionInterface = AsyncHTTPConnection(\n origin=proxy_origin,\n keepalive_expiry=keepalive_expiry,\n network_backend=network_backend,\n socket_options=socket_options,\n ssl_context=proxy_ssl_context,\n )\n self._proxy_origin = proxy_origin\n self._remote_origin = remote_origin\n self._ssl_context = ssl_context\n self._proxy_ssl_context = proxy_ssl_context\n self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")\n self._keepalive_expiry = keepalive_expiry\n self._http1 = http1\n self._http2 = http2\n self._connect_lock = AsyncLock()\n self._connected = False\n\n async def handle_async_request(self, request: Request) -> Response:\n timeouts = request.extensions.get("timeout", {})\n timeout = timeouts.get("connect", None)\n\n async with self._connect_lock:\n if not self._connected:\n target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port)\n\n connect_url = URL(\n scheme=self._proxy_origin.scheme,\n host=self._proxy_origin.host,\n port=self._proxy_origin.port,\n target=target,\n )\n connect_headers = merge_headers(\n [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers\n )\n connect_request = Request(\n method=b"CONNECT",\n url=connect_url,\n headers=connect_headers,\n extensions=request.extensions,\n )\n connect_response = await self._connection.handle_async_request(\n connect_request\n )\n\n if connect_response.status < 200 or connect_response.status > 299:\n reason_bytes = connect_response.extensions.get("reason_phrase", b"")\n reason_str = reason_bytes.decode("ascii", errors="ignore")\n msg = "%d %s" % (connect_response.status, reason_str)\n await self._connection.aclose()\n raise ProxyError(msg)\n\n stream = connect_response.extensions["network_stream"]\n\n # Upgrade the stream 
to SSL\n ssl_context = (\n default_ssl_context()\n if self._ssl_context is None\n else self._ssl_context\n )\n alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]\n ssl_context.set_alpn_protocols(alpn_protocols)\n\n kwargs = {\n "ssl_context": ssl_context,\n "server_hostname": self._remote_origin.host.decode("ascii"),\n "timeout": timeout,\n }\n async with Trace("start_tls", logger, request, kwargs) as trace:\n stream = await stream.start_tls(**kwargs)\n trace.return_value = stream\n\n # Determine if we should be using HTTP/1.1 or HTTP/2\n ssl_object = stream.get_extra_info("ssl_object")\n http2_negotiated = (\n ssl_object is not None\n and ssl_object.selected_alpn_protocol() == "h2"\n )\n\n # Create the HTTP/1.1 or HTTP/2 connection\n if http2_negotiated or (self._http2 and not self._http1):\n from .http2 import AsyncHTTP2Connection\n\n self._connection = AsyncHTTP2Connection(\n origin=self._remote_origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n else:\n self._connection = AsyncHTTP11Connection(\n origin=self._remote_origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n\n self._connected = True\n return await self._connection.handle_async_request(request)\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._remote_origin\n\n async def aclose(self) -> None:\n await self._connection.aclose()\n\n def info(self) -> str:\n return self._connection.info()\n\n def is_available(self) -> bool:\n return self._connection.is_available()\n\n def has_expired(self) -> bool:\n return self._connection.has_expired()\n\n def is_idle(self) -> bool:\n return self._connection.is_idle()\n\n def is_closed(self) -> bool:\n return self._connection.is_closed()\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__} [{self.info()}]>"\n
|
.venv\Lib\site-packages\httpcore\_async\http_proxy.py
|
http_proxy.py
|
Python
| 14,701 | 0.95 | 0.138965 | 0.009346 |
python-kit
| 874 |
2024-05-26T16:34:17.371325
|
GPL-3.0
| false |
2a2662150be0474accebd3f3fb13e213
|
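The http_proxy.py row above provides the forwarding and tunnelling connections used by httpcore's async HTTP proxy pool. A minimal usage sketch follows, assuming the public `httpcore.AsyncHTTPProxy` wrapper built on these classes; the proxy address, credentials, and target URL are placeholders.

import asyncio
import httpcore

async def main() -> None:
    async with httpcore.AsyncHTTPProxy(
        proxy_url="http://127.0.0.1:8080/",   # placeholder proxy address
        proxy_auth=(b"user", b"secret"),      # sent as a Proxy-Authorization header
    ) as proxy:
        # Plain "http" targets are forwarded through the proxy connection,
        # while "https" targets are tunnelled with a CONNECT request first.
        response = await proxy.request("GET", "https://example.org/")
        print(response.status)

asyncio.run(main())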
from __future__ import annotations\n\nimport contextlib\nimport typing\n\nfrom .._models import (\n URL,\n Extensions,\n HeaderTypes,\n Origin,\n Request,\n Response,\n enforce_bytes,\n enforce_headers,\n enforce_url,\n include_request_headers,\n)\n\n\nclass AsyncRequestInterface:\n async def request(\n self,\n method: bytes | str,\n url: URL | bytes | str,\n *,\n headers: HeaderTypes = None,\n content: bytes | typing.AsyncIterator[bytes] | None = None,\n extensions: Extensions | None = None,\n ) -> Response:\n # Strict type checking on our parameters.\n method = enforce_bytes(method, name="method")\n url = enforce_url(url, name="url")\n headers = enforce_headers(headers, name="headers")\n\n # Include Host header, and optionally Content-Length or Transfer-Encoding.\n headers = include_request_headers(headers, url=url, content=content)\n\n request = Request(\n method=method,\n url=url,\n headers=headers,\n content=content,\n extensions=extensions,\n )\n response = await self.handle_async_request(request)\n try:\n await response.aread()\n finally:\n await response.aclose()\n return response\n\n @contextlib.asynccontextmanager\n async def stream(\n self,\n method: bytes | str,\n url: URL | bytes | str,\n *,\n headers: HeaderTypes = None,\n content: bytes | typing.AsyncIterator[bytes] | None = None,\n extensions: Extensions | None = None,\n ) -> typing.AsyncIterator[Response]:\n # Strict type checking on our parameters.\n method = enforce_bytes(method, name="method")\n url = enforce_url(url, name="url")\n headers = enforce_headers(headers, name="headers")\n\n # Include Host header, and optionally Content-Length or Transfer-Encoding.\n headers = include_request_headers(headers, url=url, content=content)\n\n request = Request(\n method=method,\n url=url,\n headers=headers,\n content=content,\n extensions=extensions,\n )\n response = await self.handle_async_request(request)\n try:\n yield response\n finally:\n await response.aclose()\n\n async def handle_async_request(self, request: Request) -> Response:\n raise NotImplementedError() # pragma: nocover\n\n\nclass AsyncConnectionInterface(AsyncRequestInterface):\n async def aclose(self) -> None:\n raise NotImplementedError() # pragma: nocover\n\n def info(self) -> str:\n raise NotImplementedError() # pragma: nocover\n\n def can_handle_request(self, origin: Origin) -> bool:\n raise NotImplementedError() # pragma: nocover\n\n def is_available(self) -> bool:\n """\n Return `True` if the connection is currently able to accept an\n outgoing request.\n\n An HTTP/1.1 connection will only be available if it is currently idle.\n\n An HTTP/2 connection will be available so long as the stream ID space is\n not yet exhausted, and the connection is not in an error state.\n\n While the connection is being established we may not yet know if it is going\n to result in an HTTP/1.1 or HTTP/2 connection. 
The connection should be\n treated as being available, but might ultimately raise `NewConnectionRequired`\n required exceptions if multiple requests are attempted over a connection\n that ends up being established as HTTP/1.1.\n """\n raise NotImplementedError() # pragma: nocover\n\n def has_expired(self) -> bool:\n """\n Return `True` if the connection is in a state where it should be closed.\n\n This either means that the connection is idle and it has passed the\n expiry time on its keep-alive, or that server has sent an EOF.\n """\n raise NotImplementedError() # pragma: nocover\n\n def is_idle(self) -> bool:\n """\n Return `True` if the connection is currently idle.\n """\n raise NotImplementedError() # pragma: nocover\n\n def is_closed(self) -> bool:\n """\n Return `True` if the connection has been closed.\n\n Used when a response is closed to determine if the connection may be\n returned to the connection pool or not.\n """\n raise NotImplementedError() # pragma: nocover\n
|
.venv\Lib\site-packages\httpcore\_async\interfaces.py
|
interfaces.py
|
Python
| 4,455 | 0.95 | 0.160584 | 0.052632 |
awesome-app
| 341 |
2025-01-31T12:49:11.909867
|
GPL-3.0
| false |
0ae28a3a53232d2bc2d746cf4b905563
|
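The `AsyncRequestInterface` in the interfaces.py row above defines both a buffered `.request()` helper and a streaming `.stream()` context manager. A short sketch of the two styles, using `httpcore.AsyncConnectionPool` as one concrete implementation (an assumption here; the URL is a placeholder):

import asyncio
import httpcore

async def main() -> None:
    async with httpcore.AsyncConnectionPool() as pool:
        # .request() reads and closes the response body before returning.
        response = await pool.request("GET", "https://example.org/")
        print(response.status, len(response.content))

        # .stream() yields the response before the body has been read,
        # so it can be consumed incrementally; it is closed on exit.
        async with pool.stream("GET", "https://example.org/") as response:
            async for chunk in response.aiter_stream():
                print(len(chunk))

asyncio.run(main())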
from __future__ import annotations\n\nimport logging\nimport ssl\n\nimport socksio\n\nfrom .._backends.auto import AutoBackend\nfrom .._backends.base import AsyncNetworkBackend, AsyncNetworkStream\nfrom .._exceptions import ConnectionNotAvailable, ProxyError\nfrom .._models import URL, Origin, Request, Response, enforce_bytes, enforce_url\nfrom .._ssl import default_ssl_context\nfrom .._synchronization import AsyncLock\nfrom .._trace import Trace\nfrom .connection_pool import AsyncConnectionPool\nfrom .http11 import AsyncHTTP11Connection\nfrom .interfaces import AsyncConnectionInterface\n\nlogger = logging.getLogger("httpcore.socks")\n\n\nAUTH_METHODS = {\n b"\x00": "NO AUTHENTICATION REQUIRED",\n b"\x01": "GSSAPI",\n b"\x02": "USERNAME/PASSWORD",\n b"\xff": "NO ACCEPTABLE METHODS",\n}\n\nREPLY_CODES = {\n b"\x00": "Succeeded",\n b"\x01": "General SOCKS server failure",\n b"\x02": "Connection not allowed by ruleset",\n b"\x03": "Network unreachable",\n b"\x04": "Host unreachable",\n b"\x05": "Connection refused",\n b"\x06": "TTL expired",\n b"\x07": "Command not supported",\n b"\x08": "Address type not supported",\n}\n\n\nasync def _init_socks5_connection(\n stream: AsyncNetworkStream,\n *,\n host: bytes,\n port: int,\n auth: tuple[bytes, bytes] | None = None,\n) -> None:\n conn = socksio.socks5.SOCKS5Connection()\n\n # Auth method request\n auth_method = (\n socksio.socks5.SOCKS5AuthMethod.NO_AUTH_REQUIRED\n if auth is None\n else socksio.socks5.SOCKS5AuthMethod.USERNAME_PASSWORD\n )\n conn.send(socksio.socks5.SOCKS5AuthMethodsRequest([auth_method]))\n outgoing_bytes = conn.data_to_send()\n await stream.write(outgoing_bytes)\n\n # Auth method response\n incoming_bytes = await stream.read(max_bytes=4096)\n response = conn.receive_data(incoming_bytes)\n assert isinstance(response, socksio.socks5.SOCKS5AuthReply)\n if response.method != auth_method:\n requested = AUTH_METHODS.get(auth_method, "UNKNOWN")\n responded = AUTH_METHODS.get(response.method, "UNKNOWN")\n raise ProxyError(\n f"Requested {requested} from proxy server, but got {responded}."\n )\n\n if response.method == socksio.socks5.SOCKS5AuthMethod.USERNAME_PASSWORD:\n # Username/password request\n assert auth is not None\n username, password = auth\n conn.send(socksio.socks5.SOCKS5UsernamePasswordRequest(username, password))\n outgoing_bytes = conn.data_to_send()\n await stream.write(outgoing_bytes)\n\n # Username/password response\n incoming_bytes = await stream.read(max_bytes=4096)\n response = conn.receive_data(incoming_bytes)\n assert isinstance(response, socksio.socks5.SOCKS5UsernamePasswordReply)\n if not response.success:\n raise ProxyError("Invalid username/password")\n\n # Connect request\n conn.send(\n socksio.socks5.SOCKS5CommandRequest.from_address(\n socksio.socks5.SOCKS5Command.CONNECT, (host, port)\n )\n )\n outgoing_bytes = conn.data_to_send()\n await stream.write(outgoing_bytes)\n\n # Connect response\n incoming_bytes = await stream.read(max_bytes=4096)\n response = conn.receive_data(incoming_bytes)\n assert isinstance(response, socksio.socks5.SOCKS5Reply)\n if response.reply_code != socksio.socks5.SOCKS5ReplyCode.SUCCEEDED:\n reply_code = REPLY_CODES.get(response.reply_code, "UNKOWN")\n raise ProxyError(f"Proxy Server could not connect: {reply_code}.")\n\n\nclass AsyncSOCKSProxy(AsyncConnectionPool): # pragma: nocover\n """\n A connection pool that sends requests via an HTTP proxy.\n """\n\n def __init__(\n self,\n proxy_url: URL | bytes | str,\n proxy_auth: tuple[bytes | str, bytes | str] | None = None,\n 
ssl_context: ssl.SSLContext | None = None,\n max_connections: int | None = 10,\n max_keepalive_connections: int | None = None,\n keepalive_expiry: float | None = None,\n http1: bool = True,\n http2: bool = False,\n retries: int = 0,\n network_backend: AsyncNetworkBackend | None = None,\n ) -> None:\n """\n A connection pool for making HTTP requests.\n\n Parameters:\n proxy_url: The URL to use when connecting to the proxy server.\n For example `"http://127.0.0.1:8080/"`.\n ssl_context: An SSL context to use for verifying connections.\n If not specified, the default `httpcore.default_ssl_context()`\n will be used.\n max_connections: The maximum number of concurrent HTTP connections that\n the pool should allow. Any attempt to send a request on a pool that\n would exceed this amount will block until a connection is available.\n max_keepalive_connections: The maximum number of idle HTTP connections\n that will be maintained in the pool.\n keepalive_expiry: The duration in seconds that an idle HTTP connection\n may be maintained for before being expired from the pool.\n http1: A boolean indicating if HTTP/1.1 requests should be supported\n by the connection pool. Defaults to True.\n http2: A boolean indicating if HTTP/2 requests should be supported by\n the connection pool. Defaults to False.\n retries: The maximum number of retries when trying to establish\n a connection.\n local_address: Local address to connect from. Can also be used to\n connect using a particular address family. Using\n `local_address="0.0.0.0"` will connect using an `AF_INET` address\n (IPv4), while using `local_address="::"` will connect using an\n `AF_INET6` address (IPv6).\n uds: Path to a Unix Domain Socket to use instead of TCP sockets.\n network_backend: A backend instance to use for handling network I/O.\n """\n super().__init__(\n ssl_context=ssl_context,\n max_connections=max_connections,\n max_keepalive_connections=max_keepalive_connections,\n keepalive_expiry=keepalive_expiry,\n http1=http1,\n http2=http2,\n network_backend=network_backend,\n retries=retries,\n )\n self._ssl_context = ssl_context\n self._proxy_url = enforce_url(proxy_url, name="proxy_url")\n if proxy_auth is not None:\n username, password = proxy_auth\n username_bytes = enforce_bytes(username, name="proxy_auth")\n password_bytes = enforce_bytes(password, name="proxy_auth")\n self._proxy_auth: tuple[bytes, bytes] | None = (\n username_bytes,\n password_bytes,\n )\n else:\n self._proxy_auth = None\n\n def create_connection(self, origin: Origin) -> AsyncConnectionInterface:\n return AsyncSocks5Connection(\n proxy_origin=self._proxy_url.origin,\n remote_origin=origin,\n proxy_auth=self._proxy_auth,\n ssl_context=self._ssl_context,\n keepalive_expiry=self._keepalive_expiry,\n http1=self._http1,\n http2=self._http2,\n network_backend=self._network_backend,\n )\n\n\nclass AsyncSocks5Connection(AsyncConnectionInterface):\n def __init__(\n self,\n proxy_origin: Origin,\n remote_origin: Origin,\n proxy_auth: tuple[bytes, bytes] | None = None,\n ssl_context: ssl.SSLContext | None = None,\n keepalive_expiry: float | None = None,\n http1: bool = True,\n http2: bool = False,\n network_backend: AsyncNetworkBackend | None = None,\n ) -> None:\n self._proxy_origin = proxy_origin\n self._remote_origin = remote_origin\n self._proxy_auth = proxy_auth\n self._ssl_context = ssl_context\n self._keepalive_expiry = keepalive_expiry\n self._http1 = http1\n self._http2 = http2\n\n self._network_backend: AsyncNetworkBackend = (\n AutoBackend() if network_backend is None 
else network_backend\n )\n self._connect_lock = AsyncLock()\n self._connection: AsyncConnectionInterface | None = None\n self._connect_failed = False\n\n async def handle_async_request(self, request: Request) -> Response:\n timeouts = request.extensions.get("timeout", {})\n sni_hostname = request.extensions.get("sni_hostname", None)\n timeout = timeouts.get("connect", None)\n\n async with self._connect_lock:\n if self._connection is None:\n try:\n # Connect to the proxy\n kwargs = {\n "host": self._proxy_origin.host.decode("ascii"),\n "port": self._proxy_origin.port,\n "timeout": timeout,\n }\n async with Trace("connect_tcp", logger, request, kwargs) as trace:\n stream = await self._network_backend.connect_tcp(**kwargs)\n trace.return_value = stream\n\n # Connect to the remote host using socks5\n kwargs = {\n "stream": stream,\n "host": self._remote_origin.host.decode("ascii"),\n "port": self._remote_origin.port,\n "auth": self._proxy_auth,\n }\n async with Trace(\n "setup_socks5_connection", logger, request, kwargs\n ) as trace:\n await _init_socks5_connection(**kwargs)\n trace.return_value = stream\n\n # Upgrade the stream to SSL\n if self._remote_origin.scheme == b"https":\n ssl_context = (\n default_ssl_context()\n if self._ssl_context is None\n else self._ssl_context\n )\n alpn_protocols = (\n ["http/1.1", "h2"] if self._http2 else ["http/1.1"]\n )\n ssl_context.set_alpn_protocols(alpn_protocols)\n\n kwargs = {\n "ssl_context": ssl_context,\n "server_hostname": sni_hostname\n or self._remote_origin.host.decode("ascii"),\n "timeout": timeout,\n }\n async with Trace("start_tls", logger, request, kwargs) as trace:\n stream = await stream.start_tls(**kwargs)\n trace.return_value = stream\n\n # Determine if we should be using HTTP/1.1 or HTTP/2\n ssl_object = stream.get_extra_info("ssl_object")\n http2_negotiated = (\n ssl_object is not None\n and ssl_object.selected_alpn_protocol() == "h2"\n )\n\n # Create the HTTP/1.1 or HTTP/2 connection\n if http2_negotiated or (\n self._http2 and not self._http1\n ): # pragma: nocover\n from .http2 import AsyncHTTP2Connection\n\n self._connection = AsyncHTTP2Connection(\n origin=self._remote_origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n else:\n self._connection = AsyncHTTP11Connection(\n origin=self._remote_origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n except Exception as exc:\n self._connect_failed = True\n raise exc\n elif not self._connection.is_available(): # pragma: nocover\n raise ConnectionNotAvailable()\n\n return await self._connection.handle_async_request(request)\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._remote_origin\n\n async def aclose(self) -> None:\n if self._connection is not None:\n await self._connection.aclose()\n\n def is_available(self) -> bool:\n if self._connection is None: # pragma: nocover\n # If HTTP/2 support is enabled, and the resulting connection could\n # end up as HTTP/2 then we should indicate the connection as being\n # available to service multiple requests.\n return (\n self._http2\n and (self._remote_origin.scheme == b"https" or not self._http1)\n and not self._connect_failed\n )\n return self._connection.is_available()\n\n def has_expired(self) -> bool:\n if self._connection is None: # pragma: nocover\n return self._connect_failed\n return self._connection.has_expired()\n\n def is_idle(self) -> bool:\n if self._connection is None: # pragma: nocover\n return self._connect_failed\n return self._connection.is_idle()\n\n 
def is_closed(self) -> bool:\n if self._connection is None: # pragma: nocover\n return self._connect_failed\n return self._connection.is_closed()\n\n def info(self) -> str:\n if self._connection is None: # pragma: nocover\n return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"\n return self._connection.info()\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__} [{self.info()}]>"\n
|
.venv\Lib\site-packages\httpcore\_async\socks_proxy.py
|
socks_proxy.py
|
Python
| 13,841 | 0.95 | 0.1261 | 0.049834 |
react-lib
| 30 |
2024-12-02T15:59:39.087070
|
MIT
| false |
155b1e54aef0d32c261f257202ece034
|
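The SOCKS5 handshake in the socks_proxy.py row above (auth-method negotiation, optional username/password, then CONNECT) runs once per connection before any HTTP traffic. A hedged usage sketch via the public `httpcore.AsyncSOCKSProxy` pool, which requires the optional `socksio` dependency (`pip install httpcore[socks]`); addresses and credentials are placeholders.

import asyncio
import httpcore

async def main() -> None:
    async with httpcore.AsyncSOCKSProxy(
        proxy_url="socks5://127.0.0.1:1080/",
        proxy_auth=(b"user", b"secret"),  # negotiated via the USERNAME/PASSWORD method
    ) as proxy:
        response = await proxy.request("GET", "https://example.org/")
        print(response.status)

asyncio.run(main())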
from .connection import AsyncHTTPConnection\nfrom .connection_pool import AsyncConnectionPool\nfrom .http11 import AsyncHTTP11Connection\nfrom .http_proxy import AsyncHTTPProxy\nfrom .interfaces import AsyncConnectionInterface\n\ntry:\n from .http2 import AsyncHTTP2Connection\nexcept ImportError: # pragma: nocover\n\n class AsyncHTTP2Connection: # type: ignore\n def __init__(self, *args, **kwargs) -> None: # type: ignore\n raise RuntimeError(\n "Attempted to use http2 support, but the `h2` package is not "\n "installed. Use 'pip install httpcore[http2]'."\n )\n\n\ntry:\n from .socks_proxy import AsyncSOCKSProxy\nexcept ImportError: # pragma: nocover\n\n class AsyncSOCKSProxy: # type: ignore\n def __init__(self, *args, **kwargs) -> None: # type: ignore\n raise RuntimeError(\n "Attempted to use SOCKS support, but the `socksio` package is not "\n "installed. Use 'pip install httpcore[socks]'."\n )\n\n\n__all__ = [\n "AsyncHTTPConnection",\n "AsyncConnectionPool",\n "AsyncHTTPProxy",\n "AsyncHTTP11Connection",\n "AsyncHTTP2Connection",\n "AsyncConnectionInterface",\n "AsyncSOCKSProxy",\n]\n
|
.venv\Lib\site-packages\httpcore\_async\__init__.py
|
__init__.py
|
Python
| 1,221 | 0.95 | 0.153846 | 0 |
node-utils
| 806 |
2024-09-23T22:12:45.726221
|
MIT
| false |
f5017575787cab4ced014351fe31ed3b
|
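The package `__init__` above uses stub classes so that importing httpcore never fails when the optional `h2` or `socksio` extras are missing; the error surfaces only when the feature is actually instantiated. The same pattern in isolation, as a generic sketch (`fancy_codec` is a hypothetical package name, not part of httpcore):

try:
    from fancy_codec import FancyCodec
except ImportError:
    class FancyCodec:  # type: ignore
        def __init__(self, *args, **kwargs) -> None:
            raise RuntimeError(
                "Attempted to use FancyCodec, but the `fancy_codec` package "
                "is not installed."
            )

# Importing this module always succeeds; only constructing FancyCodec
# without the optional dependency raises, at the point of use.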
\n\n
|
.venv\Lib\site-packages\httpcore\_async\__pycache__\connection.cpython-313.pyc
|
connection.cpython-313.pyc
|
Other
| 11,985 | 0.8 | 0 | 0 |
awesome-app
| 841 |
2024-10-14T19:32:22.468779
|
GPL-3.0
| false |
418b3918223ac44ecfe7ae3e2b1d54ef
|
\n\n
|
.venv\Lib\site-packages\httpcore\_async\__pycache__\connection_pool.cpython-313.pyc
|
connection_pool.cpython-313.pyc
|
Other
| 19,674 | 0.8 | 0.039823 | 0 |
vue-tools
| 58 |
2024-06-28T15:15:30.211266
|
Apache-2.0
| false |
7d911f78b333a828dd7b6a4e38cdf0fe
|
\n\n
|
.venv\Lib\site-packages\httpcore\_async\__pycache__\http11.cpython-313.pyc
|
http11.cpython-313.pyc
|
Other
| 20,505 | 0.8 | 0 | 0.005988 |
python-kit
| 9 |
2024-12-01T14:18:19.128623
|
GPL-3.0
| false |
ec209a8b305e231850a3c53cc48881ac
|
\n\n
|
.venv\Lib\site-packages\httpcore\_async\__pycache__\http2.cpython-313.pyc
|
http2.cpython-313.pyc
|
Other
| 31,642 | 0.95 | 0.024291 | 0 |
python-kit
| 840 |
2023-10-08T20:29:43.261626
|
MIT
| false |
670911e141c6f8e12576a96c9714c2df
|
\n\n
|
.venv\Lib\site-packages\httpcore\_async\__pycache__\http_proxy.cpython-313.pyc
|
http_proxy.cpython-313.pyc
|
Other
| 17,879 | 0.8 | 0.069182 | 0 |
awesome-app
| 874 |
2024-05-08T10:07:51.519643
|
BSD-3-Clause
| false |
e79a97c2e2757079856e29bec719752b
|
\n\n
|
.venv\Lib\site-packages\httpcore\_async\__pycache__\interfaces.cpython-313.pyc
|
interfaces.cpython-313.pyc
|
Other
| 5,616 | 0.95 | 0.105263 | 0 |
awesome-app
| 520 |
2024-02-14T08:20:57.227684
|
Apache-2.0
| false |
5c3edc70e96c6b277f18935c0aabcfad
|
\n\n
|
.venv\Lib\site-packages\httpcore\_async\__pycache__\socks_proxy.cpython-313.pyc
|
socks_proxy.cpython-313.pyc
|
Other
| 16,903 | 0.8 | 0.04142 | 0 |
python-kit
| 406 |
2025-05-31T03:08:13.897341
|
GPL-3.0
| false |
e0a3c6c0154e0e2d7a86b347b243cdcf
|
\n\n
|
.venv\Lib\site-packages\httpcore\_async\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 1,701 | 0.7 | 0 | 0 |
awesome-app
| 235 |
2024-06-29T01:06:19.572288
|
MIT
| false |
08b7d19a770a4330618df0843b49868d
|
from __future__ import annotations\n\nimport ssl\nimport typing\n\nimport anyio\n\nfrom .._exceptions import (\n ConnectError,\n ConnectTimeout,\n ReadError,\n ReadTimeout,\n WriteError,\n WriteTimeout,\n map_exceptions,\n)\nfrom .._utils import is_socket_readable\nfrom .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream\n\n\nclass AnyIOStream(AsyncNetworkStream):\n def __init__(self, stream: anyio.abc.ByteStream) -> None:\n self._stream = stream\n\n async def read(self, max_bytes: int, timeout: float | None = None) -> bytes:\n exc_map = {\n TimeoutError: ReadTimeout,\n anyio.BrokenResourceError: ReadError,\n anyio.ClosedResourceError: ReadError,\n anyio.EndOfStream: ReadError,\n }\n with map_exceptions(exc_map):\n with anyio.fail_after(timeout):\n try:\n return await self._stream.receive(max_bytes=max_bytes)\n except anyio.EndOfStream: # pragma: nocover\n return b""\n\n async def write(self, buffer: bytes, timeout: float | None = None) -> None:\n if not buffer:\n return\n\n exc_map = {\n TimeoutError: WriteTimeout,\n anyio.BrokenResourceError: WriteError,\n anyio.ClosedResourceError: WriteError,\n }\n with map_exceptions(exc_map):\n with anyio.fail_after(timeout):\n await self._stream.send(item=buffer)\n\n async def aclose(self) -> None:\n await self._stream.aclose()\n\n async def start_tls(\n self,\n ssl_context: ssl.SSLContext,\n server_hostname: str | None = None,\n timeout: float | None = None,\n ) -> AsyncNetworkStream:\n exc_map = {\n TimeoutError: ConnectTimeout,\n anyio.BrokenResourceError: ConnectError,\n anyio.EndOfStream: ConnectError,\n ssl.SSLError: ConnectError,\n }\n with map_exceptions(exc_map):\n try:\n with anyio.fail_after(timeout):\n ssl_stream = await anyio.streams.tls.TLSStream.wrap(\n self._stream,\n ssl_context=ssl_context,\n hostname=server_hostname,\n standard_compatible=False,\n server_side=False,\n )\n except Exception as exc: # pragma: nocover\n await self.aclose()\n raise exc\n return AnyIOStream(ssl_stream)\n\n def get_extra_info(self, info: str) -> typing.Any:\n if info == "ssl_object":\n return self._stream.extra(anyio.streams.tls.TLSAttribute.ssl_object, None)\n if info == "client_addr":\n return self._stream.extra(anyio.abc.SocketAttribute.local_address, None)\n if info == "server_addr":\n return self._stream.extra(anyio.abc.SocketAttribute.remote_address, None)\n if info == "socket":\n return self._stream.extra(anyio.abc.SocketAttribute.raw_socket, None)\n if info == "is_readable":\n sock = self._stream.extra(anyio.abc.SocketAttribute.raw_socket, None)\n return is_socket_readable(sock)\n return None\n\n\nclass AnyIOBackend(AsyncNetworkBackend):\n async def connect_tcp(\n self,\n host: str,\n port: int,\n timeout: float | None = None,\n local_address: str | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> AsyncNetworkStream: # pragma: nocover\n if socket_options is None:\n socket_options = []\n exc_map = {\n TimeoutError: ConnectTimeout,\n OSError: ConnectError,\n anyio.BrokenResourceError: ConnectError,\n }\n with map_exceptions(exc_map):\n with anyio.fail_after(timeout):\n stream: anyio.abc.ByteStream = await anyio.connect_tcp(\n remote_host=host,\n remote_port=port,\n local_host=local_address,\n )\n # By default TCP sockets opened in `asyncio` include TCP_NODELAY.\n for option in socket_options:\n stream._raw_socket.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover\n return AnyIOStream(stream)\n\n async def connect_unix_socket(\n self,\n path: str,\n timeout: float | None = None,\n 
socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> AsyncNetworkStream: # pragma: nocover\n if socket_options is None:\n socket_options = []\n exc_map = {\n TimeoutError: ConnectTimeout,\n OSError: ConnectError,\n anyio.BrokenResourceError: ConnectError,\n }\n with map_exceptions(exc_map):\n with anyio.fail_after(timeout):\n stream: anyio.abc.ByteStream = await anyio.connect_unix(path)\n for option in socket_options:\n stream._raw_socket.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover\n return AnyIOStream(stream)\n\n async def sleep(self, seconds: float) -> None:\n await anyio.sleep(seconds) # pragma: nocover\n
|
.venv\Lib\site-packages\httpcore\_backends\anyio.py
|
anyio.py
|
Python
| 5,252 | 0.95 | 0.157534 | 0.007634 |
react-lib
| 630 |
2024-01-09T10:27:57.333239
|
GPL-3.0
| false |
72a339f39034f7999017c5d1dd70411a
|
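`AnyIOBackend` in the anyio.py row above adapts anyio byte streams to httpcore's async network interface. A small sketch that selects it explicitly instead of relying on backend auto-detection (assumes the top-level `httpcore.AnyIOBackend` export; the URL is a placeholder):

import asyncio
import httpcore

async def main() -> None:
    network_backend = httpcore.AnyIOBackend()
    async with httpcore.AsyncConnectionPool(network_backend=network_backend) as pool:
        response = await pool.request("GET", "https://example.org/")
        print(response.status)

asyncio.run(main())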
from __future__ import annotations\n\nimport typing\n\nfrom .._synchronization import current_async_library\nfrom .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream\n\n\nclass AutoBackend(AsyncNetworkBackend):\n async def _init_backend(self) -> None:\n if not (hasattr(self, "_backend")):\n backend = current_async_library()\n if backend == "trio":\n from .trio import TrioBackend\n\n self._backend: AsyncNetworkBackend = TrioBackend()\n else:\n from .anyio import AnyIOBackend\n\n self._backend = AnyIOBackend()\n\n async def connect_tcp(\n self,\n host: str,\n port: int,\n timeout: float | None = None,\n local_address: str | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> AsyncNetworkStream:\n await self._init_backend()\n return await self._backend.connect_tcp(\n host,\n port,\n timeout=timeout,\n local_address=local_address,\n socket_options=socket_options,\n )\n\n async def connect_unix_socket(\n self,\n path: str,\n timeout: float | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> AsyncNetworkStream: # pragma: nocover\n await self._init_backend()\n return await self._backend.connect_unix_socket(\n path, timeout=timeout, socket_options=socket_options\n )\n\n async def sleep(self, seconds: float) -> None: # pragma: nocover\n await self._init_backend()\n return await self._backend.sleep(seconds)\n
|
.venv\Lib\site-packages\httpcore\_backends\auto.py
|
auto.py
|
Python
| 1,662 | 0.95 | 0.134615 | 0 |
node-utils
| 673 |
2025-02-23T02:29:58.176244
|
Apache-2.0
| false |
b05af13f5f8fb1078526b5c19b0c8132
|
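`AutoBackend` in the auto.py row above defers choosing between the trio and anyio backends until the first I/O call, based on which async library is currently running. A tiny sketch of that lazy selection, inspecting the private `_backend` attribute purely for illustration:

import asyncio
import httpcore

async def main() -> None:
    backend = httpcore.AutoBackend()
    await backend.sleep(0)           # first call triggers _init_backend()
    print(type(backend._backend))    # AnyIOBackend when running under asyncio

asyncio.run(main())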
from __future__ import annotations\n\nimport ssl\nimport time\nimport typing\n\nSOCKET_OPTION = typing.Union[\n typing.Tuple[int, int, int],\n typing.Tuple[int, int, typing.Union[bytes, bytearray]],\n typing.Tuple[int, int, None, int],\n]\n\n\nclass NetworkStream:\n def read(self, max_bytes: int, timeout: float | None = None) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n def write(self, buffer: bytes, timeout: float | None = None) -> None:\n raise NotImplementedError() # pragma: nocover\n\n def close(self) -> None:\n raise NotImplementedError() # pragma: nocover\n\n def start_tls(\n self,\n ssl_context: ssl.SSLContext,\n server_hostname: str | None = None,\n timeout: float | None = None,\n ) -> NetworkStream:\n raise NotImplementedError() # pragma: nocover\n\n def get_extra_info(self, info: str) -> typing.Any:\n return None # pragma: nocover\n\n\nclass NetworkBackend:\n def connect_tcp(\n self,\n host: str,\n port: int,\n timeout: float | None = None,\n local_address: str | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> NetworkStream:\n raise NotImplementedError() # pragma: nocover\n\n def connect_unix_socket(\n self,\n path: str,\n timeout: float | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> NetworkStream:\n raise NotImplementedError() # pragma: nocover\n\n def sleep(self, seconds: float) -> None:\n time.sleep(seconds) # pragma: nocover\n\n\nclass AsyncNetworkStream:\n async def read(self, max_bytes: int, timeout: float | None = None) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n async def write(self, buffer: bytes, timeout: float | None = None) -> None:\n raise NotImplementedError() # pragma: nocover\n\n async def aclose(self) -> None:\n raise NotImplementedError() # pragma: nocover\n\n async def start_tls(\n self,\n ssl_context: ssl.SSLContext,\n server_hostname: str | None = None,\n timeout: float | None = None,\n ) -> AsyncNetworkStream:\n raise NotImplementedError() # pragma: nocover\n\n def get_extra_info(self, info: str) -> typing.Any:\n return None # pragma: nocover\n\n\nclass AsyncNetworkBackend:\n async def connect_tcp(\n self,\n host: str,\n port: int,\n timeout: float | None = None,\n local_address: str | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> AsyncNetworkStream:\n raise NotImplementedError() # pragma: nocover\n\n async def connect_unix_socket(\n self,\n path: str,\n timeout: float | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> AsyncNetworkStream:\n raise NotImplementedError() # pragma: nocover\n\n async def sleep(self, seconds: float) -> None:\n raise NotImplementedError() # pragma: nocover\n
|
.venv\Lib\site-packages\httpcore\_backends\base.py
|
base.py
|
Python
| 3,042 | 0.95 | 0.19802 | 0 |
node-utils
| 950 |
2024-02-03T04:56:40.840868
|
GPL-3.0
| false |
52468c2668de30d6cc5c9f3b7e13c352
|
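The base.py row above defines the abstract sync and async network interfaces that custom backends implement. A minimal custom-backend sketch that subclasses `NetworkBackend` and delegates TCP connects to `SyncBackend` while logging them; the class name and behaviour are illustrative, not httpcore API beyond the interfaces shown:

import httpcore

class LoggingBackend(httpcore.NetworkBackend):
    """Delegates to SyncBackend, printing each outgoing TCP connection."""

    def __init__(self) -> None:
        self._inner = httpcore.SyncBackend()

    def connect_tcp(self, host, port, timeout=None, local_address=None, socket_options=None):
        print(f"connecting to {host}:{port}")
        return self._inner.connect_tcp(
            host,
            port,
            timeout=timeout,
            local_address=local_address,
            socket_options=socket_options,
        )

# The custom backend plugs into any pool via the `network_backend` argument.
pool = httpcore.ConnectionPool(network_backend=LoggingBackend())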
from __future__ import annotations\n\nimport ssl\nimport typing\n\nfrom .._exceptions import ReadError\nfrom .base import (\n SOCKET_OPTION,\n AsyncNetworkBackend,\n AsyncNetworkStream,\n NetworkBackend,\n NetworkStream,\n)\n\n\nclass MockSSLObject:\n def __init__(self, http2: bool):\n self._http2 = http2\n\n def selected_alpn_protocol(self) -> str:\n return "h2" if self._http2 else "http/1.1"\n\n\nclass MockStream(NetworkStream):\n def __init__(self, buffer: list[bytes], http2: bool = False) -> None:\n self._buffer = buffer\n self._http2 = http2\n self._closed = False\n\n def read(self, max_bytes: int, timeout: float | None = None) -> bytes:\n if self._closed:\n raise ReadError("Connection closed")\n if not self._buffer:\n return b""\n return self._buffer.pop(0)\n\n def write(self, buffer: bytes, timeout: float | None = None) -> None:\n pass\n\n def close(self) -> None:\n self._closed = True\n\n def start_tls(\n self,\n ssl_context: ssl.SSLContext,\n server_hostname: str | None = None,\n timeout: float | None = None,\n ) -> NetworkStream:\n return self\n\n def get_extra_info(self, info: str) -> typing.Any:\n return MockSSLObject(http2=self._http2) if info == "ssl_object" else None\n\n def __repr__(self) -> str:\n return "<httpcore.MockStream>"\n\n\nclass MockBackend(NetworkBackend):\n def __init__(self, buffer: list[bytes], http2: bool = False) -> None:\n self._buffer = buffer\n self._http2 = http2\n\n def connect_tcp(\n self,\n host: str,\n port: int,\n timeout: float | None = None,\n local_address: str | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> NetworkStream:\n return MockStream(list(self._buffer), http2=self._http2)\n\n def connect_unix_socket(\n self,\n path: str,\n timeout: float | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> NetworkStream:\n return MockStream(list(self._buffer), http2=self._http2)\n\n def sleep(self, seconds: float) -> None:\n pass\n\n\nclass AsyncMockStream(AsyncNetworkStream):\n def __init__(self, buffer: list[bytes], http2: bool = False) -> None:\n self._buffer = buffer\n self._http2 = http2\n self._closed = False\n\n async def read(self, max_bytes: int, timeout: float | None = None) -> bytes:\n if self._closed:\n raise ReadError("Connection closed")\n if not self._buffer:\n return b""\n return self._buffer.pop(0)\n\n async def write(self, buffer: bytes, timeout: float | None = None) -> None:\n pass\n\n async def aclose(self) -> None:\n self._closed = True\n\n async def start_tls(\n self,\n ssl_context: ssl.SSLContext,\n server_hostname: str | None = None,\n timeout: float | None = None,\n ) -> AsyncNetworkStream:\n return self\n\n def get_extra_info(self, info: str) -> typing.Any:\n return MockSSLObject(http2=self._http2) if info == "ssl_object" else None\n\n def __repr__(self) -> str:\n return "<httpcore.AsyncMockStream>"\n\n\nclass AsyncMockBackend(AsyncNetworkBackend):\n def __init__(self, buffer: list[bytes], http2: bool = False) -> None:\n self._buffer = buffer\n self._http2 = http2\n\n async def connect_tcp(\n self,\n host: str,\n port: int,\n timeout: float | None = None,\n local_address: str | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> AsyncNetworkStream:\n return AsyncMockStream(list(self._buffer), http2=self._http2)\n\n async def connect_unix_socket(\n self,\n path: str,\n timeout: float | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> AsyncNetworkStream:\n return 
AsyncMockStream(list(self._buffer), http2=self._http2)\n\n async def sleep(self, seconds: float) -> None:\n pass\n
|
.venv\Lib\site-packages\httpcore\_backends\mock.py
|
mock.py
|
Python
| 4,077 | 0.85 | 0.251748 | 0 |
react-lib
| 713 |
2025-01-14T12:46:03.477422
|
MIT
| false |
a391c8c563639335f979633ee559db4a
|
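The mock backends in the mock.py row above replay a fixed list of byte chunks, which makes pools testable without opening sockets. A testing sketch that feeds one canned HTTP/1.1 response through a real `ConnectionPool` (assumes the top-level `httpcore.MockBackend` export):

import httpcore

network_backend = httpcore.MockBackend(
    [
        b"HTTP/1.1 200 OK\r\n",
        b"Content-Type: plain/text\r\n",
        b"Content-Length: 13\r\n",
        b"\r\n",
        b"Hello, world!",
    ]
)

with httpcore.ConnectionPool(network_backend=network_backend) as pool:
    response = pool.request("GET", "http://example.org/")
    assert response.status == 200
    assert response.content == b"Hello, world!"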
from __future__ import annotations\n\nimport functools\nimport socket\nimport ssl\nimport sys\nimport typing\n\nfrom .._exceptions import (\n ConnectError,\n ConnectTimeout,\n ExceptionMapping,\n ReadError,\n ReadTimeout,\n WriteError,\n WriteTimeout,\n map_exceptions,\n)\nfrom .._utils import is_socket_readable\nfrom .base import SOCKET_OPTION, NetworkBackend, NetworkStream\n\n\nclass TLSinTLSStream(NetworkStream): # pragma: no cover\n """\n Because the standard `SSLContext.wrap_socket` method does\n not work for `SSLSocket` objects, we need this class\n to implement TLS stream using an underlying `SSLObject`\n instance in order to support TLS on top of TLS.\n """\n\n # Defined in RFC 8449\n TLS_RECORD_SIZE = 16384\n\n def __init__(\n self,\n sock: socket.socket,\n ssl_context: ssl.SSLContext,\n server_hostname: str | None = None,\n timeout: float | None = None,\n ):\n self._sock = sock\n self._incoming = ssl.MemoryBIO()\n self._outgoing = ssl.MemoryBIO()\n\n self.ssl_obj = ssl_context.wrap_bio(\n incoming=self._incoming,\n outgoing=self._outgoing,\n server_hostname=server_hostname,\n )\n\n self._sock.settimeout(timeout)\n self._perform_io(self.ssl_obj.do_handshake)\n\n def _perform_io(\n self,\n func: typing.Callable[..., typing.Any],\n ) -> typing.Any:\n ret = None\n\n while True:\n errno = None\n try:\n ret = func()\n except (ssl.SSLWantReadError, ssl.SSLWantWriteError) as e:\n errno = e.errno\n\n self._sock.sendall(self._outgoing.read())\n\n if errno == ssl.SSL_ERROR_WANT_READ:\n buf = self._sock.recv(self.TLS_RECORD_SIZE)\n\n if buf:\n self._incoming.write(buf)\n else:\n self._incoming.write_eof()\n if errno is None:\n return ret\n\n def read(self, max_bytes: int, timeout: float | None = None) -> bytes:\n exc_map: ExceptionMapping = {socket.timeout: ReadTimeout, OSError: ReadError}\n with map_exceptions(exc_map):\n self._sock.settimeout(timeout)\n return typing.cast(\n bytes, self._perform_io(functools.partial(self.ssl_obj.read, max_bytes))\n )\n\n def write(self, buffer: bytes, timeout: float | None = None) -> None:\n exc_map: ExceptionMapping = {socket.timeout: WriteTimeout, OSError: WriteError}\n with map_exceptions(exc_map):\n self._sock.settimeout(timeout)\n while buffer:\n nsent = self._perform_io(functools.partial(self.ssl_obj.write, buffer))\n buffer = buffer[nsent:]\n\n def close(self) -> None:\n self._sock.close()\n\n def start_tls(\n self,\n ssl_context: ssl.SSLContext,\n server_hostname: str | None = None,\n timeout: float | None = None,\n ) -> NetworkStream:\n raise NotImplementedError()\n\n def get_extra_info(self, info: str) -> typing.Any:\n if info == "ssl_object":\n return self.ssl_obj\n if info == "client_addr":\n return self._sock.getsockname()\n if info == "server_addr":\n return self._sock.getpeername()\n if info == "socket":\n return self._sock\n if info == "is_readable":\n return is_socket_readable(self._sock)\n return None\n\n\nclass SyncStream(NetworkStream):\n def __init__(self, sock: socket.socket) -> None:\n self._sock = sock\n\n def read(self, max_bytes: int, timeout: float | None = None) -> bytes:\n exc_map: ExceptionMapping = {socket.timeout: ReadTimeout, OSError: ReadError}\n with map_exceptions(exc_map):\n self._sock.settimeout(timeout)\n return self._sock.recv(max_bytes)\n\n def write(self, buffer: bytes, timeout: float | None = None) -> None:\n if not buffer:\n return\n\n exc_map: ExceptionMapping = {socket.timeout: WriteTimeout, OSError: WriteError}\n with map_exceptions(exc_map):\n while buffer:\n self._sock.settimeout(timeout)\n n = 
self._sock.send(buffer)\n buffer = buffer[n:]\n\n def close(self) -> None:\n self._sock.close()\n\n def start_tls(\n self,\n ssl_context: ssl.SSLContext,\n server_hostname: str | None = None,\n timeout: float | None = None,\n ) -> NetworkStream:\n exc_map: ExceptionMapping = {\n socket.timeout: ConnectTimeout,\n OSError: ConnectError,\n }\n with map_exceptions(exc_map):\n try:\n if isinstance(self._sock, ssl.SSLSocket): # pragma: no cover\n # If the underlying socket has already been upgraded\n # to the TLS layer (i.e. is an instance of SSLSocket),\n # we need some additional smarts to support TLS-in-TLS.\n return TLSinTLSStream(\n self._sock, ssl_context, server_hostname, timeout\n )\n else:\n self._sock.settimeout(timeout)\n sock = ssl_context.wrap_socket(\n self._sock, server_hostname=server_hostname\n )\n except Exception as exc: # pragma: nocover\n self.close()\n raise exc\n return SyncStream(sock)\n\n def get_extra_info(self, info: str) -> typing.Any:\n if info == "ssl_object" and isinstance(self._sock, ssl.SSLSocket):\n return self._sock._sslobj # type: ignore\n if info == "client_addr":\n return self._sock.getsockname()\n if info == "server_addr":\n return self._sock.getpeername()\n if info == "socket":\n return self._sock\n if info == "is_readable":\n return is_socket_readable(self._sock)\n return None\n\n\nclass SyncBackend(NetworkBackend):\n def connect_tcp(\n self,\n host: str,\n port: int,\n timeout: float | None = None,\n local_address: str | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> NetworkStream:\n # Note that we automatically include `TCP_NODELAY`\n # in addition to any other custom socket options.\n if socket_options is None:\n socket_options = [] # pragma: no cover\n address = (host, port)\n source_address = None if local_address is None else (local_address, 0)\n exc_map: ExceptionMapping = {\n socket.timeout: ConnectTimeout,\n OSError: ConnectError,\n }\n\n with map_exceptions(exc_map):\n sock = socket.create_connection(\n address,\n timeout,\n source_address=source_address,\n )\n for option in socket_options:\n sock.setsockopt(*option) # pragma: no cover\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n return SyncStream(sock)\n\n def connect_unix_socket(\n self,\n path: str,\n timeout: float | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> NetworkStream: # pragma: nocover\n if sys.platform == "win32":\n raise RuntimeError(\n "Attempted to connect to a UNIX socket on a Windows system."\n )\n if socket_options is None:\n socket_options = []\n\n exc_map: ExceptionMapping = {\n socket.timeout: ConnectTimeout,\n OSError: ConnectError,\n }\n with map_exceptions(exc_map):\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n for option in socket_options:\n sock.setsockopt(*option)\n sock.settimeout(timeout)\n sock.connect(path)\n return SyncStream(sock)\n
|
.venv\Lib\site-packages\httpcore\_backends\sync.py
|
sync.py
|
Python
| 7,977 | 0.95 | 0.190871 | 0.028571 |
vue-tools
| 711 |
2024-12-05T18:14:36.929413
|
MIT
| false |
e9c9e4711c13ff50cc776e5afb4e2153
|
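The sync.py row above implements the blocking socket backend, including the `TLSinTLSStream` wrapper needed when TLS must be layered over an already-encrypted socket. A low-level sketch driving `SyncBackend` directly with a hand-written HTTP/1.1 request; the host and request line are placeholders:

import httpcore

backend = httpcore.SyncBackend()
stream = backend.connect_tcp("example.org", 80, timeout=10.0)
try:
    stream.write(
        b"GET / HTTP/1.1\r\nHost: example.org\r\nConnection: close\r\n\r\n",
        timeout=10.0,
    )
    data = stream.read(max_bytes=4096, timeout=10.0)
    print(data.splitlines()[0])  # e.g. b"HTTP/1.1 200 OK"
finally:
    stream.close()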
\n\n
|
.venv\Lib\site-packages\httpcore\_backends\__pycache__\anyio.cpython-313.pyc
|
anyio.cpython-313.pyc
|
Other
| 8,608 | 0.8 | 0 | 0 |
react-lib
| 987 |
2023-07-22T05:43:00.591329
|
Apache-2.0
| false |
1d7da8c3b5e7b5f73db3be74eceeb764
|
\n\n
|
.venv\Lib\site-packages\httpcore\_backends\__pycache__\auto.cpython-313.pyc
|
auto.cpython-313.pyc
|
Other
| 2,746 | 0.8 | 0 | 0 |
react-lib
| 590 |
2025-02-23T14:05:19.238824
|
MIT
| false |
a90fbb94d2487494dad910b801f4a7ed
|
\n\n
|
.venv\Lib\site-packages\httpcore\_backends\__pycache__\base.cpython-313.pyc
|
base.cpython-313.pyc
|
Other
| 5,070 | 0.8 | 0 | 0 |
node-utils
| 160 |
2025-05-27T14:03:03.792272
|
MIT
| false |
a13bc9da49bcecaa13bfe2626711e710
|
\n\n
|
.venv\Lib\site-packages\httpcore\_backends\__pycache__\mock.cpython-313.pyc
|
mock.cpython-313.pyc
|
Other
| 7,383 | 0.8 | 0 | 0 |
vue-tools
| 44 |
2024-02-29T07:50:22.790955
|
BSD-3-Clause
| false |
dc6195e9a75a43e6ea0fdbdb6b3e2ff2
|
\n\n
|
.venv\Lib\site-packages\httpcore\_backends\__pycache__\sync.cpython-313.pyc
|
sync.cpython-313.pyc
|
Other
| 11,610 | 0.8 | 0.014925 | 0.007874 |
vue-tools
| 605 |
2025-05-02T16:54:11.638478
|
GPL-3.0
| false |
15de0c2cf0ed848a999bba978fb3895c
|
\n\n
|
.venv\Lib\site-packages\httpcore\_backends\__pycache__\trio.cpython-313.pyc
|
trio.cpython-313.pyc
|
Other
| 9,119 | 0.8 | 0 | 0 |
vue-tools
| 362 |
2023-12-26T05:11:13.367909
|
GPL-3.0
| false |
3a59ba3b375841f3a5f709e0df79da64
|
\n\n
|
.venv\Lib\site-packages\httpcore\_backends\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 193 | 0.7 | 0 | 0 |
node-utils
| 949 |
2024-11-04T00:30:25.690125
|
MIT
| false |
00aa71ba4fe45b9485024684349b3636
|
from __future__ import annotations\n\nimport itertools\nimport logging\nimport ssl\nimport types\nimport typing\n\nfrom .._backends.sync import SyncBackend\nfrom .._backends.base import SOCKET_OPTION, NetworkBackend, NetworkStream\nfrom .._exceptions import ConnectError, ConnectTimeout\nfrom .._models import Origin, Request, Response\nfrom .._ssl import default_ssl_context\nfrom .._synchronization import Lock\nfrom .._trace import Trace\nfrom .http11 import HTTP11Connection\nfrom .interfaces import ConnectionInterface\n\nRETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc.\n\n\nlogger = logging.getLogger("httpcore.connection")\n\n\ndef exponential_backoff(factor: float) -> typing.Iterator[float]:\n """\n Generate a geometric sequence that has a ratio of 2 and starts with 0.\n\n For example:\n - `factor = 2`: `0, 2, 4, 8, 16, 32, 64, ...`\n - `factor = 3`: `0, 3, 6, 12, 24, 48, 96, ...`\n """\n yield 0\n for n in itertools.count():\n yield factor * 2**n\n\n\nclass HTTPConnection(ConnectionInterface):\n def __init__(\n self,\n origin: Origin,\n ssl_context: ssl.SSLContext | None = None,\n keepalive_expiry: float | None = None,\n http1: bool = True,\n http2: bool = False,\n retries: int = 0,\n local_address: str | None = None,\n uds: str | None = None,\n network_backend: NetworkBackend | None = None,\n socket_options: typing.Iterable[SOCKET_OPTION] | None = None,\n ) -> None:\n self._origin = origin\n self._ssl_context = ssl_context\n self._keepalive_expiry = keepalive_expiry\n self._http1 = http1\n self._http2 = http2\n self._retries = retries\n self._local_address = local_address\n self._uds = uds\n\n self._network_backend: NetworkBackend = (\n SyncBackend() if network_backend is None else network_backend\n )\n self._connection: ConnectionInterface | None = None\n self._connect_failed: bool = False\n self._request_lock = Lock()\n self._socket_options = socket_options\n\n def handle_request(self, request: Request) -> Response:\n if not self.can_handle_request(request.url.origin):\n raise RuntimeError(\n f"Attempted to send request to {request.url.origin} on connection to {self._origin}"\n )\n\n try:\n with self._request_lock:\n if self._connection is None:\n stream = self._connect(request)\n\n ssl_object = stream.get_extra_info("ssl_object")\n http2_negotiated = (\n ssl_object is not None\n and ssl_object.selected_alpn_protocol() == "h2"\n )\n if http2_negotiated or (self._http2 and not self._http1):\n from .http2 import HTTP2Connection\n\n self._connection = HTTP2Connection(\n origin=self._origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n else:\n self._connection = HTTP11Connection(\n origin=self._origin,\n stream=stream,\n keepalive_expiry=self._keepalive_expiry,\n )\n except BaseException as exc:\n self._connect_failed = True\n raise exc\n\n return self._connection.handle_request(request)\n\n def _connect(self, request: Request) -> NetworkStream:\n timeouts = request.extensions.get("timeout", {})\n sni_hostname = request.extensions.get("sni_hostname", None)\n timeout = timeouts.get("connect", None)\n\n retries_left = self._retries\n delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR)\n\n while True:\n try:\n if self._uds is None:\n kwargs = {\n "host": self._origin.host.decode("ascii"),\n "port": self._origin.port,\n "local_address": self._local_address,\n "timeout": timeout,\n "socket_options": self._socket_options,\n }\n with Trace("connect_tcp", logger, request, kwargs) as trace:\n stream = self._network_backend.connect_tcp(**kwargs)\n 
trace.return_value = stream\n else:\n kwargs = {\n "path": self._uds,\n "timeout": timeout,\n "socket_options": self._socket_options,\n }\n with Trace(\n "connect_unix_socket", logger, request, kwargs\n ) as trace:\n stream = self._network_backend.connect_unix_socket(\n **kwargs\n )\n trace.return_value = stream\n\n if self._origin.scheme in (b"https", b"wss"):\n ssl_context = (\n default_ssl_context()\n if self._ssl_context is None\n else self._ssl_context\n )\n alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]\n ssl_context.set_alpn_protocols(alpn_protocols)\n\n kwargs = {\n "ssl_context": ssl_context,\n "server_hostname": sni_hostname\n or self._origin.host.decode("ascii"),\n "timeout": timeout,\n }\n with Trace("start_tls", logger, request, kwargs) as trace:\n stream = stream.start_tls(**kwargs)\n trace.return_value = stream\n return stream\n except (ConnectError, ConnectTimeout):\n if retries_left <= 0:\n raise\n retries_left -= 1\n delay = next(delays)\n with Trace("retry", logger, request, kwargs) as trace:\n self._network_backend.sleep(delay)\n\n def can_handle_request(self, origin: Origin) -> bool:\n return origin == self._origin\n\n def close(self) -> None:\n if self._connection is not None:\n with Trace("close", logger, None, {}):\n self._connection.close()\n\n def is_available(self) -> bool:\n if self._connection is None:\n # If HTTP/2 support is enabled, and the resulting connection could\n # end up as HTTP/2 then we should indicate the connection as being\n # available to service multiple requests.\n return (\n self._http2\n and (self._origin.scheme == b"https" or not self._http1)\n and not self._connect_failed\n )\n return self._connection.is_available()\n\n def has_expired(self) -> bool:\n if self._connection is None:\n return self._connect_failed\n return self._connection.has_expired()\n\n def is_idle(self) -> bool:\n if self._connection is None:\n return self._connect_failed\n return self._connection.is_idle()\n\n def is_closed(self) -> bool:\n if self._connection is None:\n return self._connect_failed\n return self._connection.is_closed()\n\n def info(self) -> str:\n if self._connection is None:\n return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"\n return self._connection.info()\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__} [{self.info()}]>"\n\n # These context managers are not used in the standard flow, but are\n # useful for testing or working with connection instances directly.\n\n def __enter__(self) -> HTTPConnection:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None = None,\n exc_value: BaseException | None = None,\n traceback: types.TracebackType | None = None,\n ) -> None:\n self.close()\n
|
.venv\Lib\site-packages\httpcore\_sync\connection.py
|
connection.py
|
Python
| 8,238 | 0.95 | 0.162162 | 0.031579 |
python-kit
| 91 |
2023-11-29T09:31:53.674159
|
GPL-3.0
| false |
fd77fd268a88b1f4095978351b74e4a5
|
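The connection.py row above wires retries with exponential backoff (the module's factor of 0.5 yields delays of 0, 0.5, 1, 2, ... seconds) around TCP/TLS establishment, then hands off to an HTTP/1.1 or HTTP/2 implementation based on the negotiated ALPN protocol. A sketch of a single `HTTPConnection` used directly, outside a pool; the origin and URL are placeholders:

import httpcore

origin = httpcore.Origin(scheme=b"https", host=b"example.org", port=443)

with httpcore.HTTPConnection(origin=origin, retries=3) as conn:
    # The request origin must match the connection origin, or a RuntimeError is raised.
    response = conn.request("GET", "https://example.org/")
    print(response.status)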